Dataset schema (column : type, observed range):

    id               : int32, 0 to 252k
    repo             : string, lengths 7 to 55
    path             : string, lengths 4 to 127
    func_name        : string, lengths 1 to 88
    original_string  : string, lengths 75 to 19.8k
    language         : string, 1 class
    code             : string, lengths 75 to 19.8k
    code_tokens      : list
    docstring        : string, lengths 3 to 17.3k
    docstring_tokens : list
    sha              : string, lengths 40 to 40
    url              : string, lengths 87 to 242
15,000
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
wninsd
python
def wninsd(left, right, window):
    """
    Insert an interval into a double precision window.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wninsd_c.html

    :param left: Left endpoint of new interval.
    :type left: float
    :param right: Right endpoint of new interval.
    :type right: float
    :param window: Input window.
    :type window: spiceypy.utils.support_types.SpiceCell
    """
    assert isinstance(window, stypes.SpiceCell)
    assert window.dtype == 1
    left = ctypes.c_double(left)
    right = ctypes.c_double(right)
    libspice.wninsd_c(left, right, ctypes.byref(window))
[ "def", "wninsd", "(", "left", ",", "right", ",", "window", ")", ":", "assert", "isinstance", "(", "window", ",", "stypes", ".", "SpiceCell", ")", "assert", "window", ".", "dtype", "==", "1", "left", "=", "ctypes", ".", "c_double", "(", "left", ")", "right", "=", "ctypes", ".", "c_double", "(", "right", ")", "libspice", ".", "wninsd_c", "(", "left", ",", "right", ",", "ctypes", ".", "byref", "(", "window", ")", ")" ]
[ "Insert", "an", "interval", "into", "a", "double", "precision", "window", "." ]
fc20a9b9de68b58eed5b332f0c051fb343a6e335
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15541-L15558
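A minimal usage sketch for wninsd, assuming spiceypy is installed and imported as spice; stypes.SpiceCell.double is the double-precision cell constructor these wrappers use:

import spiceypy as spice
import spiceypy.utils.support_types as stypes

window = stypes.SpiceCell.double(8)  # room for 8 endpoints, i.e. 4 intervals
spice.wninsd(1.0, 3.0, window)       # window: [1, 3]
spice.wninsd(7.0, 11.0, window)      # window: [1, 3] [7, 11]
spice.wninsd(2.0, 4.0, window)       # overlapping insert merges to [1, 4] [7, 11]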
15,001
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
wnintd
python
def wnintd(a, b):
    """
    Place the intersection of two double precision windows into
    a third window.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnintd_c.html

    :param a: Input window A.
    :type a: spiceypy.utils.support_types.SpiceCell
    :param b: Input window B.
    :type b: spiceypy.utils.support_types.SpiceCell
    :return: Intersection of a and b.
    :rtype: spiceypy.utils.support_types.SpiceCell
    """
    # Check isinstance before dtype so a bad argument fails the assert
    # instead of raising AttributeError (the original interleaved these).
    assert isinstance(a, stypes.SpiceCell)
    assert a.dtype == 1
    assert isinstance(b, stypes.SpiceCell)
    assert b.dtype == 1
    c = stypes.SpiceCell.double(b.size + a.size)
    libspice.wnintd_c(ctypes.byref(a), ctypes.byref(b), ctypes.byref(c))
    return c
[ "def", "wnintd", "(", "a", ",", "b", ")", ":", "assert", "isinstance", "(", "a", ",", "stypes", ".", "SpiceCell", ")", "assert", "b", ".", "dtype", "==", "1", "assert", "isinstance", "(", "b", ",", "stypes", ".", "SpiceCell", ")", "assert", "a", ".", "dtype", "==", "1", "c", "=", "stypes", ".", "SpiceCell", ".", "double", "(", "b", ".", "size", "+", "a", ".", "size", ")", "libspice", ".", "wnintd_c", "(", "ctypes", ".", "byref", "(", "a", ")", ",", "ctypes", ".", "byref", "(", "b", ")", ",", "ctypes", ".", "byref", "(", "c", ")", ")", "return", "c" ]
[ "Place", "the", "intersection", "of", "two", "double", "precision", "windows", "into", "a", "third", "window", "." ]
fc20a9b9de68b58eed5b332f0c051fb343a6e335
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15562-L15583
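A minimal sketch of wnintd under the same assumptions as the wninsd example above:

import spiceypy as spice
import spiceypy.utils.support_types as stypes

a = stypes.SpiceCell.double(4)
spice.wninsd(1.0, 5.0, a)   # a: [1, 5]
b = stypes.SpiceCell.double(4)
spice.wninsd(3.0, 8.0, b)   # b: [3, 8]
c = spice.wnintd(a, b)      # c: [3, 5]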
15,002
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
wnreld
python
def wnreld(a, op, b):
    """
    Compare two double precision windows.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnreld_c.html

    :param a: First window.
    :type a: spiceypy.utils.support_types.SpiceCell
    :param op: Comparison operator.
    :type op: str
    :param b: Second window.
    :type b: spiceypy.utils.support_types.SpiceCell
    :return: The result of comparison: a (op) b.
    :rtype: bool
    """
    # Check isinstance before dtype so a bad argument fails the assert
    # instead of raising AttributeError (the original interleaved these).
    assert isinstance(a, stypes.SpiceCell)
    assert a.dtype == 1
    assert isinstance(b, stypes.SpiceCell)
    assert b.dtype == 1
    assert isinstance(op, str)
    op = stypes.stringToCharP(op.encode(encoding='UTF-8'))
    return bool(libspice.wnreld_c(ctypes.byref(a), op, ctypes.byref(b)))
[ "def", "wnreld", "(", "a", ",", "op", ",", "b", ")", ":", "assert", "isinstance", "(", "a", ",", "stypes", ".", "SpiceCell", ")", "assert", "b", ".", "dtype", "==", "1", "assert", "isinstance", "(", "b", ",", "stypes", ".", "SpiceCell", ")", "assert", "a", ".", "dtype", "==", "1", "assert", "isinstance", "(", "op", ",", "str", ")", "op", "=", "stypes", ".", "stringToCharP", "(", "op", ".", "encode", "(", "encoding", "=", "'UTF-8'", ")", ")", "return", "bool", "(", "libspice", ".", "wnreld_c", "(", "ctypes", ".", "byref", "(", "a", ")", ",", "op", ",", "ctypes", ".", "byref", "(", "b", ")", ")", ")" ]
[ "Compare", "two", "double", "precision", "windows", "." ]
fc20a9b9de68b58eed5b332f0c051fb343a6e335
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15587-L15608
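The operators accepted by wnreld_c are '=', '<>', '<=', '<', '>=' and '>' (window equality, inequality, subset, proper subset, superset, proper superset). A sketch under the same assumptions as the examples above:

import spiceypy as spice
import spiceypy.utils.support_types as stypes

a = stypes.SpiceCell.double(4)
spice.wninsd(1.0, 3.0, a)
b = stypes.SpiceCell.double(4)
spice.wninsd(0.0, 4.0, b)
print(spice.wnreld(a, '<=', b))  # True: [1, 3] is contained in [0, 4]
print(spice.wnreld(a, '=', b))   # False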
15,003
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
wnsumd
python
def wnsumd(window):
    """
    Summarize the contents of a double precision window.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnsumd_c.html

    :param window: Window to be summarized.
    :type window: spiceypy.utils.support_types.SpiceCell
    :return:
            Total measure of intervals in window,
            Average measure,
            Standard deviation,
            Location of shortest interval,
            Location of longest interval.
    :rtype: tuple
    """
    assert isinstance(window, stypes.SpiceCell)
    assert window.dtype == 1
    meas = ctypes.c_double()
    avg = ctypes.c_double()
    stddev = ctypes.c_double()
    shortest = ctypes.c_int()
    longest = ctypes.c_int()
    libspice.wnsumd_c(ctypes.byref(window), ctypes.byref(meas),
                      ctypes.byref(avg), ctypes.byref(stddev),
                      ctypes.byref(shortest), ctypes.byref(longest))
    return meas.value, avg.value, stddev.value, shortest.value, longest.value
[ "def", "wnsumd", "(", "window", ")", ":", "assert", "isinstance", "(", "window", ",", "stypes", ".", "SpiceCell", ")", "assert", "window", ".", "dtype", "==", "1", "meas", "=", "ctypes", ".", "c_double", "(", ")", "avg", "=", "ctypes", ".", "c_double", "(", ")", "stddev", "=", "ctypes", ".", "c_double", "(", ")", "shortest", "=", "ctypes", ".", "c_int", "(", ")", "longest", "=", "ctypes", ".", "c_int", "(", ")", "libspice", ".", "wnsumd_c", "(", "ctypes", ".", "byref", "(", "window", ")", ",", "ctypes", ".", "byref", "(", "meas", ")", ",", "ctypes", ".", "byref", "(", "avg", ")", ",", "ctypes", ".", "byref", "(", "stddev", ")", ",", "ctypes", ".", "byref", "(", "shortest", ")", ",", "ctypes", ".", "byref", "(", "longest", ")", ")", "return", "meas", ".", "value", ",", "avg", ".", "value", ",", "stddev", ".", "value", ",", "shortest", ".", "value", ",", "longest", ".", "value" ]
[ "Summarize", "the", "contents", "of", "a", "double", "precision", "window", "." ]
fc20a9b9de68b58eed5b332f0c051fb343a6e335
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15612-L15637
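A sketch of wnsumd on a three-interval window, same assumptions as above; the two integer outputs are indices into the cell locating the left endpoints of the shortest and longest intervals:

import spiceypy as spice
import spiceypy.utils.support_types as stypes

window = stypes.SpiceCell.double(6)
spice.wninsd(1.0, 3.0, window)
spice.wninsd(7.0, 11.0, window)
spice.wninsd(23.0, 27.0, window)
meas, avg, stddev, shortest, longest = spice.wnsumd(window)
# meas == 2 + 4 + 4 == 10.0 and avg == meas / 3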
15,004
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
wnunid
python
def wnunid(a, b):
    """
    Place the union of two double precision windows into a third window.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnunid_c.html

    :param a: Input window A.
    :type a: spiceypy.utils.support_types.SpiceCell
    :param b: Input window B.
    :type b: spiceypy.utils.support_types.SpiceCell
    :return: Union of a and b.
    :rtype: spiceypy.utils.support_types.SpiceCell
    """
    # Check isinstance before dtype so a bad argument fails the assert
    # instead of raising AttributeError (the original interleaved these).
    assert isinstance(a, stypes.SpiceCell)
    assert a.dtype == 1
    assert isinstance(b, stypes.SpiceCell)
    assert b.dtype == 1
    c = stypes.SpiceCell.double(b.size + a.size)
    libspice.wnunid_c(ctypes.byref(a), ctypes.byref(b), ctypes.byref(c))
    return c
[ "def", "wnunid", "(", "a", ",", "b", ")", ":", "assert", "isinstance", "(", "a", ",", "stypes", ".", "SpiceCell", ")", "assert", "b", ".", "dtype", "==", "1", "assert", "isinstance", "(", "b", ",", "stypes", ".", "SpiceCell", ")", "assert", "a", ".", "dtype", "==", "1", "c", "=", "stypes", ".", "SpiceCell", ".", "double", "(", "b", ".", "size", "+", "a", ".", "size", ")", "libspice", ".", "wnunid_c", "(", "ctypes", ".", "byref", "(", "a", ")", ",", "ctypes", ".", "byref", "(", "b", ")", ",", "ctypes", ".", "byref", "(", "c", ")", ")", "return", "c" ]
[ "Place", "the", "union", "of", "two", "double", "precision", "windows", "into", "a", "third", "window", "." ]
fc20a9b9de68b58eed5b332f0c051fb343a6e335
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15641-L15660
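wnunid mirrors wnintd but takes the union; a sketch under the same assumptions:

import spiceypy as spice
import spiceypy.utils.support_types as stypes

a = stypes.SpiceCell.double(4)
spice.wninsd(1.0, 5.0, a)   # a: [1, 5]
b = stypes.SpiceCell.double(4)
spice.wninsd(3.0, 8.0, b)   # b: [3, 8]
c = spice.wnunid(a, b)      # c: [1, 8]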
15,005
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
wnvald
python
def wnvald(insize, n, window):
    """
    Form a valid double precision window from the contents
    of a window array.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/wnvald_c.html

    :param insize: Size of window.
    :type insize: int
    :param n: Original number of endpoints.
    :type n: int
    :param window: Input window.
    :type window: spiceypy.utils.support_types.SpiceCell
    :return: The union of the intervals in the input cell.
    :rtype: spiceypy.utils.support_types.SpiceCell
    """
    assert isinstance(window, stypes.SpiceCell)
    assert window.dtype == 1
    insize = ctypes.c_int(insize)
    n = ctypes.c_int(n)
    libspice.wnvald_c(insize, n, ctypes.byref(window))
    return window
[ "def", "wnvald", "(", "insize", ",", "n", ",", "window", ")", ":", "assert", "isinstance", "(", "window", ",", "stypes", ".", "SpiceCell", ")", "assert", "window", ".", "dtype", "==", "1", "insize", "=", "ctypes", ".", "c_int", "(", "insize", ")", "n", "=", "ctypes", ".", "c_int", "(", "n", ")", "libspice", ".", "wnvald_c", "(", "insize", ",", "n", ",", "ctypes", ".", "byref", "(", "window", ")", ")", "return", "window" ]
[ "Form", "a", "valid", "double", "precision", "window", "from", "the", "contents", "of", "a", "window", "array", "." ]
fc20a9b9de68b58eed5b332f0c051fb343a6e335
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15664-L15685
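A sketch of wnvald building a valid window from raw endpoint pairs, same assumptions as above; appndd is SpiceyPy's append-a-double-to-a-cell wrapper:

import spiceypy as spice
import spiceypy.utils.support_types as stypes

window = stypes.SpiceCell.double(10)
for endpoint in [0.0, 0.0, 10.0, 12.0, 2.0, 7.0]:
    spice.appndd(endpoint, window)
window = spice.wnvald(10, 6, window)  # sorted and merged: [0, 0] [2, 7] [10, 12]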
15,006
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
writln
python
def writln(line, unit):
    """
    Internal undocumented command for writing a text line to a logical unit

    No URL available; relevant lines from SPICE source:

    FORTRAN SPICE, writln.f::

        C$Procedure      WRITLN ( Write a text line to a logical unit )
              SUBROUTINE WRITLN ( LINE, UNIT )
              CHARACTER*(*)      LINE
              INTEGER            UNIT

        C     Variable  I/O  Description
        C     --------  ---  --------------------------------------------------
        C     LINE       I   The line which is to be written to UNIT.
        C     UNIT       I   The Fortran unit number to use for output.

    CSPICE, writln.c::

        /* $Procedure WRITLN ( Write a text line to a logical unit ) */
        /* Subroutine */ int writln_(char *line, integer *unit, ftnlen line_len)

    :param line: The line which is to be written to UNIT.
    :type line: str
    :param unit: The Fortran unit number to use for output.
    :type unit: int
    """
    lineP = stypes.stringToCharP(line)
    unit = ctypes.c_int(unit)
    line_len = ctypes.c_int(len(line))
    libspice.writln_(lineP, ctypes.byref(unit), line_len)
[ "def", "writln", "(", "line", ",", "unit", ")", ":", "lineP", "=", "stypes", ".", "stringToCharP", "(", "line", ")", "unit", "=", "ctypes", ".", "c_int", "(", "unit", ")", "line_len", "=", "ctypes", ".", "c_int", "(", "len", "(", "line", ")", ")", "libspice", ".", "writln_", "(", "lineP", ",", "ctypes", ".", "byref", "(", "unit", ")", ",", "line_len", ")" ]
[ "Internal", "undocumented", "command", "for", "writing", "a", "text", "line", "to", "a", "logical", "unit" ]
fc20a9b9de68b58eed5b332f0c051fb343a6e335
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15689-L15720
15,007
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
xfmsta
python
def xfmsta(input_state, input_coord_sys, output_coord_sys, body):
    """
    Transform a state between coordinate systems.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/xfmsta_c.html

    :param input_state: Input state.
    :type input_state: 6-Element Array of floats
    :param input_coord_sys: Current (input) coordinate system.
    :type input_coord_sys: str
    :param output_coord_sys: Desired (output) coordinate system.
    :type output_coord_sys: str
    :param body: Name or NAIF ID of body with which coordinates
                 are associated (if applicable).
    :type body: str
    :return: Converted output state
    :rtype: 6-Element Array of floats
    """
    input_state = stypes.toDoubleVector(input_state)
    input_coord_sys = stypes.stringToCharP(input_coord_sys)
    output_coord_sys = stypes.stringToCharP(output_coord_sys)
    body = stypes.stringToCharP(body)
    output_state = stypes.emptyDoubleVector(6)
    libspice.xfmsta_c(input_state, input_coord_sys, output_coord_sys, body,
                      output_state)
    return stypes.cVectorToPython(output_state)
[ "def", "xfmsta", "(", "input_state", ",", "input_coord_sys", ",", "output_coord_sys", ",", "body", ")", ":", "input_state", "=", "stypes", ".", "toDoubleVector", "(", "input_state", ")", "input_coord_sys", "=", "stypes", ".", "stringToCharP", "(", "input_coord_sys", ")", "output_coord_sys", "=", "stypes", ".", "stringToCharP", "(", "output_coord_sys", ")", "body", "=", "stypes", ".", "stringToCharP", "(", "body", ")", "output_state", "=", "stypes", ".", "emptyDoubleVector", "(", "6", ")", "libspice", ".", "xfmsta_c", "(", "input_state", ",", "input_coord_sys", ",", "output_coord_sys", ",", "body", ",", "output_state", ")", "return", "stypes", ".", "cVectorToPython", "(", "output_state", ")" ]
[ "Transform", "a", "state", "between", "coordinate", "systems", "." ]
fc20a9b9de68b58eed5b332f0c051fb343a6e335
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15774-L15800
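A sketch of xfmsta for a conversion that needs no kernels; system names follow the CSPICE convention ('RECTANGULAR', 'SPHERICAL', ...), and a blank body name is passed because only the geodetic and planetographic systems need one:

import spiceypy as spice

state = [1.0, 2.0, 3.0, 0.1, 0.2, 0.3]  # position (km) and velocity (km/s)
sph = spice.xfmsta(state, 'RECTANGULAR', 'SPHERICAL', ' ')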
15,008
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
xpose
python
def xpose(m):
    """
    Transpose a 3x3 matrix

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/xpose_c.html

    :param m: Matrix to be transposed
    :type m: 3x3-Element Array of floats
    :return: Transposed matrix
    :rtype: 3x3-Element Array of floats
    """
    m = stypes.toDoubleMatrix(m)
    mout = stypes.emptyDoubleMatrix(x=3, y=3)
    libspice.xpose_c(m, mout)
    return stypes.cMatrixToNumpy(mout)
[ "def", "xpose", "(", "m", ")", ":", "m", "=", "stypes", ".", "toDoubleMatrix", "(", "m", ")", "mout", "=", "stypes", ".", "emptyDoubleMatrix", "(", "x", "=", "3", ",", "y", "=", "3", ")", "libspice", ".", "xpose_c", "(", "m", ",", "mout", ")", "return", "stypes", ".", "cMatrixToNumpy", "(", "mout", ")" ]
[ "Transpose", "a", "3x3", "matrix" ]
fc20a9b9de68b58eed5b332f0c051fb343a6e335
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15804-L15818
15,009
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
xpose6
python
def xpose6(m):
    """
    Transpose a 6x6 matrix

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/xpose6_c.html

    :param m: Matrix to be transposed
    :type m: list[6][6]
    :return: Transposed matrix
    :rtype: list[6][6]
    """
    m = stypes.toDoubleMatrix(m)
    mout = stypes.emptyDoubleMatrix(x=6, y=6)
    libspice.xpose6_c(m, mout)
    return stypes.cMatrixToNumpy(mout)
[ "def", "xpose6", "(", "m", ")", ":", "m", "=", "stypes", ".", "toDoubleMatrix", "(", "m", ")", "mout", "=", "stypes", ".", "emptyDoubleMatrix", "(", "x", "=", "6", ",", "y", "=", "6", ")", "libspice", ".", "xpose6_c", "(", "m", ",", "mout", ")", "return", "stypes", ".", "cMatrixToNumpy", "(", "mout", ")" ]
[ "Transpose", "a", "6x6", "matrix" ]
fc20a9b9de68b58eed5b332f0c051fb343a6e335
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15822-L15836
15,010
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
xposeg
python
def xposeg(matrix, nrow, ncol):
    """
    Transpose a matrix of arbitrary size; the matrix need not be square.
    (The underlying CSPICE routine can transpose in place; this wrapper
    returns a new matrix.)

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/xposeg_c.html

    :param matrix: Matrix to be transposed
    :type matrix: NxM-Element Array of floats
    :param nrow: Number of rows of input matrix.
    :type nrow: int
    :param ncol: Number of columns of input matrix
    :type ncol: int
    :return: Transposed matrix
    :rtype: MxN-Element Array of floats
    """
    matrix = stypes.toDoubleMatrix(matrix)
    mout = stypes.emptyDoubleMatrix(x=ncol, y=nrow)
    ncol = ctypes.c_int(ncol)
    nrow = ctypes.c_int(nrow)
    libspice.xposeg_c(matrix, nrow, ncol, mout)
    return stypes.cMatrixToNumpy(mout)
[ "def", "xposeg", "(", "matrix", ",", "nrow", ",", "ncol", ")", ":", "matrix", "=", "stypes", ".", "toDoubleMatrix", "(", "matrix", ")", "mout", "=", "stypes", ".", "emptyDoubleMatrix", "(", "x", "=", "ncol", ",", "y", "=", "nrow", ")", "ncol", "=", "ctypes", ".", "c_int", "(", "ncol", ")", "nrow", "=", "ctypes", ".", "c_int", "(", "nrow", ")", "libspice", ".", "xposeg_c", "(", "matrix", ",", "nrow", ",", "ncol", ",", "mout", ")", "return", "stypes", ".", "cMatrixToNumpy", "(", "mout", ")" ]
[ "Transpose", "a", "matrix", "of", "arbitrary", "size", "in", "place", "the", "matrix", "need", "not", "be", "square", "." ]
fc20a9b9de68b58eed5b332f0c051fb343a6e335
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L15840-L15861
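A quick check of xposeg against NumPy's transpose, assuming numpy and spiceypy are available:

import numpy as np
import spiceypy as spice

m = np.arange(6.0).reshape(2, 3)  # 2 rows, 3 columns
mt = spice.xposeg(m, 2, 3)        # 3x2 result
print(np.allclose(mt, m.T))       # True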
15,011
AndrewAnnex/SpiceyPy
spiceypy/utils/callbacks.py
CallUDFUNS
python
def CallUDFUNS(f, x):
    """
    We are given a UDF CFUNCTYPE and want to call it in Python.

    :param f: SpiceUDFUNS
    :type f: CFUNCTYPE
    :param x: some scalar
    :type x: float
    :return: value
    :rtype: float
    """
    value = c_double()
    f(x, byref(value))
    return value.value
[ "def", "CallUDFUNS", "(", "f", ",", "x", ")", ":", "value", "=", "c_double", "(", ")", "f", "(", "x", ",", "byref", "(", "value", ")", ")", "return", "value", ".", "value" ]
[ "We", "are", "given", "a", "UDF", "CFUNCTYPE", "and", "want", "to", "call", "it", "in", "python" ]
fc20a9b9de68b58eed5b332f0c051fb343a6e335
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/utils/callbacks.py#L158-L171
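A sketch of CallUDFUNS with a hand-built callback; the CFUNCTYPE signature below, void f(double, double *), is an assumption inferred from the body above rather than taken from the spiceypy API:

from ctypes import CFUNCTYPE, POINTER, c_double
from spiceypy.utils.callbacks import CallUDFUNS

UDFUNS = CFUNCTYPE(None, c_double, POINTER(c_double))  # assumed signature

@UDFUNS
def square(x, value):
    value[0] = x * x  # write the result through the output pointer

print(CallUDFUNS(square, 3.0))  # 9.0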
15,012
mnick/scikit-tensor
sktensor/dedicom.py
Updater.updateD_G
python
def updateD_G(self, x):
    """
    Compute Gradient for update of D

    See [2] for derivation of Gradient
    """
    self.precompute(x)
    g = zeros(len(x))
    Ai = zeros(self.A.shape[0])
    for i in range(len(g)):
        Ai = self.A[:, i]
        g[i] = (self.E * (
            dot(self.AD, outer(self.R[:, i], Ai)) +
            dot(outer(Ai, self.R[i, :]), self.ADt)
        )).sum()
    return -2 * g
[ "def", "updateD_G", "(", "self", ",", "x", ")", ":", "self", ".", "precompute", "(", "x", ")", "g", "=", "zeros", "(", "len", "(", "x", ")", ")", "Ai", "=", "zeros", "(", "self", ".", "A", ".", "shape", "[", "0", "]", ")", "for", "i", "in", "range", "(", "len", "(", "g", ")", ")", ":", "Ai", "=", "self", ".", "A", "[", ":", ",", "i", "]", "g", "[", "i", "]", "=", "(", "self", ".", "E", "*", "(", "dot", "(", "self", ".", "AD", ",", "outer", "(", "self", ".", "R", "[", ":", ",", "i", "]", ",", "Ai", ")", ")", "+", "dot", "(", "outer", "(", "Ai", ",", "self", ".", "R", "[", "i", ",", ":", "]", ")", ",", "self", ".", "ADt", ")", ")", ")", ".", "sum", "(", ")", "return", "-", "2", "*", "g" ]
[ "Compute", "Gradient", "for", "update", "of", "D" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/dedicom.py#L226-L239
15,013
mnick/scikit-tensor
sktensor/dedicom.py
Updater.updateD_H
python
def updateD_H(self, x):
    """
    Compute Hessian for update of D

    See [2] for derivation of Hessian
    """
    self.precompute(x)
    H = zeros((len(x), len(x)))
    Ai = zeros(self.A.shape[0])
    Aj = zeros(Ai.shape)
    for i in range(len(x)):
        Ai = self.A[:, i]
        ti = dot(self.AD, outer(self.R[:, i], Ai)) + \
            dot(outer(Ai, self.R[i, :]), self.ADt)
        for j in range(i, len(x)):
            Aj = self.A[:, j]
            tj = outer(Ai, Aj)
            H[i, j] = (
                self.E * (self.R[i, j] * tj + self.R[j, i] * tj.T) -
                ti * (
                    dot(self.AD, outer(self.R[:, j], Aj)) +
                    dot(outer(Aj, self.R[j, :]), self.ADt)
                )
            ).sum()
            H[j, i] = H[i, j]
    H *= -2
    e = eigvals(H).min()
    H = H + (eye(H.shape[0]) * e)
    return H
[ "def", "updateD_H", "(", "self", ",", "x", ")", ":", "self", ".", "precompute", "(", "x", ")", "H", "=", "zeros", "(", "(", "len", "(", "x", ")", ",", "len", "(", "x", ")", ")", ")", "Ai", "=", "zeros", "(", "self", ".", "A", ".", "shape", "[", "0", "]", ")", "Aj", "=", "zeros", "(", "Ai", ".", "shape", ")", "for", "i", "in", "range", "(", "len", "(", "x", ")", ")", ":", "Ai", "=", "self", ".", "A", "[", ":", ",", "i", "]", "ti", "=", "dot", "(", "self", ".", "AD", ",", "outer", "(", "self", ".", "R", "[", ":", ",", "i", "]", ",", "Ai", ")", ")", "+", "dot", "(", "outer", "(", "Ai", ",", "self", ".", "R", "[", "i", ",", ":", "]", ")", ",", "self", ".", "ADt", ")", "for", "j", "in", "range", "(", "i", ",", "len", "(", "x", ")", ")", ":", "Aj", "=", "self", ".", "A", "[", ":", ",", "j", "]", "tj", "=", "outer", "(", "Ai", ",", "Aj", ")", "H", "[", "i", ",", "j", "]", "=", "(", "self", ".", "E", "*", "(", "self", ".", "R", "[", "i", ",", "j", "]", "*", "tj", "+", "self", ".", "R", "[", "j", ",", "i", "]", "*", "tj", ".", "T", ")", "-", "ti", "*", "(", "dot", "(", "self", ".", "AD", ",", "outer", "(", "self", ".", "R", "[", ":", ",", "j", "]", ",", "Aj", ")", ")", "+", "dot", "(", "outer", "(", "Aj", ",", "self", ".", "R", "[", "j", ",", ":", "]", ")", ",", "self", ".", "ADt", ")", ")", ")", ".", "sum", "(", ")", "H", "[", "j", ",", "i", "]", "=", "H", "[", "i", ",", "j", "]", "H", "*=", "-", "2", "e", "=", "eigvals", "(", "H", ")", ".", "min", "(", ")", "H", "=", "H", "+", "(", "eye", "(", "H", ".", "shape", "[", "0", "]", ")", "*", "e", ")", "return", "H" ]
[ "Compute", "Hessian", "for", "update", "of", "D" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/dedicom.py#L241-L269
15,014
mnick/scikit-tensor
sktensor/pyutils.py
is_sequence
python
def is_sequence(obj):
    """
    Helper function to determine sequences
    across Python 2.x and 3.x
    """
    try:
        from collections import Sequence
    except ImportError:
        from operator import isSequenceType
        return isSequenceType(obj)
    else:
        return isinstance(obj, Sequence)
[ "def", "is_sequence", "(", "obj", ")", ":", "try", ":", "from", "collections", "import", "Sequence", "except", "ImportError", ":", "from", "operator", "import", "isSequenceType", "return", "isSequenceType", "(", "obj", ")", "else", ":", "return", "isinstance", "(", "obj", ",", "Sequence", ")" ]
[ "Helper", "function", "to", "determine", "sequences", "across", "Python", "2", ".", "x", "and", "3", ".", "x" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/pyutils.py#L8-L19
15,015
mnick/scikit-tensor
sktensor/pyutils.py
is_number
python
def is_number(obj):
    """
    Helper function to determine numbers
    across Python 2.x and 3.x
    """
    try:
        from numbers import Number
    except ImportError:
        from operator import isNumberType
        return isNumberType(obj)
    else:
        return isinstance(obj, Number)
[ "def", "is_number", "(", "obj", ")", ":", "try", ":", "from", "numbers", "import", "Number", "except", "ImportError", ":", "from", "operator", "import", "isNumberType", "return", "isNumberType", "(", "obj", ")", "else", ":", "return", "isinstance", "(", "obj", ",", "Number", ")" ]
[ "Helper", "function", "to", "determine", "numbers", "across", "Python", "2", ".", "x", "and", "3", ".", "x" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/pyutils.py#L22-L33
15,016
mnick/scikit-tensor
sktensor/pyutils.py
func_attr
python
def func_attr(f, attr):
    """
    Helper function to get an attribute of a function,
    like name, code, defaults, across Python 2.x and 3.x
    """
    if hasattr(f, 'func_%s' % attr):
        return getattr(f, 'func_%s' % attr)
    elif hasattr(f, '__%s__' % attr):
        return getattr(f, '__%s__' % attr)
    else:
        # The original format string had one placeholder but two arguments,
        # which raised a TypeError instead of the intended ValueError.
        raise ValueError('Object %s has no attr %s' % (str(f), attr))
[ "def", "func_attr", "(", "f", ",", "attr", ")", ":", "if", "hasattr", "(", "f", ",", "'func_%s'", "%", "attr", ")", ":", "return", "getattr", "(", "f", ",", "'func_%s'", "%", "attr", ")", "elif", "hasattr", "(", "f", ",", "'__%s__'", "%", "attr", ")", ":", "return", "getattr", "(", "f", ",", "'__%s__'", "%", "attr", ")", "else", ":", "raise", "ValueError", "(", "'Object %s has no attr'", "%", "(", "str", "(", "f", ")", ",", "attr", ")", ")" ]
[ "Helper", "function", "to", "get", "the", "attribute", "of", "a", "function", "like", "name", "code", "defaults", "across", "Python", "2", ".", "x", "and", "3", ".", "x" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/pyutils.py#L36-L46
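A short self-check of the helper above; on Python 3 both lookups resolve through the __%s__ branch:

from sktensor.pyutils import func_attr

def f(x, y=1):
    return x + y

print(func_attr(f, 'name'))      # 'f'
print(func_attr(f, 'defaults'))  # (1,)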
15,017
mnick/scikit-tensor
sktensor/pyutils.py
from_to_without
python
def from_to_without(frm, to, without, step=1, skip=1, reverse=False,
                    separate=False):
    """
    Helper function to create ranges with missing entries
    """
    if reverse:
        frm, to = (to - 1), (frm - 1)
        step *= -1
        skip *= -1
    a = list(range(frm, without, step))
    b = list(range(without + skip, to, step))
    if separate:
        return a, b
    else:
        return a + b
[ "def", "from_to_without", "(", "frm", ",", "to", ",", "without", ",", "step", "=", "1", ",", "skip", "=", "1", ",", "reverse", "=", "False", ",", "separate", "=", "False", ")", ":", "if", "reverse", ":", "frm", ",", "to", "=", "(", "to", "-", "1", ")", ",", "(", "frm", "-", "1", ")", "step", "*=", "-", "1", "skip", "*=", "-", "1", "a", "=", "list", "(", "range", "(", "frm", ",", "without", ",", "step", ")", ")", "b", "=", "list", "(", "range", "(", "without", "+", "skip", ",", "to", ",", "step", ")", ")", "if", "separate", ":", "return", "a", ",", "b", "else", ":", "return", "a", "+", "b" ]
[ "Helper", "function", "to", "create", "ranges", "with", "missing", "entries" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/pyutils.py#L49-L62
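A few evaluations of from_to_without showing the skipped entry and the reverse ordering:

from sktensor.pyutils import from_to_without

print(from_to_without(0, 6, 3))                 # [0, 1, 2, 4, 5]
print(from_to_without(0, 6, 3, separate=True))  # ([0, 1, 2], [4, 5])
print(from_to_without(0, 6, 3, reverse=True))   # [5, 4, 2, 1, 0]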
15,018
mnick/scikit-tensor
sktensor/dtensor.py
dtensor.unfold
python
def unfold(self, mode):
    """
    Unfolds a dense tensor in mode n.

    Parameters
    ----------
    mode : int
        Mode in which tensor is unfolded

    Returns
    -------
    unfolded_dtensor : unfolded_dtensor object
        Tensor unfolded along mode

    Examples
    --------
    Create dense tensor from numpy array

    >>> T = np.zeros((3, 4, 2))
    >>> T[:, :, 0] = [[ 1, 4, 7, 10], [ 2, 5, 8, 11], [3, 6, 9, 12]]
    >>> T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]
    >>> T = dtensor(T)

    Unfolding of dense tensors

    >>> T.unfold(0)
    array([[  1.,   4.,   7.,  10.,  13.,  16.,  19.,  22.],
           [  2.,   5.,   8.,  11.,  14.,  17.,  20.,  23.],
           [  3.,   6.,   9.,  12.,  15.,  18.,  21.,  24.]])
    >>> T.unfold(1)
    array([[  1.,   2.,   3.,  13.,  14.,  15.],
           [  4.,   5.,   6.,  16.,  17.,  18.],
           [  7.,   8.,   9.,  19.,  20.,  21.],
           [ 10.,  11.,  12.,  22.,  23.,  24.]])
    >>> T.unfold(2)
    array([[  1.,   2.,   3.,   4.,   5.,   6.,   7.,   8.,   9.,  10.,  11.,  12.],
           [ 13.,  14.,  15.,  16.,  17.,  18.,  19.,  20.,  21.,  22.,  23.,  24.]])
    """
    sz = array(self.shape)
    N = len(sz)
    order = ([mode], from_to_without(N - 1, -1, mode, step=-1, skip=-1))
    newsz = (sz[order[0]][0], prod(sz[order[1]]))
    arr = self.transpose(axes=(order[0] + order[1]))
    arr = arr.reshape(newsz)
    return unfolded_dtensor(arr, mode, self.shape)
[ "def", "unfold", "(", "self", ",", "mode", ")", ":", "sz", "=", "array", "(", "self", ".", "shape", ")", "N", "=", "len", "(", "sz", ")", "order", "=", "(", "[", "mode", "]", ",", "from_to_without", "(", "N", "-", "1", ",", "-", "1", ",", "mode", ",", "step", "=", "-", "1", ",", "skip", "=", "-", "1", ")", ")", "newsz", "=", "(", "sz", "[", "order", "[", "0", "]", "]", "[", "0", "]", ",", "prod", "(", "sz", "[", "order", "[", "1", "]", "]", ")", ")", "arr", "=", "self", ".", "transpose", "(", "axes", "=", "(", "order", "[", "0", "]", "+", "order", "[", "1", "]", ")", ")", "arr", "=", "arr", ".", "reshape", "(", "newsz", ")", "return", "unfolded_dtensor", "(", "arr", ",", "mode", ",", "self", ".", "shape", ")" ]
[ "Unfolds", "a", "dense", "tensor", "in", "mode", "n", "." ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/dtensor.py#L103-L150
15,019
mnick/scikit-tensor
sktensor/utils.py
accum
python
def accum(subs, vals, func=np.sum, issorted=False, with_subs=False):
    """
    NumPy implementation for Matlab's accumarray
    """
    # sort accmap for ediff if not sorted
    if not issorted:
        sidx = lexsort(subs, axis=0)
        subs = [sub[sidx] for sub in subs]
        vals = vals[sidx]
    idx = np.where(np.diff(subs).any(axis=0))[0] + 1
    idx = np.concatenate(([0], idx, [subs[0].shape[0]]))

    # create values array
    nvals = np.zeros(len(idx) - 1)
    for i in range(len(idx) - 1):
        nvals[i] = func(vals[idx[i]:idx[i + 1]])

    # return results
    if with_subs:
        return nvals, tuple(sub[idx[:-1]] for sub in subs)
    else:
        return nvals
[ "def", "accum", "(", "subs", ",", "vals", ",", "func", "=", "np", ".", "sum", ",", "issorted", "=", "False", ",", "with_subs", "=", "False", ")", ":", "# sort accmap for ediff if not sorted", "if", "not", "issorted", ":", "sidx", "=", "lexsort", "(", "subs", ",", "axis", "=", "0", ")", "subs", "=", "[", "sub", "[", "sidx", "]", "for", "sub", "in", "subs", "]", "vals", "=", "vals", "[", "sidx", "]", "idx", "=", "np", ".", "where", "(", "np", ".", "diff", "(", "subs", ")", ".", "any", "(", "axis", "=", "0", ")", ")", "[", "0", "]", "+", "1", "idx", "=", "np", ".", "concatenate", "(", "(", "[", "0", "]", ",", "idx", ",", "[", "subs", "[", "0", "]", ".", "shape", "[", "0", "]", "]", ")", ")", "# create values array", "nvals", "=", "np", ".", "zeros", "(", "len", "(", "idx", ")", "-", "1", ")", "for", "i", "in", "range", "(", "len", "(", "idx", ")", "-", "1", ")", ":", "nvals", "[", "i", "]", "=", "func", "(", "vals", "[", "idx", "[", "i", "]", ":", "idx", "[", "i", "+", "1", "]", "]", ")", "# return results", "if", "with_subs", ":", "return", "nvals", ",", "tuple", "(", "sub", "[", "idx", "[", ":", "-", "1", "]", "]", "for", "sub", "in", "subs", ")", "else", ":", "return", "nvals" ]
[ "NumPy", "implementation", "for", "Matlab", "s", "accumarray" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/utils.py#L5-L26
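A small sketch of accum grouping duplicate subscripts; the subscript pair (0, 0) appears twice and its values are summed:

import numpy as np
from sktensor.utils import accum

subs = (np.array([0, 1, 0, 1]), np.array([0, 0, 0, 1]))
vals = np.array([1.0, 2.0, 3.0, 4.0])
print(accum(subs, vals))  # [4. 2. 4.]  ->  (0,0): 1+3, (1,0): 2, (1,1): 4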
15,020
mnick/scikit-tensor
sktensor/tucker.py
hooi
python
def hooi(X, rank, **kwargs):
    """
    Compute Tucker decomposition of a tensor using Higher-Order
    Orthogonal Iterations.

    Parameters
    ----------
    X : tensor_mixin
        The tensor to be decomposed
    rank : array_like
        The rank of the decomposition for each mode of the tensor.
        The length of ``rank`` must match the number of modes of ``X``.
    init : {'random', 'nvecs'}, optional
        The initialization method to use.
            - random : Factor matrices are initialized randomly.
            - nvecs : Factor matrices are initialized via HOSVD.
        default : 'nvecs'

    Examples
    --------
    Create dense tensor

    >>> T = np.zeros((3, 4, 2))
    >>> T[:, :, 0] = [[ 1, 4, 7, 10], [ 2, 5, 8, 11], [3, 6, 9, 12]]
    >>> T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]
    >>> T = dtensor(T)

    Compute Tucker decomposition of ``T`` with n-rank [2, 3, 1] via
    higher-order orthogonal iterations

    >>> Y = hooi(T, [2, 3, 1], init='nvecs')

    Shape of the core tensor matches n-rank of the decomposition.

    >>> Y['core'].shape
    (2, 3, 1)
    >>> Y['U'][1].shape
    (3, 2)

    References
    ----------
    .. [1] L. De Lathauwer, B. De Moor, J. Vandewalle: On the best rank-1
           and rank-(R_1, R_2, \ldots, R_N) approximation of higher order
           tensors; IEEE Trans. Signal Process. 49 (2001), pp. 2262-2271
    """
    # init options
    ainit = kwargs.pop('init', __DEF_INIT)
    maxIter = kwargs.pop('maxIter', __DEF_MAXITER)
    conv = kwargs.pop('conv', __DEF_CONV)
    dtype = kwargs.pop('dtype', X.dtype)
    if not len(kwargs) == 0:
        raise ValueError('Unknown keywords (%s)' % (kwargs.keys()))

    ndims = X.ndim
    if is_number(rank):
        rank = rank * ones(ndims)

    normX = norm(X)

    U = __init(ainit, X, ndims, rank, dtype)
    fit = 0
    exectimes = []
    for itr in range(maxIter):
        tic = time.time()  # time.clock() in the original; removed in Python 3.8
        fitold = fit

        for n in range(ndims):
            Utilde = ttm(X, U, n, transp=True, without=True)
            U[n] = nvecs(Utilde, n, rank[n])

        # compute core tensor to get fit
        core = ttm(Utilde, U, n, transp=True)

        # since factors are orthonormal, compute fit on core tensor
        normresidual = sqrt(normX ** 2 - norm(core) ** 2)

        # fraction explained by model
        fit = 1 - (normresidual / normX)
        fitchange = abs(fitold - fit)
        exectimes.append(time.time() - tic)

        _log.debug(
            '[%3d] fit: %.5f | delta: %7.1e | secs: %.5f'
            % (itr, fit, fitchange, exectimes[-1])
        )
        if itr > 1 and fitchange < conv:
            break
    return core, U
[ "def", "hooi", "(", "X", ",", "rank", ",", "*", "*", "kwargs", ")", ":", "# init options", "ainit", "=", "kwargs", ".", "pop", "(", "'init'", ",", "__DEF_INIT", ")", "maxIter", "=", "kwargs", ".", "pop", "(", "'maxIter'", ",", "__DEF_MAXITER", ")", "conv", "=", "kwargs", ".", "pop", "(", "'conv'", ",", "__DEF_CONV", ")", "dtype", "=", "kwargs", ".", "pop", "(", "'dtype'", ",", "X", ".", "dtype", ")", "if", "not", "len", "(", "kwargs", ")", "==", "0", ":", "raise", "ValueError", "(", "'Unknown keywords (%s)'", "%", "(", "kwargs", ".", "keys", "(", ")", ")", ")", "ndims", "=", "X", ".", "ndim", "if", "is_number", "(", "rank", ")", ":", "rank", "=", "rank", "*", "ones", "(", "ndims", ")", "normX", "=", "norm", "(", "X", ")", "U", "=", "__init", "(", "ainit", ",", "X", ",", "ndims", ",", "rank", ",", "dtype", ")", "fit", "=", "0", "exectimes", "=", "[", "]", "for", "itr", "in", "range", "(", "maxIter", ")", ":", "tic", "=", "time", ".", "clock", "(", ")", "fitold", "=", "fit", "for", "n", "in", "range", "(", "ndims", ")", ":", "Utilde", "=", "ttm", "(", "X", ",", "U", ",", "n", ",", "transp", "=", "True", ",", "without", "=", "True", ")", "U", "[", "n", "]", "=", "nvecs", "(", "Utilde", ",", "n", ",", "rank", "[", "n", "]", ")", "# compute core tensor to get fit", "core", "=", "ttm", "(", "Utilde", ",", "U", ",", "n", ",", "transp", "=", "True", ")", "# since factors are orthonormal, compute fit on core tensor", "normresidual", "=", "sqrt", "(", "normX", "**", "2", "-", "norm", "(", "core", ")", "**", "2", ")", "# fraction explained by model", "fit", "=", "1", "-", "(", "normresidual", "/", "normX", ")", "fitchange", "=", "abs", "(", "fitold", "-", "fit", ")", "exectimes", ".", "append", "(", "time", ".", "clock", "(", ")", "-", "tic", ")", "_log", ".", "debug", "(", "'[%3d] fit: %.5f | delta: %7.1e | secs: %.5f'", "%", "(", "itr", ",", "fit", ",", "fitchange", ",", "exectimes", "[", "-", "1", "]", ")", ")", "if", "itr", ">", "1", "and", "fitchange", "<", "conv", ":", "break", "return", "core", ",", "U" ]
[ "Compute", "Tucker", "decomposition", "of", "a", "tensor", "using", "Higher", "-", "Order", "Orthogonal", "Iterations", "." ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/tucker.py#L36-L123
15,021
mnick/scikit-tensor
sktensor/ktensor.py
ktensor.uttkrp
python
def uttkrp(self, U, mode):
    """
    Unfolded tensor times Khatri-Rao product for Kruskal tensors

    Parameters
    ----------
    X : tensor_mixin
        Tensor whose unfolding should be multiplied.
    U : list of array_like
        Matrices whose Khatri-Rao product should be multiplied.
    mode : int
        Mode in which X should be unfolded.

    See also
    --------
    sktensor.sptensor.uttkrp : Efficient computation of uttkrp for sparse tensors
    ttensor.uttkrp : Efficient computation of uttkrp for Tucker operators
    """
    N = self.ndim
    if mode == 1:
        R = U[1].shape[1]
    else:
        R = U[0].shape[1]
    # The original called np.tile(self.lmbda, 1, R), passing three positional
    # arguments to np.tile (a TypeError); the Matlab repmat(lmbda, 1, R)
    # equivalent tiles the weight column across R columns.
    W = np.tile(self.lmbda[:, np.newaxis], (1, R))
    # The original concatenated two range() objects with +, which fails on
    # Python 3; build lists explicitly.
    for i in list(range(mode)) + list(range(mode + 1, N)):
        W = W * dot(self.U[i].T, U[i])
    return dot(self.U[mode], W)
[ "def", "uttkrp", "(", "self", ",", "U", ",", "mode", ")", ":", "N", "=", "self", ".", "ndim", "if", "mode", "==", "1", ":", "R", "=", "U", "[", "1", "]", ".", "shape", "[", "1", "]", "else", ":", "R", "=", "U", "[", "0", "]", ".", "shape", "[", "1", "]", "W", "=", "np", ".", "tile", "(", "self", ".", "lmbda", ",", "1", ",", "R", ")", "for", "i", "in", "range", "(", "mode", ")", "+", "range", "(", "mode", "+", "1", ",", "N", ")", ":", "W", "=", "W", "*", "dot", "(", "self", ".", "U", "[", "i", "]", ".", "T", ",", "U", "[", "i", "]", ")", "return", "dot", "(", "self", ".", "U", "[", "mode", "]", ",", "W", ")" ]
Unfolded tensor times Khatri-Rao product for Kruskal tensors

Parameters
----------
U : list of array_like
    Matrices whose Khatri-Rao product should be multiplied.
mode : int
    Mode in which the Kruskal tensor should be unfolded.

See also
--------
sktensor.sptensor.uttkrp : Efficient computation of uttkrp for sparse tensors
ttensor.uttkrp : Efficient computation of uttkrp for Tucker operators
[ "Unfolded", "tensor", "times", "Khatri", "-", "Rao", "product", "for", "Kruskal", "tensors" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/ktensor.py#L84-L111
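Worth pinning down why the uttkrp shortcut above is valid: for a Kruskal tensor, multiplying the mode-n unfolding by a Khatri-Rao product collapses to small rank-by-rank Gram matrices. A minimal numpy sketch of that identity, independent of sktensor (all names, shapes and data here are illustrative):

import numpy as np

rank, shape = 3, (4, 5, 6)
rng = np.random.default_rng(0)
U = [rng.random((s, rank)) for s in shape]      # factors of the Kruskal tensor
lmbda = rng.random(rank)                        # component weights
V = [rng.random((s, rank)) for s in shape]      # matrices to multiply with

# dense tensor represented by (lmbda, U)
X = np.einsum('r,ir,jr,kr->ijk', lmbda, U[0], U[1], U[2])

# naive mode-0 uttkrp: contract X with the columns of V[1] and V[2]
naive = np.einsum('ijk,jr,kr->ir', X, V[1], V[2])

# shortcut used by ktensor.uttkrp: only rank x rank products are needed
W = lmbda[:, None] * (U[1].T @ V[1]) * (U[2].T @ V[2])
shortcut = U[0] @ W

assert np.allclose(naive, shortcut)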
15,022
mnick/scikit-tensor
sktensor/ktensor.py
ktensor.norm
def norm(self): """ Efficient computation of the Frobenius norm for ktensors Returns ------- norm : float Frobenius norm of the ktensor """ N = len(self.shape) coef = outer(self.lmbda, self.lmbda) for i in range(N): coef = coef * dot(self.U[i].T, self.U[i]) return np.sqrt(coef.sum())
python
def norm(self): """ Efficient computation of the Frobenius norm for ktensors Returns ------- norm : float Frobenius norm of the ktensor """ N = len(self.shape) coef = outer(self.lmbda, self.lmbda) for i in range(N): coef = coef * dot(self.U[i].T, self.U[i]) return np.sqrt(coef.sum())
[ "def", "norm", "(", "self", ")", ":", "N", "=", "len", "(", "self", ".", "shape", ")", "coef", "=", "outer", "(", "self", ".", "lmbda", ",", "self", ".", "lmbda", ")", "for", "i", "in", "range", "(", "N", ")", ":", "coef", "=", "coef", "*", "dot", "(", "self", ".", "U", "[", "i", "]", ".", "T", ",", "self", ".", "U", "[", "i", "]", ")", "return", "np", ".", "sqrt", "(", "coef", ".", "sum", "(", ")", ")" ]
Efficient computation of the Frobenius norm for ktensors Returns ------- norm : float Frobenius norm of the ktensor
[ "Efficient", "computation", "of", "the", "Frobenius", "norm", "for", "ktensors" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/ktensor.py#L113-L126
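The norm method relies on the identity ||X||_F^2 = sum_{r,s} lmbda_r lmbda_s prod_i (U[i].T U[i])_{rs}, which avoids ever forming the dense tensor. A quick plain-numpy check of that identity (variable names are illustrative, not sktensor's API):

import numpy as np

rank, shape = 2, (3, 4, 5)
rng = np.random.default_rng(1)
U = [rng.random((s, rank)) for s in shape]
lmbda = rng.random(rank)

X = np.einsum('r,ir,jr,kr->ijk', lmbda, U[0], U[1], U[2])

coef = np.outer(lmbda, lmbda)
for Ui in U:
    coef = coef * (Ui.T @ Ui)           # elementwise Hadamard of Gram matrices

assert np.isclose(np.linalg.norm(X), np.sqrt(coef.sum()))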
15,023
mnick/scikit-tensor
sktensor/ktensor.py
ktensor.innerprod
def innerprod(self, X): """ Efficient computation of the inner product of a ktensor with another tensor Parameters ---------- X : tensor_mixin Tensor to compute the inner product with. Returns ------- p : float Inner product between ktensor and X. """ N = len(self.shape) R = len(self.lmbda) res = 0 for r in range(R): vecs = [] for n in range(N): vecs.append(self.U[n][:, r]) res += self.lmbda[r] * X.ttv(tuple(vecs)) return res
python
def innerprod(self, X): """ Efficient computation of the inner product of a ktensor with another tensor Parameters ---------- X : tensor_mixin Tensor to compute the inner product with. Returns ------- p : float Inner product between ktensor and X. """ N = len(self.shape) R = len(self.lmbda) res = 0 for r in range(R): vecs = [] for n in range(N): vecs.append(self.U[n][:, r]) res += self.lmbda[r] * X.ttv(tuple(vecs)) return res
[ "def", "innerprod", "(", "self", ",", "X", ")", ":", "N", "=", "len", "(", "self", ".", "shape", ")", "R", "=", "len", "(", "self", ".", "lmbda", ")", "res", "=", "0", "for", "r", "in", "range", "(", "R", ")", ":", "vecs", "=", "[", "]", "for", "n", "in", "range", "(", "N", ")", ":", "vecs", ".", "append", "(", "self", ".", "U", "[", "n", "]", "[", ":", ",", "r", "]", ")", "res", "+=", "self", ".", "lmbda", "[", "r", "]", "*", "X", ".", "ttv", "(", "tuple", "(", "vecs", ")", ")", "return", "res" ]
Efficient computation of the inner product of a ktensor with another tensor Parameters ---------- X : tensor_mixin Tensor to compute the inner product with. Returns ------- p : float Inner product between ktensor and X.
[ "Efficient", "computation", "of", "the", "inner", "product", "of", "a", "ktensor", "with", "another", "tensor" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/ktensor.py#L128-L150
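innerprod evaluates <X, Y> one rank-1 component at a time, as a sum of tensor-times-vector products. A self-contained numpy check of that decomposition (no sktensor dependency; data is random toy data):

import numpy as np

rank, shape = 2, (3, 4, 5)
rng = np.random.default_rng(2)
U = [rng.random((s, rank)) for s in shape]
lmbda = rng.random(rank)
Y = rng.random(shape)                    # arbitrary dense tensor

# naive inner product via the dense reconstruction
X = np.einsum('r,ir,jr,kr->ijk', lmbda, U[0], U[1], U[2])
naive = (X * Y).sum()

# rank-wise evaluation as in ktensor.innerprod: one ttv per component
fast = sum(
    lmbda[r] * np.einsum('ijk,i,j,k->', Y, U[0][:, r], U[1][:, r], U[2][:, r])
    for r in range(rank)
)
assert np.isclose(naive, fast)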
15,024
mnick/scikit-tensor
sktensor/ktensor.py
ktensor.toarray
def toarray(self): """ Converts a ktensor into a dense multidimensional ndarray Returns ------- arr : np.ndarray Fully computed multidimensional array whose shape matches the original ktensor. """ A = dot(self.lmbda, khatrirao(tuple(self.U)).T) return A.reshape(self.shape)
python
def toarray(self): """ Converts a ktensor into a dense multidimensional ndarray Returns ------- arr : np.ndarray Fully computed multidimensional array whose shape matches the original ktensor. """ A = dot(self.lmbda, khatrirao(tuple(self.U)).T) return A.reshape(self.shape)
[ "def", "toarray", "(", "self", ")", ":", "A", "=", "dot", "(", "self", ".", "lmbda", ",", "khatrirao", "(", "tuple", "(", "self", ".", "U", ")", ")", ".", "T", ")", "return", "A", ".", "reshape", "(", "self", ".", "shape", ")" ]
Converts a ktensor into a dense multidimensional ndarray Returns ------- arr : np.ndarray Fully computed multidimensional array whose shape matches the original ktensor.
[ "Converts", "a", "ktensor", "into", "a", "dense", "multidimensional", "ndarray" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/ktensor.py#L152-L163
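toarray reconstructs the dense array as lmbda times the transposed Khatri-Rao product of the factors, followed by a reshape. A hedged sketch of why that reshape is consistent, assuming C-order flattening and kron-style column ordering (names illustrative):

import numpy as np

rank, shape = 2, (3, 4, 5)
rng = np.random.default_rng(3)
U = [rng.random((s, rank)) for s in shape]
lmbda = rng.random(rank)

# columnwise Khatri-Rao product, first factor varying slowest (kron order)
kr = np.stack([np.kron(np.kron(U[0][:, r], U[1][:, r]), U[2][:, r])
               for r in range(rank)], axis=1)

dense = (kr @ lmbda).reshape(shape)      # the computation toarray performs
check = np.einsum('r,ir,jr,kr->ijk', lmbda, U[0], U[1], U[2])
assert np.allclose(dense, check)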
15,025
mnick/scikit-tensor
sktensor/sptensor.py
fromarray
def fromarray(A): """Create a sptensor from a dense numpy array""" subs = np.nonzero(A) vals = A[subs] return sptensor(subs, vals, shape=A.shape, dtype=A.dtype)
python
def fromarray(A): """Create a sptensor from a dense numpy array""" subs = np.nonzero(A) vals = A[subs] return sptensor(subs, vals, shape=A.shape, dtype=A.dtype)
[ "def", "fromarray", "(", "A", ")", ":", "subs", "=", "np", ".", "nonzero", "(", "A", ")", "vals", "=", "A", "[", "subs", "]", "return", "sptensor", "(", "subs", ",", "vals", ",", "shape", "=", "A", ".", "shape", ",", "dtype", "=", "A", ".", "dtype", ")" ]
Create a sptensor from a dense numpy array
[ "Create", "a", "sptensor", "from", "a", "dense", "numpy", "array" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/sptensor.py#L362-L366
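fromarray is a COO-style construction: np.nonzero yields one index array per mode and advanced indexing pulls out the matching values. A tiny roundtrip demo of that mechanism in plain numpy (independent of the sptensor class):

import numpy as np

A = np.zeros((3, 4))
A[0, 1], A[2, 3] = 5.0, -2.0

subs = np.nonzero(A)          # tuple of index arrays, one per mode
vals = A[subs]                # the corresponding nonzero values

# roundtrip: scatter the values back into a dense array
B = np.zeros_like(A)
B[subs] = vals
assert np.array_equal(A, B)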
15,026
mnick/scikit-tensor
sktensor/sptensor.py
sptensor._ttm_me_compute
def _ttm_me_compute(self, V, edims, sdims, transp):
    """
    Assume Y = T x_i V_i for i = 1...n can fit into memory
    """
    shapeY = np.copy(self.shape)

    # Determine size of Y
    for n in np.union1d(edims, sdims):
        shapeY[n] = V[n].shape[1] if transp else V[n].shape[0]

    # Allocate Y (final result) and v (vectors for elementwise computations)
    Y = zeros(shapeY)
    shapeY = array(shapeY)
    v = [None for _ in range(len(edims))]

    for i in range(np.prod(shapeY[edims])):
        rsubs = unravel_index(i, shapeY[edims])
python
def _ttm_me_compute(self, V, edims, sdims, transp):
    """
    Assume Y = T x_i V_i for i = 1...n can fit into memory
    """
    shapeY = np.copy(self.shape)

    # Determine size of Y
    for n in np.union1d(edims, sdims):
        shapeY[n] = V[n].shape[1] if transp else V[n].shape[0]

    # Allocate Y (final result) and v (vectors for elementwise computations)
    Y = zeros(shapeY)
    shapeY = array(shapeY)
    v = [None for _ in range(len(edims))]

    for i in range(np.prod(shapeY[edims])):
        rsubs = unravel_index(i, shapeY[edims])
[ "def", "_ttm_me_compute", "(", "self", ",", "V", ",", "edims", ",", "sdims", ",", "transp", ")", ":", "shapeY", "=", "np", ".", "copy", "(", "self", ".", "shape", ")", "# Determine size of Y", "for", "n", "in", "np", ".", "union1d", "(", "edims", ",", "sdims", ")", ":", "shapeY", "[", "n", "]", "=", "V", "[", "n", "]", ".", "shape", "[", "1", "]", "if", "transp", "else", "V", "[", "n", "]", ".", "shape", "[", "0", "]", "# Allocate Y (final result) and v (vectors for elementwise computations)", "Y", "=", "zeros", "(", "shapeY", ")", "shapeY", "=", "array", "(", "shapeY", ")", "v", "=", "[", "None", "for", "_", "in", "range", "(", "len", "(", "edims", ")", ")", "]", "for", "i", "in", "range", "(", "np", ".", "prod", "(", "shapeY", "[", "edims", "]", ")", ")", ":", "rsubs", "=", "unravel_index", "(", "shapeY", "[", "edims", "]", ",", "i", ")" ]
Assume Y = T x_i V_i for i = 1...n can fit into memory
[ "Assume", "Y", "=", "T", "x_i", "V_i", "for", "i", "=", "1", "...", "n", "can", "fit", "into", "memory" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/sptensor.py#L179-L195
15,027
mnick/scikit-tensor
sktensor/sptensor.py
sptensor.transpose
def transpose(self, axes=None):
    """
    Compute transpose of sparse tensors.

    Parameters
    ----------
    axes : array_like of ints, optional
        Permute the axes according to the values given.

    Returns
    -------
    T : sptensor
        Sparse tensor with axes permuted.
    """
    if axes is None:
        raise NotImplementedError(
            'Sparse tensor transposition without axes argument is not supported'
        )
    nsubs = tuple([self.subs[idx] for idx in axes])
    nshape = [self.shape[idx] for idx in axes]
    return sptensor(nsubs, self.vals, nshape)
python
def transpose(self, axes=None):
    """
    Compute transpose of sparse tensors.

    Parameters
    ----------
    axes : array_like of ints, optional
        Permute the axes according to the values given.

    Returns
    -------
    T : sptensor
        Sparse tensor with axes permuted.
    """
    if axes is None:
        raise NotImplementedError(
            'Sparse tensor transposition without axes argument is not supported'
        )
    nsubs = tuple([self.subs[idx] for idx in axes])
    nshape = [self.shape[idx] for idx in axes]
    return sptensor(nsubs, self.vals, nshape)
[ "def", "transpose", "(", "self", ",", "axes", "=", "None", ")", ":", "if", "axes", "is", "None", ":", "raise", "NotImplementedError", "(", "'Sparse tensor transposition without axes argument is not supported'", ")", "nsubs", "=", "tuple", "(", "[", "self", ".", "subs", "[", "idx", "]", "for", "idx", "in", "axes", "]", ")", "nshape", "=", "[", "self", ".", "shape", "[", "idx", "]", "for", "idx", "in", "axes", "]", "return", "sptensor", "(", "nsubs", ",", "self", ".", "vals", ",", "nshape", ")" ]
Compute transpose of sparse tensors.

Parameters
----------
axes : array_like of ints, optional
    Permute the axes according to the values given.

Returns
-------
T : sptensor
    Sparse tensor with axes permuted.
[ "Compute", "transpose", "of", "sparse", "tensors", "." ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/sptensor.py#L232-L252
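For COO data, transposing is just permuting the tuple of subscript arrays; the values never move. A minimal illustration with toy subscripts, checked against numpy's dense transpose (independent of the sptensor class):

import numpy as np

subs = (np.array([0, 2]), np.array([1, 3]), np.array([0, 1]))   # modes 0, 1, 2
vals = np.array([5.0, -2.0])
shape = (3, 4, 2)

axes = (2, 0, 1)
nsubs = tuple(subs[a] for a in axes)
nshape = tuple(shape[a] for a in axes)

# dense check: permuting subscript arrays equals np.transpose of the dense array
A = np.zeros(shape); A[subs] = vals
B = np.zeros(nshape); B[nsubs] = vals
assert np.array_equal(B, A.transpose(axes))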
15,028
mnick/scikit-tensor
sktensor/sptensor.py
sptensor.concatenate
def concatenate(self, tpl, axis=None): """ Concatenates sparse tensors. Parameters ---------- tpl : tuple of sparse tensors Tensors to be concatenated. axis : int, optional Axis along which concatenation should take place """ if axis is None: raise NotImplementedError( 'Sparse tensor concatenation without axis argument is not supported' ) T = self for i in range(1, len(tpl)): T = _single_concatenate(T, tpl[i], axis=axis) return T
python
def concatenate(self, tpl, axis=None): """ Concatenates sparse tensors. Parameters ---------- tpl : tuple of sparse tensors Tensors to be concatenated. axis : int, optional Axis along which concatenation should take place """ if axis is None: raise NotImplementedError( 'Sparse tensor concatenation without axis argument is not supported' ) T = self for i in range(1, len(tpl)): T = _single_concatenate(T, tpl[i], axis=axis) return T
[ "def", "concatenate", "(", "self", ",", "tpl", ",", "axis", "=", "None", ")", ":", "if", "axis", "is", "None", ":", "raise", "NotImplementedError", "(", "'Sparse tensor concatenation without axis argument is not supported'", ")", "T", "=", "self", "for", "i", "in", "range", "(", "1", ",", "len", "(", "tpl", ")", ")", ":", "T", "=", "_single_concatenate", "(", "T", ",", "tpl", "[", "i", "]", ",", "axis", "=", "axis", ")", "return", "T" ]
Concatenates sparse tensors. Parameters ---------- tpl : tuple of sparse tensors Tensors to be concatenated. axis : int, optional Axis along which concatenation should take place
[ "Concatenates", "sparse", "tensors", "." ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/sptensor.py#L254-L272
15,029
mnick/scikit-tensor
sktensor/sptensor.py
unfolded_sptensor.fold
def fold(self):
    """
    Recreate original tensor by folding unfolded_sptensor according
    to ``ten_shape``.

    Returns
    -------
    T : sptensor
        Sparse tensor that is created by refolding according to
        ``ten_shape``.
    """
    nsubs = zeros((len(self.data), len(self.ten_shape)), dtype=int)
    if len(self.rdims) > 0:
        nidx = unravel_index(self.row, self.ten_shape[self.rdims])
        for i in range(len(self.rdims)):
            nsubs[:, self.rdims[i]] = nidx[i]
    if len(self.cdims) > 0:
        nidx = unravel_index(self.col, self.ten_shape[self.cdims])
        for i in range(len(self.cdims)):
            nsubs[:, self.cdims[i]] = nidx[i]
    nsubs = [z.flatten() for z in hsplit(nsubs, len(self.ten_shape))]
    return sptensor(tuple(nsubs), self.data, self.ten_shape)
python
def fold(self):
    """
    Recreate original tensor by folding unfolded_sptensor according
    to ``ten_shape``.

    Returns
    -------
    T : sptensor
        Sparse tensor that is created by refolding according to
        ``ten_shape``.
    """
    nsubs = zeros((len(self.data), len(self.ten_shape)), dtype=int)
    if len(self.rdims) > 0:
        nidx = unravel_index(self.row, self.ten_shape[self.rdims])
        for i in range(len(self.rdims)):
            nsubs[:, self.rdims[i]] = nidx[i]
    if len(self.cdims) > 0:
        nidx = unravel_index(self.col, self.ten_shape[self.cdims])
        for i in range(len(self.cdims)):
            nsubs[:, self.cdims[i]] = nidx[i]
    nsubs = [z.flatten() for z in hsplit(nsubs, len(self.ten_shape))]
    return sptensor(tuple(nsubs), self.data, self.ten_shape)
[ "def", "fold", "(", "self", ")", ":", "nsubs", "=", "zeros", "(", "(", "len", "(", "self", ".", "data", ")", ",", "len", "(", "self", ".", "ten_shape", ")", ")", ",", "dtype", "=", "np", ".", "int", ")", "if", "len", "(", "self", ".", "rdims", ")", ">", "0", ":", "nidx", "=", "unravel_index", "(", "self", ".", "row", ",", "self", ".", "ten_shape", "[", "self", ".", "rdims", "]", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "rdims", ")", ")", ":", "nsubs", "[", ":", ",", "self", ".", "rdims", "[", "i", "]", "]", "=", "nidx", "[", "i", "]", "if", "len", "(", "self", ".", "cdims", ")", ">", "0", ":", "nidx", "=", "unravel_index", "(", "self", ".", "col", ",", "self", ".", "ten_shape", "[", "self", ".", "cdims", "]", ")", "for", "i", "in", "range", "(", "len", "(", "self", ".", "cdims", ")", ")", ":", "nsubs", "[", ":", ",", "self", ".", "cdims", "[", "i", "]", "]", "=", "nidx", "[", "i", "]", "nsubs", "=", "[", "z", ".", "flatten", "(", ")", "for", "z", "in", "hsplit", "(", "nsubs", ",", "len", "(", "self", ".", "ten_shape", ")", ")", "]", "return", "sptensor", "(", "tuple", "(", "nsubs", ")", ",", "self", ".", "data", ",", "self", ".", "ten_shape", ")" ]
Recreate original tensor by folding unfolded_sptensor according
to ``ten_shape``.

Returns
-------
T : sptensor
    Sparse tensor that is created by refolding according to ``ten_shape``.
[ "Recreate", "original", "tensor", "by", "folding", "unfolded_sptensor", "according", "toc", "ten_shape", "." ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/sptensor.py#L339-L359
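The heart of fold is inverting the raveling that produced the flat row and column indices of the unfolded matrix. A small, self-contained ravel/unravel roundtrip that mirrors the mechanism (toy subscripts; the actual unfolding convention in sktensor may differ):

import numpy as np

shape = np.array([3, 4, 2])
rdims, cdims = np.array([0]), np.array([1, 2])   # unfolding row/column modes

subs = (np.array([1, 2]), np.array([0, 3]), np.array([1, 0]))  # two nonzeros
row = np.ravel_multi_index(tuple(subs[d] for d in rdims), tuple(shape[rdims]))
col = np.ravel_multi_index(tuple(subs[d] for d in cdims), tuple(shape[cdims]))

# fold(): invert the raveling to recover the original subscripts
ridx = np.unravel_index(row, tuple(shape[rdims]))
cidx = np.unravel_index(col, tuple(shape[cdims]))
assert np.array_equal(ridx[0], subs[0])
assert np.array_equal(cidx[0], subs[1]) and np.array_equal(cidx[1], subs[2])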
15,030
mnick/scikit-tensor
sktensor/rescal.py
_updateA
def _updateA(X, A, R, P, Z, lmbdaA, orthogonalize): """Update step for A""" n, rank = A.shape F = zeros((n, rank), dtype=A.dtype) E = zeros((rank, rank), dtype=A.dtype) AtA = dot(A.T, A) for i in range(len(X)): F += X[i].dot(dot(A, R[i].T)) + X[i].T.dot(dot(A, R[i])) E += dot(R[i], dot(AtA, R[i].T)) + dot(R[i].T, dot(AtA, R[i])) # regularization I = lmbdaA * eye(rank, dtype=A.dtype) # attributes for i in range(len(Z)): F += P[i].dot(Z[i].T) E += dot(Z[i], Z[i].T) # finally compute update for A A = solve(I + E.T, F.T).T return orth(A) if orthogonalize else A
python
def _updateA(X, A, R, P, Z, lmbdaA, orthogonalize): """Update step for A""" n, rank = A.shape F = zeros((n, rank), dtype=A.dtype) E = zeros((rank, rank), dtype=A.dtype) AtA = dot(A.T, A) for i in range(len(X)): F += X[i].dot(dot(A, R[i].T)) + X[i].T.dot(dot(A, R[i])) E += dot(R[i], dot(AtA, R[i].T)) + dot(R[i].T, dot(AtA, R[i])) # regularization I = lmbdaA * eye(rank, dtype=A.dtype) # attributes for i in range(len(Z)): F += P[i].dot(Z[i].T) E += dot(Z[i], Z[i].T) # finally compute update for A A = solve(I + E.T, F.T).T return orth(A) if orthogonalize else A
[ "def", "_updateA", "(", "X", ",", "A", ",", "R", ",", "P", ",", "Z", ",", "lmbdaA", ",", "orthogonalize", ")", ":", "n", ",", "rank", "=", "A", ".", "shape", "F", "=", "zeros", "(", "(", "n", ",", "rank", ")", ",", "dtype", "=", "A", ".", "dtype", ")", "E", "=", "zeros", "(", "(", "rank", ",", "rank", ")", ",", "dtype", "=", "A", ".", "dtype", ")", "AtA", "=", "dot", "(", "A", ".", "T", ",", "A", ")", "for", "i", "in", "range", "(", "len", "(", "X", ")", ")", ":", "F", "+=", "X", "[", "i", "]", ".", "dot", "(", "dot", "(", "A", ",", "R", "[", "i", "]", ".", "T", ")", ")", "+", "X", "[", "i", "]", ".", "T", ".", "dot", "(", "dot", "(", "A", ",", "R", "[", "i", "]", ")", ")", "E", "+=", "dot", "(", "R", "[", "i", "]", ",", "dot", "(", "AtA", ",", "R", "[", "i", "]", ".", "T", ")", ")", "+", "dot", "(", "R", "[", "i", "]", ".", "T", ",", "dot", "(", "AtA", ",", "R", "[", "i", "]", ")", ")", "# regularization", "I", "=", "lmbdaA", "*", "eye", "(", "rank", ",", "dtype", "=", "A", ".", "dtype", ")", "# attributes", "for", "i", "in", "range", "(", "len", "(", "Z", ")", ")", ":", "F", "+=", "P", "[", "i", "]", ".", "dot", "(", "Z", "[", "i", "]", ".", "T", ")", "E", "+=", "dot", "(", "Z", "[", "i", "]", ",", "Z", "[", "i", "]", ".", "T", ")", "# finally compute update for A", "A", "=", "solve", "(", "I", "+", "E", ".", "T", ",", "F", ".", "T", ")", ".", "T", "return", "orth", "(", "A", ")", "if", "orthogonalize", "else", "A" ]
Update step for A
[ "Update", "step", "for", "A" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/rescal.py#L212-L234
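The A-step of RESCAL accumulates F and E over all relation slices and then solves one regularized linear system, A <- F (lmbdaA I + E)^{-1}, implemented above by solving the transposed system. A hedged sketch of the same update on random toy data (sizes and names are made up; attribute matrices P, Z omitted):

import numpy as np
from numpy.linalg import solve

n, rank, K = 6, 3, 2
rng = np.random.default_rng(4)
X = [rng.random((n, n)) for _ in range(K)]      # frontal slices
A = rng.random((n, rank))
R = [rng.random((rank, rank)) for _ in range(K)]
lmbdaA = 0.1

AtA = A.T @ A
F = sum(X[i] @ A @ R[i].T + X[i].T @ A @ R[i] for i in range(K))
E = sum(R[i] @ AtA @ R[i].T + R[i].T @ AtA @ R[i] for i in range(K))

I = lmbdaA * np.eye(rank)
A_new = solve(I + E.T, F.T).T                    # same update as _updateA
assert A_new.shape == (n, rank)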
15,031
mnick/scikit-tensor
sktensor/rescal.py
_compute_fval
def _compute_fval(X, A, R, P, Z, lmbdaA, lmbdaR, lmbdaZ, normX): """Compute fit for full slices""" f = lmbdaA * norm(A) ** 2 for i in range(len(X)): ARAt = dot(A, dot(R[i], A.T)) f += (norm(X[i] - ARAt) ** 2) / normX[i] + lmbdaR * norm(R[i]) ** 2 return f
python
def _compute_fval(X, A, R, P, Z, lmbdaA, lmbdaR, lmbdaZ, normX): """Compute fit for full slices""" f = lmbdaA * norm(A) ** 2 for i in range(len(X)): ARAt = dot(A, dot(R[i], A.T)) f += (norm(X[i] - ARAt) ** 2) / normX[i] + lmbdaR * norm(R[i]) ** 2 return f
[ "def", "_compute_fval", "(", "X", ",", "A", ",", "R", ",", "P", ",", "Z", ",", "lmbdaA", ",", "lmbdaR", ",", "lmbdaZ", ",", "normX", ")", ":", "f", "=", "lmbdaA", "*", "norm", "(", "A", ")", "**", "2", "for", "i", "in", "range", "(", "len", "(", "X", ")", ")", ":", "ARAt", "=", "dot", "(", "A", ",", "dot", "(", "R", "[", "i", "]", ",", "A", ".", "T", ")", ")", "f", "+=", "(", "norm", "(", "X", "[", "i", "]", "-", "ARAt", ")", "**", "2", ")", "/", "normX", "[", "i", "]", "+", "lmbdaR", "*", "norm", "(", "R", "[", "i", "]", ")", "**", "2", "return", "f" ]
Compute fit for full slices
[ "Compute", "fit", "for", "full", "slices" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/rescal.py#L268-L274
15,032
mnick/scikit-tensor
sktensor/cp.py
als
def als(X, rank, **kwargs):
    """
    Alternating least-squares algorithm to compute the CP decomposition.

    Parameters
    ----------
    X : tensor_mixin
        The tensor to be decomposed.
    rank : int
        Tensor rank of the decomposition.
    init : {'random', 'nvecs'}, optional
        The initialization method to use.
        - random : Factor matrices are initialized randomly.
        - nvecs : Factor matrices are initialized via HOSVD.
        (default 'nvecs')
    max_iter : int, optional
        Maximum number of iterations of the ALS algorithm.
        (default 500)
    fit_method : {'full', None}
        The method to compute the fit of the factorization
        - 'full' : Compute the least-squares fit between the dense
          approximation of the factorization and ``X``.
        - None : Do not compute the fit of the factorization, but iterate
          until ``max_iter`` (Useful for large-scale tensors).
        (default 'full')
    conv : float
        Convergence tolerance on difference of fit between iterations
        (default 1e-5)

    Returns
    -------
    P : ktensor
        Rank ``rank`` factorization of X. ``P.U[i]`` corresponds to the factor
        matrix for the i-th mode. ``P.lmbda[i]`` corresponds to the weight
        of the i-th component.
    fit : float
        Fit of the factorization compared to ``X``
    itr : int
        Number of iterations that were needed until convergence
    exectimes : ndarray of floats
        Time needed for each single iteration

    Examples
    --------
    Create random dense tensor

    >>> from sktensor import dtensor, ktensor
    >>> U = [np.random.rand(i,3) for i in (20, 10, 14)]
    >>> T = dtensor(ktensor(U).toarray())

    Compute rank-3 CP decomposition of ``T`` with ALS

    >>> P, fit, itr, _ = als(T, 3)

    Result is a decomposed tensor stored as a Kruskal operator

    >>> type(P)
    <class 'sktensor.ktensor.ktensor'>

    Factorization should be close to original data

    >>> np.allclose(T, P.totensor())
    True

    References
    ----------
    .. [1] Kolda, T. G. & Bader, B. W.
           Tensor Decompositions and Applications.
           SIAM Rev. 51, 455–500 (2009).
    .. [2] Harshman, R. A.
           Foundations of the PARAFAC procedure: models and conditions for an
           'explanatory' multimodal factor analysis.
           UCLA Working Papers in Phonetics 16, (1970).
    .. [3] Carroll, J. D., Chang, J. J.
           Analysis of individual differences in multidimensional scaling
           via an N-way generalization of 'Eckart-Young' decomposition.
           Psychometrika 35, 283–319 (1970).
    """
    # init options
    ainit = kwargs.pop('init', _DEF_INIT)
    maxiter = kwargs.pop('max_iter', _DEF_MAXITER)
    fit_method = kwargs.pop('fit_method', _DEF_FIT_METHOD)
    conv = kwargs.pop('conv', _DEF_CONV)
    dtype = kwargs.pop('dtype', _DEF_TYPE)
    if not len(kwargs) == 0:
        raise ValueError('Unknown keywords (%s)' % (kwargs.keys()))

    N = X.ndim
    normX = norm(X)

    U = _init(ainit, X, N, rank, dtype)
    fit = 0
    exectimes = []
    for itr in range(maxiter):
        tic = time.clock()
        fitold = fit

        for n in range(N):
            Unew = X.uttkrp(U, n)
            Y = ones((rank, rank), dtype=dtype)
            for i in (list(range(n)) + list(range(n + 1, N))):
                Y = Y * dot(U[i].T, U[i])
            Unew = Unew.dot(pinv(Y))
            # Normalize
            if itr == 0:
                lmbda = sqrt((Unew ** 2).sum(axis=0))
            else:
                lmbda = Unew.max(axis=0)
                lmbda[lmbda < 1] = 1
            U[n] = Unew / lmbda

        P = ktensor(U, lmbda)
        if fit_method == 'full':
            normresidual = normX ** 2 + P.norm() ** 2 - 2 * P.innerprod(X)
            fit = 1 - (normresidual / normX ** 2)
        else:
            fit = itr
        fitchange = abs(fitold - fit)
        exectimes.append(time.clock() - tic)

        _log.debug(
            '[%3d] fit: %.5f | delta: %7.1e | secs: %.5f'
            % (itr, fit, fitchange, exectimes[-1])
        )
        if itr > 0 and fitchange < conv:
            break

    return P, fit, itr, array(exectimes)
python
def als(X, rank, **kwargs):
    """
    Alternating least-squares algorithm to compute the CP decomposition.

    Parameters
    ----------
    X : tensor_mixin
        The tensor to be decomposed.
    rank : int
        Tensor rank of the decomposition.
    init : {'random', 'nvecs'}, optional
        The initialization method to use.
        - random : Factor matrices are initialized randomly.
        - nvecs : Factor matrices are initialized via HOSVD.
        (default 'nvecs')
    max_iter : int, optional
        Maximum number of iterations of the ALS algorithm.
        (default 500)
    fit_method : {'full', None}
        The method to compute the fit of the factorization
        - 'full' : Compute the least-squares fit between the dense
          approximation of the factorization and ``X``.
        - None : Do not compute the fit of the factorization, but iterate
          until ``max_iter`` (Useful for large-scale tensors).
        (default 'full')
    conv : float
        Convergence tolerance on difference of fit between iterations
        (default 1e-5)

    Returns
    -------
    P : ktensor
        Rank ``rank`` factorization of X. ``P.U[i]`` corresponds to the factor
        matrix for the i-th mode. ``P.lmbda[i]`` corresponds to the weight
        of the i-th component.
    fit : float
        Fit of the factorization compared to ``X``
    itr : int
        Number of iterations that were needed until convergence
    exectimes : ndarray of floats
        Time needed for each single iteration

    Examples
    --------
    Create random dense tensor

    >>> from sktensor import dtensor, ktensor
    >>> U = [np.random.rand(i,3) for i in (20, 10, 14)]
    >>> T = dtensor(ktensor(U).toarray())

    Compute rank-3 CP decomposition of ``T`` with ALS

    >>> P, fit, itr, _ = als(T, 3)

    Result is a decomposed tensor stored as a Kruskal operator

    >>> type(P)
    <class 'sktensor.ktensor.ktensor'>

    Factorization should be close to original data

    >>> np.allclose(T, P.totensor())
    True

    References
    ----------
    .. [1] Kolda, T. G. & Bader, B. W.
           Tensor Decompositions and Applications.
           SIAM Rev. 51, 455–500 (2009).
    .. [2] Harshman, R. A.
           Foundations of the PARAFAC procedure: models and conditions for an
           'explanatory' multimodal factor analysis.
           UCLA Working Papers in Phonetics 16, (1970).
    .. [3] Carroll, J. D., Chang, J. J.
           Analysis of individual differences in multidimensional scaling
           via an N-way generalization of 'Eckart-Young' decomposition.
           Psychometrika 35, 283–319 (1970).
    """
    # init options
    ainit = kwargs.pop('init', _DEF_INIT)
    maxiter = kwargs.pop('max_iter', _DEF_MAXITER)
    fit_method = kwargs.pop('fit_method', _DEF_FIT_METHOD)
    conv = kwargs.pop('conv', _DEF_CONV)
    dtype = kwargs.pop('dtype', _DEF_TYPE)
    if not len(kwargs) == 0:
        raise ValueError('Unknown keywords (%s)' % (kwargs.keys()))

    N = X.ndim
    normX = norm(X)

    U = _init(ainit, X, N, rank, dtype)
    fit = 0
    exectimes = []
    for itr in range(maxiter):
        tic = time.clock()
        fitold = fit

        for n in range(N):
            Unew = X.uttkrp(U, n)
            Y = ones((rank, rank), dtype=dtype)
            for i in (list(range(n)) + list(range(n + 1, N))):
                Y = Y * dot(U[i].T, U[i])
            Unew = Unew.dot(pinv(Y))
            # Normalize
            if itr == 0:
                lmbda = sqrt((Unew ** 2).sum(axis=0))
            else:
                lmbda = Unew.max(axis=0)
                lmbda[lmbda < 1] = 1
            U[n] = Unew / lmbda

        P = ktensor(U, lmbda)
        if fit_method == 'full':
            normresidual = normX ** 2 + P.norm() ** 2 - 2 * P.innerprod(X)
            fit = 1 - (normresidual / normX ** 2)
        else:
            fit = itr
        fitchange = abs(fitold - fit)
        exectimes.append(time.clock() - tic)

        _log.debug(
            '[%3d] fit: %.5f | delta: %7.1e | secs: %.5f'
            % (itr, fit, fitchange, exectimes[-1])
        )
        if itr > 0 and fitchange < conv:
            break

    return P, fit, itr, array(exectimes)
[ "def", "als", "(", "X", ",", "rank", ",", "*", "*", "kwargs", ")", ":", "# init options", "ainit", "=", "kwargs", ".", "pop", "(", "'init'", ",", "_DEF_INIT", ")", "maxiter", "=", "kwargs", ".", "pop", "(", "'max_iter'", ",", "_DEF_MAXITER", ")", "fit_method", "=", "kwargs", ".", "pop", "(", "'fit_method'", ",", "_DEF_FIT_METHOD", ")", "conv", "=", "kwargs", ".", "pop", "(", "'conv'", ",", "_DEF_CONV", ")", "dtype", "=", "kwargs", ".", "pop", "(", "'dtype'", ",", "_DEF_TYPE", ")", "if", "not", "len", "(", "kwargs", ")", "==", "0", ":", "raise", "ValueError", "(", "'Unknown keywords (%s)'", "%", "(", "kwargs", ".", "keys", "(", ")", ")", ")", "N", "=", "X", ".", "ndim", "normX", "=", "norm", "(", "X", ")", "U", "=", "_init", "(", "ainit", ",", "X", ",", "N", ",", "rank", ",", "dtype", ")", "fit", "=", "0", "exectimes", "=", "[", "]", "for", "itr", "in", "range", "(", "maxiter", ")", ":", "tic", "=", "time", ".", "clock", "(", ")", "fitold", "=", "fit", "for", "n", "in", "range", "(", "N", ")", ":", "Unew", "=", "X", ".", "uttkrp", "(", "U", ",", "n", ")", "Y", "=", "ones", "(", "(", "rank", ",", "rank", ")", ",", "dtype", "=", "dtype", ")", "for", "i", "in", "(", "list", "(", "range", "(", "n", ")", ")", "+", "list", "(", "range", "(", "n", "+", "1", ",", "N", ")", ")", ")", ":", "Y", "=", "Y", "*", "dot", "(", "U", "[", "i", "]", ".", "T", ",", "U", "[", "i", "]", ")", "Unew", "=", "Unew", ".", "dot", "(", "pinv", "(", "Y", ")", ")", "# Normalize", "if", "itr", "==", "0", ":", "lmbda", "=", "sqrt", "(", "(", "Unew", "**", "2", ")", ".", "sum", "(", "axis", "=", "0", ")", ")", "else", ":", "lmbda", "=", "Unew", ".", "max", "(", "axis", "=", "0", ")", "lmbda", "[", "lmbda", "<", "1", "]", "=", "1", "U", "[", "n", "]", "=", "Unew", "/", "lmbda", "P", "=", "ktensor", "(", "U", ",", "lmbda", ")", "if", "fit_method", "==", "'full'", ":", "normresidual", "=", "normX", "**", "2", "+", "P", ".", "norm", "(", ")", "**", "2", "-", "2", "*", "P", ".", "innerprod", "(", "X", ")", "fit", "=", "1", "-", "(", "normresidual", "/", "normX", "**", "2", ")", "else", ":", "fit", "=", "itr", "fitchange", "=", "abs", "(", "fitold", "-", "fit", ")", "exectimes", ".", "append", "(", "time", ".", "clock", "(", ")", "-", "tic", ")", "_log", ".", "debug", "(", "'[%3d] fit: %.5f | delta: %7.1e | secs: %.5f'", "%", "(", "itr", ",", "fit", ",", "fitchange", ",", "exectimes", "[", "-", "1", "]", ")", ")", "if", "itr", ">", "0", "and", "fitchange", "<", "conv", ":", "break", "return", "P", ",", "fit", ",", "itr", ",", "array", "(", "exectimes", ")" ]
Alternating least-squares algorithm to compute the CP decomposition.

Parameters
----------
X : tensor_mixin
    The tensor to be decomposed.
rank : int
    Tensor rank of the decomposition.
init : {'random', 'nvecs'}, optional
    The initialization method to use.
    - random : Factor matrices are initialized randomly.
    - nvecs : Factor matrices are initialized via HOSVD.
    (default 'nvecs')
max_iter : int, optional
    Maximum number of iterations of the ALS algorithm.
    (default 500)
fit_method : {'full', None}
    The method to compute the fit of the factorization
    - 'full' : Compute the least-squares fit between the dense
      approximation of the factorization and ``X``.
    - None : Do not compute the fit of the factorization, but iterate
      until ``max_iter`` (Useful for large-scale tensors).
    (default 'full')
conv : float
    Convergence tolerance on difference of fit between iterations
    (default 1e-5)

Returns
-------
P : ktensor
    Rank ``rank`` factorization of X. ``P.U[i]`` corresponds to the factor
    matrix for the i-th mode. ``P.lmbda[i]`` corresponds to the weight
    of the i-th component.
fit : float
    Fit of the factorization compared to ``X``
itr : int
    Number of iterations that were needed until convergence
exectimes : ndarray of floats
    Time needed for each single iteration

Examples
--------
Create random dense tensor

>>> from sktensor import dtensor, ktensor
>>> U = [np.random.rand(i,3) for i in (20, 10, 14)]
>>> T = dtensor(ktensor(U).toarray())

Compute rank-3 CP decomposition of ``T`` with ALS

>>> P, fit, itr, _ = als(T, 3)

Result is a decomposed tensor stored as a Kruskal operator

>>> type(P)
<class 'sktensor.ktensor.ktensor'>

Factorization should be close to original data

>>> np.allclose(T, P.totensor())
True

References
----------
.. [1] Kolda, T. G. & Bader, B. W.
       Tensor Decompositions and Applications.
       SIAM Rev. 51, 455–500 (2009).
.. [2] Harshman, R. A.
       Foundations of the PARAFAC procedure: models and conditions for an
       'explanatory' multimodal factor analysis.
       UCLA Working Papers in Phonetics 16, (1970).
.. [3] Carroll, J. D., Chang, J. J.
       Analysis of individual differences in multidimensional scaling
       via an N-way generalization of 'Eckart-Young' decomposition.
       Psychometrika 35, 283–319 (1970).
[ "Alternating", "least", "-", "sqaures", "algorithm", "to", "compute", "the", "CP", "decomposition", "." ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/cp.py#L46-L171
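To make the inner loop of als concrete, here is a compact, self-contained CP-ALS for 3-way dense tensors in plain numpy. This is a simplified sketch, not sktensor's implementation: no convergence test or fit tracking, and the factors are renormalized by column norm on every sweep.

import numpy as np
from numpy.linalg import pinv

def cp_als_3way(X, rank, max_iter=100, seed=0):
    rng = np.random.default_rng(seed)
    U = [rng.random((s, rank)) for s in X.shape]
    lmbda = np.ones(rank)
    modes = 'ijk'
    for _ in range(max_iter):
        for n in range(3):
            rest = [m for m in range(3) if m != n]
            # uttkrp: contract X with the two factors that are held fixed
            spec = 'ijk,%sr,%sr->%sr' % (modes[rest[0]], modes[rest[1]], modes[n])
            Unew = np.einsum(spec, X, U[rest[0]], U[rest[1]])
            Y = (U[rest[0]].T @ U[rest[0]]) * (U[rest[1]].T @ U[rest[1]])
            Unew = Unew @ pinv(Y)
            lmbda = np.sqrt((Unew ** 2).sum(axis=0))   # column norms
            lmbda[lmbda == 0] = 1
            U[n] = Unew / lmbda
    return lmbda, U

# an exactly rank-2 tensor should be fit almost perfectly
rng = np.random.default_rng(1)
G = [rng.random((s, 2)) for s in (4, 5, 6)]
X = np.einsum('ir,jr,kr->ijk', G[0], G[1], G[2])
lmbda, U = cp_als_3way(X, 2)
approx = np.einsum('r,ir,jr,kr->ijk', lmbda, U[0], U[1], U[2])
print(np.linalg.norm(X - approx) / np.linalg.norm(X))   # expect a value near 0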
15,033
mnick/scikit-tensor
sktensor/cp.py
_init
def _init(init, X, N, rank, dtype):
    """
    Initialization for CP models
    """
    Uinit = [None for _ in range(N)]
    # the mode-0 factor stays None: CP-ALS updates mode 0 first and never reads it
    if isinstance(init, list):
        Uinit = init
    elif init == 'random':
        for n in range(1, N):
            Uinit[n] = array(rand(X.shape[n], rank), dtype=dtype)
    elif init == 'nvecs':
        for n in range(1, N):
            Uinit[n] = array(nvecs(X, n, rank), dtype=dtype)
    else:
        raise ValueError('Unknown option (init=%s)' % str(init))
    return Uinit
python
def _init(init, X, N, rank, dtype):
    """
    Initialization for CP models
    """
    Uinit = [None for _ in range(N)]
    # the mode-0 factor stays None: CP-ALS updates mode 0 first and never reads it
    if isinstance(init, list):
        Uinit = init
    elif init == 'random':
        for n in range(1, N):
            Uinit[n] = array(rand(X.shape[n], rank), dtype=dtype)
    elif init == 'nvecs':
        for n in range(1, N):
            Uinit[n] = array(nvecs(X, n, rank), dtype=dtype)
    else:
        raise ValueError('Unknown option (init=%s)' % str(init))
    return Uinit
[ "def", "_init", "(", "init", ",", "X", ",", "N", ",", "rank", ",", "dtype", ")", ":", "Uinit", "=", "[", "None", "for", "_", "in", "range", "(", "N", ")", "]", "if", "isinstance", "(", "init", ",", "list", ")", ":", "Uinit", "=", "init", "elif", "init", "==", "'random'", ":", "for", "n", "in", "range", "(", "1", ",", "N", ")", ":", "Uinit", "[", "n", "]", "=", "array", "(", "rand", "(", "X", ".", "shape", "[", "n", "]", ",", "rank", ")", ",", "dtype", "=", "dtype", ")", "elif", "init", "==", "'nvecs'", ":", "for", "n", "in", "range", "(", "1", ",", "N", ")", ":", "Uinit", "[", "n", "]", "=", "array", "(", "nvecs", "(", "X", ",", "n", ",", "rank", ")", ",", "dtype", "=", "dtype", ")", "else", ":", "raise", "'Unknown option (init=%s)'", "%", "str", "(", "init", ")", "return", "Uinit" ]
Initialization for CP models
[ "Initialization", "for", "CP", "models" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/cp.py#L190-L205
15,034
mnick/scikit-tensor
sktensor/core.py
nvecs
def nvecs(X, n, rank, do_flipsign=True, dtype=float):
    """
    Eigendecomposition of mode-n unfolding of a tensor
    """
    Xn = X.unfold(n)
    if issparse_mat(Xn):
        Xn = csr_matrix(Xn, dtype=dtype)
        Y = Xn.dot(Xn.T)
        _, U = eigsh(Y, rank, which='LM')
    else:
        Y = Xn.dot(Xn.T)
        N = Y.shape[0]
        _, U = eigh(Y, eigvals=(N - rank, N - 1))
        #_, U = eigsh(Y, rank, which='LM')
    # reverse order of eigenvectors such that eigenvalues are decreasing
    U = array(U[:, ::-1])
    # flip sign
    if do_flipsign:
        U = flipsign(U)
    return U
python
def nvecs(X, n, rank, do_flipsign=True, dtype=float):
    """
    Eigendecomposition of mode-n unfolding of a tensor
    """
    Xn = X.unfold(n)
    if issparse_mat(Xn):
        Xn = csr_matrix(Xn, dtype=dtype)
        Y = Xn.dot(Xn.T)
        _, U = eigsh(Y, rank, which='LM')
    else:
        Y = Xn.dot(Xn.T)
        N = Y.shape[0]
        _, U = eigh(Y, eigvals=(N - rank, N - 1))
        #_, U = eigsh(Y, rank, which='LM')
    # reverse order of eigenvectors such that eigenvalues are decreasing
    U = array(U[:, ::-1])
    # flip sign
    if do_flipsign:
        U = flipsign(U)
    return U
[ "def", "nvecs", "(", "X", ",", "n", ",", "rank", ",", "do_flipsign", "=", "True", ",", "dtype", "=", "np", ".", "float", ")", ":", "Xn", "=", "X", ".", "unfold", "(", "n", ")", "if", "issparse_mat", "(", "Xn", ")", ":", "Xn", "=", "csr_matrix", "(", "Xn", ",", "dtype", "=", "dtype", ")", "Y", "=", "Xn", ".", "dot", "(", "Xn", ".", "T", ")", "_", ",", "U", "=", "eigsh", "(", "Y", ",", "rank", ",", "which", "=", "'LM'", ")", "else", ":", "Y", "=", "Xn", ".", "dot", "(", "Xn", ".", "T", ")", "N", "=", "Y", ".", "shape", "[", "0", "]", "_", ",", "U", "=", "eigh", "(", "Y", ",", "eigvals", "=", "(", "N", "-", "rank", ",", "N", "-", "1", ")", ")", "#_, U = eigsh(Y, rank, which='LM')", "# reverse order of eigenvectors such that eigenvalues are decreasing", "U", "=", "array", "(", "U", "[", ":", ",", ":", ":", "-", "1", "]", ")", "# flip sign", "if", "do_flipsign", ":", "U", "=", "flipsign", "(", "U", ")", "return", "U" ]
Eigendecomposition of mode-n unfolding of a tensor
[ "Eigendecomposition", "of", "mode", "-", "n", "unfolding", "of", "a", "tensor" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/core.py#L275-L294
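nvecs takes the leading eigenvectors of the Gram matrix of the mode-n unfolding, which span the same subspace as the leading left singular vectors of the unfolding itself. A plain-numpy check of that equivalence for mode 0 (random toy tensor; C-order reshape gives the mode-0 unfolding regardless of sktensor's convention):

import numpy as np

rng = np.random.default_rng(5)
X = rng.random((4, 5, 6))
rank = 2

Xn = X.reshape(X.shape[0], -1)            # mode-0 unfolding
Y = Xn @ Xn.T

w, V = np.linalg.eigh(Y)                  # ascending eigenvalues
U = V[:, ::-1][:, :rank]                  # top-'rank' eigenvectors

# same subspace as the leading left singular vectors of the unfolding
Us, _, _ = np.linalg.svd(Xn, full_matrices=False)
assert np.allclose(np.abs(U.T @ Us[:, :rank]), np.eye(rank), atol=1e-6)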
15,035
mnick/scikit-tensor
sktensor/core.py
flipsign
def flipsign(U): """ Flip sign of factor matrices such that largest magnitude element will be positive """ midx = abs(U).argmax(axis=0) for i in range(U.shape[1]): if U[midx[i], i] < 0: U[:, i] = -U[:, i] return U
python
def flipsign(U): """ Flip sign of factor matrices such that largest magnitude element will be positive """ midx = abs(U).argmax(axis=0) for i in range(U.shape[1]): if U[midx[i], i] < 0: U[:, i] = -U[:, i] return U
[ "def", "flipsign", "(", "U", ")", ":", "midx", "=", "abs", "(", "U", ")", ".", "argmax", "(", "axis", "=", "0", ")", "for", "i", "in", "range", "(", "U", ".", "shape", "[", "1", "]", ")", ":", "if", "U", "[", "midx", "[", "i", "]", ",", "i", "]", "<", "0", ":", "U", "[", ":", ",", "i", "]", "=", "-", "U", "[", ":", ",", "i", "]", "return", "U" ]
Flip sign of factor matrices such that largest magnitude element will be positive
[ "Flip", "sign", "of", "factor", "matrices", "such", "that", "largest", "magnitude", "element", "will", "be", "positive" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/core.py#L297-L306
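A tiny standalone demo of flipsign's convention on a made-up factor matrix: the largest-magnitude entry of each column ends up positive, which removes the sign ambiguity of eigenvectors.

import numpy as np

U = np.array([[ 1., -4.],
              [-3.,  2.]])
midx = np.abs(U).argmax(axis=0)          # row of the dominant entry per column
for i in range(U.shape[1]):
    if U[midx[i], i] < 0:
        U[:, i] = -U[:, i]
print(U)   # [[-1.  4.] [ 3. -2.]]: each column's dominant entry is now positive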
15,036
mnick/scikit-tensor
sktensor/core.py
khatrirao
def khatrirao(A, reverse=False):
    """
    Compute the columnwise Khatri-Rao product.

    Parameters
    ----------
    A : tuple of ndarrays
        Matrices for which the columnwise Khatri-Rao product should be computed
    reverse : boolean
        Compute Khatri-Rao product in reverse order

    Examples
    --------
    >>> A = np.random.randn(5, 2)
    >>> B = np.random.randn(4, 2)
    >>> C = khatrirao((A, B))
    >>> C.shape
    (20, 2)
    >>> (C[:, 0] == np.kron(A[:, 0], B[:, 0])).all()
    True
    >>> (C[:, 1] == np.kron(A[:, 1], B[:, 1])).all()
    True
    """
    if not isinstance(A, tuple):
        raise ValueError('A must be a tuple of array likes')
    N = A[0].shape[1]
    M = 1
    for i in range(len(A)):
        if A[i].ndim != 2:
            raise ValueError('A must be a tuple of matrices (A[%d].ndim = %d)' % (i, A[i].ndim))
        elif N != A[i].shape[1]:
            raise ValueError('All matrices must have same number of columns')
        M *= A[i].shape[0]
    matorder = arange(len(A))
    if reverse:
        matorder = matorder[::-1]
    # preallocate
    P = np.zeros((M, N), dtype=A[0].dtype)
    for n in range(N):
        ab = A[matorder[0]][:, n]
        for j in range(1, len(matorder)):
            ab = np.kron(ab, A[matorder[j]][:, n])
        P[:, n] = ab
    return P
python
def khatrirao(A, reverse=False):
    """
    Compute the columnwise Khatri-Rao product.

    Parameters
    ----------
    A : tuple of ndarrays
        Matrices for which the columnwise Khatri-Rao product should be computed
    reverse : boolean
        Compute Khatri-Rao product in reverse order

    Examples
    --------
    >>> A = np.random.randn(5, 2)
    >>> B = np.random.randn(4, 2)
    >>> C = khatrirao((A, B))
    >>> C.shape
    (20, 2)
    >>> (C[:, 0] == np.kron(A[:, 0], B[:, 0])).all()
    True
    >>> (C[:, 1] == np.kron(A[:, 1], B[:, 1])).all()
    True
    """
    if not isinstance(A, tuple):
        raise ValueError('A must be a tuple of array likes')
    N = A[0].shape[1]
    M = 1
    for i in range(len(A)):
        if A[i].ndim != 2:
            raise ValueError('A must be a tuple of matrices (A[%d].ndim = %d)' % (i, A[i].ndim))
        elif N != A[i].shape[1]:
            raise ValueError('All matrices must have same number of columns')
        M *= A[i].shape[0]
    matorder = arange(len(A))
    if reverse:
        matorder = matorder[::-1]
    # preallocate
    P = np.zeros((M, N), dtype=A[0].dtype)
    for n in range(N):
        ab = A[matorder[0]][:, n]
        for j in range(1, len(matorder)):
            ab = np.kron(ab, A[matorder[j]][:, n])
        P[:, n] = ab
    return P
[ "def", "khatrirao", "(", "A", ",", "reverse", "=", "False", ")", ":", "if", "not", "isinstance", "(", "A", ",", "tuple", ")", ":", "raise", "ValueError", "(", "'A must be a tuple of array likes'", ")", "N", "=", "A", "[", "0", "]", ".", "shape", "[", "1", "]", "M", "=", "1", "for", "i", "in", "range", "(", "len", "(", "A", ")", ")", ":", "if", "A", "[", "i", "]", ".", "ndim", "!=", "2", ":", "raise", "ValueError", "(", "'A must be a tuple of matrices (A[%d].ndim = %d)'", "%", "(", "i", ",", "A", "[", "i", "]", ".", "ndim", ")", ")", "elif", "N", "!=", "A", "[", "i", "]", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "'All matrices must have same number of columns'", ")", "M", "*=", "A", "[", "i", "]", ".", "shape", "[", "0", "]", "matorder", "=", "arange", "(", "len", "(", "A", ")", ")", "if", "reverse", ":", "matorder", "=", "matorder", "[", ":", ":", "-", "1", "]", "# preallocate", "P", "=", "np", ".", "zeros", "(", "(", "M", ",", "N", ")", ",", "dtype", "=", "A", "[", "0", "]", ".", "dtype", ")", "for", "n", "in", "range", "(", "N", ")", ":", "ab", "=", "A", "[", "matorder", "[", "0", "]", "]", "[", ":", ",", "n", "]", "for", "j", "in", "range", "(", "1", ",", "len", "(", "matorder", ")", ")", ":", "ab", "=", "np", ".", "kron", "(", "ab", ",", "A", "[", "matorder", "[", "j", "]", "]", "[", ":", ",", "n", "]", ")", "P", "[", ":", ",", "n", "]", "=", "ab", "return", "P" ]
Compute the columnwise Khatri-Rao product.

Parameters
----------
A : tuple of ndarrays
    Matrices for which the columnwise Khatri-Rao product should be computed
reverse : boolean
    Compute Khatri-Rao product in reverse order

Examples
--------
>>> A = np.random.randn(5, 2)
>>> B = np.random.randn(4, 2)
>>> C = khatrirao((A, B))
>>> C.shape
(20, 2)
>>> (C[:, 0] == np.kron(A[:, 0], B[:, 0])).all()
True
>>> (C[:, 1] == np.kron(A[:, 1], B[:, 1])).all()
True
[ "Compute", "the", "columnwise", "Khatri", "-", "Rao", "product", "." ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/core.py#L333-L378
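The column-wise kron loop in khatrirao can be replaced by one broadcasted product, a common vectorization. A hedged sketch for two factors (names illustrative, not sktensor's API):

import numpy as np

A, B = np.random.randn(5, 2), np.random.randn(4, 2)

# loop version, as in khatrirao()
P_loop = np.stack([np.kron(A[:, r], B[:, r]) for r in range(2)], axis=1)

# broadcasting version: (5,1,2) * (1,4,2) -> (5,4,2) -> (20,2)
P_bcast = (A[:, None, :] * B[None, :, :]).reshape(-1, 2)

assert np.allclose(P_loop, P_bcast)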
15,037
mnick/scikit-tensor
sktensor/core.py
teneye
def teneye(dim, order):
    """
    Create tensor with superdiagonal all one, rest zeros
    """
    I = zeros(dim ** order)
    for f in range(dim):
        # flat index (C order) of the superdiagonal element (f, f, ..., f)
        idd = sum(f * dim ** i for i in range(order))
        I[idd] = 1
    return I.reshape([dim] * order)
python
def teneye(dim, order):
    """
    Create tensor with superdiagonal all one, rest zeros
    """
    I = zeros(dim ** order)
    for f in range(dim):
        # flat index (C order) of the superdiagonal element (f, f, ..., f)
        idd = sum(f * dim ** i for i in range(order))
        I[idd] = 1
    return I.reshape([dim] * order)
[ "def", "teneye", "(", "dim", ",", "order", ")", ":", "I", "=", "zeros", "(", "dim", "**", "order", ")", "for", "f", "in", "range", "(", "dim", ")", ":", "idd", "=", "f", "for", "i", "in", "range", "(", "1", ",", "order", ")", ":", "idd", "=", "idd", "+", "dim", "**", "(", "i", "-", "1", ")", "*", "(", "f", "-", "1", ")", "I", "[", "idd", "]", "=", "1", "return", "I", ".", "reshape", "(", "ones", "(", "order", ")", "*", "dim", ")" ]
Create tensor with superdiagonal all one, rest zeros
[ "Create", "tensor", "with", "superdiagonal", "all", "one", "rest", "zeros" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/core.py#L381-L391
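A quick check of the superdiagonal index formula used in the corrected teneye: in C order, the flat index of (f, f, ..., f) is the sum of f * dim**i over all modes. This standalone sketch compares direct multi-index assignment with the flat-index version:

import numpy as np

dim, order = 3, 4
I = np.zeros([dim] * order)
for f in range(dim):
    I[(f,) * order] = 1                      # direct multi-index assignment

flat = np.zeros(dim ** order)
for f in range(dim):
    flat[sum(f * dim ** i for i in range(order))] = 1

assert np.array_equal(I, flat.reshape([dim] * order))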
15,038
mnick/scikit-tensor
sktensor/core.py
tensor_mixin.ttm
def ttm(self, V, mode=None, transp=False, without=False):
    """
    Tensor times matrix product

    Parameters
    ----------
    V : M x N array_like or list of M_i x N_i array_likes
        Matrix or list of matrices for which the tensor times matrix
        products should be performed
    mode : int or list of int's, optional
        Modes along which the tensor times matrix products should be
        performed
    transp: boolean, optional
        If True, tensor times matrix products are computed with
        transpositions of matrices
    without: boolean, optional
        If True, tensor times matrix products are performed along all
        modes **except** the modes specified via parameter ``mode``

    Examples
    --------
    Create dense tensor

    >>> T = zeros((3, 4, 2))
    >>> T[:, :, 0] = [[ 1, 4, 7, 10], [ 2, 5, 8, 11], [3, 6, 9, 12]]
    >>> T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]
    >>> T = dtensor(T)

    Create matrix

    >>> V = array([[1, 3, 5], [2, 4, 6]])

    Multiply tensor with matrix along mode 0

    >>> Y = T.ttm(V, 0)
    >>> Y[:, :, 0]
    array([[  22.,   49.,   76.,  103.],
           [  28.,   64.,  100.,  136.]])
    >>> Y[:, :, 1]
    array([[ 130.,  157.,  184.,  211.],
           [ 172.,  208.,  244.,  280.]])
    """
    if mode is None:
        mode = range(self.ndim)
    if isinstance(V, np.ndarray):
        Y = self._ttm_compute(V, mode, transp)
    elif is_sequence(V):
        dims, vidx = check_multiplication_dims(mode, self.ndim, len(V), vidx=True, without=without)
        Y = self._ttm_compute(V[vidx[0]], dims[0], transp)
        for i in range(1, len(dims)):
            Y = Y._ttm_compute(V[vidx[i]], dims[i], transp)
    return Y
python
def ttm(self, V, mode=None, transp=False, without=False):
    """
    Tensor times matrix product

    Parameters
    ----------
    V : M x N array_like or list of M_i x N_i array_likes
        Matrix or list of matrices for which the tensor times matrix
        products should be performed
    mode : int or list of int's, optional
        Modes along which the tensor times matrix products should be
        performed
    transp: boolean, optional
        If True, tensor times matrix products are computed with
        transpositions of matrices
    without: boolean, optional
        If True, tensor times matrix products are performed along all
        modes **except** the modes specified via parameter ``mode``

    Examples
    --------
    Create dense tensor

    >>> T = zeros((3, 4, 2))
    >>> T[:, :, 0] = [[ 1, 4, 7, 10], [ 2, 5, 8, 11], [3, 6, 9, 12]]
    >>> T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]
    >>> T = dtensor(T)

    Create matrix

    >>> V = array([[1, 3, 5], [2, 4, 6]])

    Multiply tensor with matrix along mode 0

    >>> Y = T.ttm(V, 0)
    >>> Y[:, :, 0]
    array([[  22.,   49.,   76.,  103.],
           [  28.,   64.,  100.,  136.]])
    >>> Y[:, :, 1]
    array([[ 130.,  157.,  184.,  211.],
           [ 172.,  208.,  244.,  280.]])
    """
    if mode is None:
        mode = range(self.ndim)
    if isinstance(V, np.ndarray):
        Y = self._ttm_compute(V, mode, transp)
    elif is_sequence(V):
        dims, vidx = check_multiplication_dims(mode, self.ndim, len(V), vidx=True, without=without)
        Y = self._ttm_compute(V[vidx[0]], dims[0], transp)
        for i in range(1, len(dims)):
            Y = Y._ttm_compute(V[vidx[i]], dims[i], transp)
    return Y
[ "def", "ttm", "(", "self", ",", "V", ",", "mode", "=", "None", ",", "transp", "=", "False", ",", "without", "=", "False", ")", ":", "if", "mode", "is", "None", ":", "mode", "=", "range", "(", "self", ".", "ndim", ")", "if", "isinstance", "(", "V", ",", "np", ".", "ndarray", ")", ":", "Y", "=", "self", ".", "_ttm_compute", "(", "V", ",", "mode", ",", "transp", ")", "elif", "is_sequence", "(", "V", ")", ":", "dims", ",", "vidx", "=", "check_multiplication_dims", "(", "mode", ",", "self", ".", "ndim", ",", "len", "(", "V", ")", ",", "vidx", "=", "True", ",", "without", "=", "without", ")", "Y", "=", "self", ".", "_ttm_compute", "(", "V", "[", "vidx", "[", "0", "]", "]", ",", "dims", "[", "0", "]", ",", "transp", ")", "for", "i", "in", "xrange", "(", "1", ",", "len", "(", "dims", ")", ")", ":", "Y", "=", "Y", ".", "_ttm_compute", "(", "V", "[", "vidx", "[", "i", "]", "]", ",", "dims", "[", "i", "]", ",", "transp", ")", "return", "Y" ]
Tensor times matrix product

Parameters
----------
V : M x N array_like or list of M_i x N_i array_likes
    Matrix or list of matrices for which the tensor times matrix
    products should be performed
mode : int or list of int's, optional
    Modes along which the tensor times matrix products should be
    performed
transp: boolean, optional
    If True, tensor times matrix products are computed with
    transpositions of matrices
without: boolean, optional
    If True, tensor times matrix products are performed along all
    modes **except** the modes specified via parameter ``mode``

Examples
--------
Create dense tensor

>>> T = zeros((3, 4, 2))
>>> T[:, :, 0] = [[ 1, 4, 7, 10], [ 2, 5, 8, 11], [3, 6, 9, 12]]
>>> T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]
>>> T = dtensor(T)

Create matrix

>>> V = array([[1, 3, 5], [2, 4, 6]])

Multiply tensor with matrix along mode 0

>>> Y = T.ttm(V, 0)
>>> Y[:, :, 0]
array([[  22.,   49.,   76.,  103.],
       [  28.,   64.,  100.,  136.]])
>>> Y[:, :, 1]
array([[ 130.,  157.,  184.,  211.],
       [ 172.,  208.,  244.,  280.]])
[ "Tensor", "times", "matrix", "product" ]
fe517e9661a08164b8d30d2dddf7c96aeeabcf36
https://github.com/mnick/scikit-tensor/blob/fe517e9661a08164b8d30d2dddf7c96aeeabcf36/sktensor/core.py#L50-L103
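A single mode-n product can also be written with np.tensordot plus an axis move, avoiding any explicit unfolding. This sketch is independent of sktensor's internals and reuses the docstring's example values as a check:

import numpy as np

def ttm_single(T, V, mode):
    # contract the given mode of T with the columns of V, keeping axis order
    out = np.tensordot(T, V, axes=(mode, 1))
    return np.moveaxis(out, -1, mode)

T = np.zeros((3, 4, 2))
T[:, :, 0] = [[1, 4, 7, 10], [2, 5, 8, 11], [3, 6, 9, 12]]
T[:, :, 1] = [[13, 16, 19, 22], [14, 17, 20, 23], [15, 18, 21, 24]]
V = np.array([[1, 3, 5], [2, 4, 6]], dtype=float)

Y = ttm_single(T, V, 0)
assert np.allclose(Y[:, :, 0], [[22, 49, 76, 103], [28, 64, 100, 136]])
assert np.allclose(Y[:, :, 1], [[130, 157, 184, 211], [172, 208, 244, 280]])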
15,039
callowayproject/django-categories
categories/registration.py
_process_registry
def _process_registry(registry, call_func): """ Given a dictionary, and a registration function, process the registry """ from django.core.exceptions import ImproperlyConfigured from django.apps import apps for key, value in list(registry.items()): model = apps.get_model(*key.split('.')) if model is None: raise ImproperlyConfigured(_('%(key)s is not a model') % {'key': key}) if isinstance(value, (tuple, list)): for item in value: if isinstance(item, str): call_func(model, item) elif isinstance(item, dict): field_name = item.pop('name') call_func(model, field_name, extra_params=item) else: raise ImproperlyConfigured(_("%(settings)s doesn't recognize the value of %(key)s") % {'settings': 'CATEGORY_SETTINGS', 'key': key}) elif isinstance(value, str): call_func(model, value) elif isinstance(value, dict): field_name = value.pop('name') call_func(model, field_name, extra_params=value) else: raise ImproperlyConfigured(_("%(settings)s doesn't recognize the value of %(key)s") % {'settings': 'CATEGORY_SETTINGS', 'key': key})
python
def _process_registry(registry, call_func): """ Given a dictionary, and a registration function, process the registry """ from django.core.exceptions import ImproperlyConfigured from django.apps import apps for key, value in list(registry.items()): model = apps.get_model(*key.split('.')) if model is None: raise ImproperlyConfigured(_('%(key)s is not a model') % {'key': key}) if isinstance(value, (tuple, list)): for item in value: if isinstance(item, str): call_func(model, item) elif isinstance(item, dict): field_name = item.pop('name') call_func(model, field_name, extra_params=item) else: raise ImproperlyConfigured(_("%(settings)s doesn't recognize the value of %(key)s") % {'settings': 'CATEGORY_SETTINGS', 'key': key}) elif isinstance(value, str): call_func(model, value) elif isinstance(value, dict): field_name = value.pop('name') call_func(model, field_name, extra_params=value) else: raise ImproperlyConfigured(_("%(settings)s doesn't recognize the value of %(key)s") % {'settings': 'CATEGORY_SETTINGS', 'key': key})
[ "def", "_process_registry", "(", "registry", ",", "call_func", ")", ":", "from", "django", ".", "core", ".", "exceptions", "import", "ImproperlyConfigured", "from", "django", ".", "apps", "import", "apps", "for", "key", ",", "value", "in", "list", "(", "registry", ".", "items", "(", ")", ")", ":", "model", "=", "apps", ".", "get_model", "(", "*", "key", ".", "split", "(", "'.'", ")", ")", "if", "model", "is", "None", ":", "raise", "ImproperlyConfigured", "(", "_", "(", "'%(key)s is not a model'", ")", "%", "{", "'key'", ":", "key", "}", ")", "if", "isinstance", "(", "value", ",", "(", "tuple", ",", "list", ")", ")", ":", "for", "item", "in", "value", ":", "if", "isinstance", "(", "item", ",", "str", ")", ":", "call_func", "(", "model", ",", "item", ")", "elif", "isinstance", "(", "item", ",", "dict", ")", ":", "field_name", "=", "item", ".", "pop", "(", "'name'", ")", "call_func", "(", "model", ",", "field_name", ",", "extra_params", "=", "item", ")", "else", ":", "raise", "ImproperlyConfigured", "(", "_", "(", "\"%(settings)s doesn't recognize the value of %(key)s\"", ")", "%", "{", "'settings'", ":", "'CATEGORY_SETTINGS'", ",", "'key'", ":", "key", "}", ")", "elif", "isinstance", "(", "value", ",", "str", ")", ":", "call_func", "(", "model", ",", "value", ")", "elif", "isinstance", "(", "value", ",", "dict", ")", ":", "field_name", "=", "value", ".", "pop", "(", "'name'", ")", "call_func", "(", "model", ",", "field_name", ",", "extra_params", "=", "value", ")", "else", ":", "raise", "ImproperlyConfigured", "(", "_", "(", "\"%(settings)s doesn't recognize the value of %(key)s\"", ")", "%", "{", "'settings'", ":", "'CATEGORY_SETTINGS'", ",", "'key'", ":", "key", "}", ")" ]
Given a dictionary, and a registration function, process the registry
[ "Given", "a", "dictionary", "and", "a", "registration", "function", "process", "the", "registry" ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/registration.py#L118-L146
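_process_registry accepts three value shapes per model key: a single field name, a list of names and/or dicts, or a dict with a 'name' key plus extra parameters. A standalone sketch of the same dispatch with the Django model lookup stubbed out (the app, model and field names below are made up):

def process(registry, call_func):
    for key, value in registry.items():
        model = key                              # stand-in for apps.get_model
        items = value if isinstance(value, (tuple, list)) else [value]
        for item in items:
            if isinstance(item, str):
                call_func(model, item)
            elif isinstance(item, dict):
                params = dict(item)
                call_func(model, params.pop('name'), extra_params=params)

process(
    {'blog.Post': 'category',
     'news.Article': ['primary_category',
                      {'name': 'topics', 'related_name': 'articles'}]},
    lambda m, f, extra_params=None: print(m, f, extra_params or {}),
)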
15,040
callowayproject/django-categories
categories/migration.py
field_exists
def field_exists(app_name, model_name, field_name): """ Does the FK or M2M table exist in the database already? """ model = apps.get_model(app_name, model_name) table_name = model._meta.db_table cursor = connection.cursor() field_info = connection.introspection.get_table_description(cursor, table_name) field_names = [f.name for f in field_info] return field_name in field_names
python
def field_exists(app_name, model_name, field_name): """ Does the FK or M2M table exist in the database already? """ model = apps.get_model(app_name, model_name) table_name = model._meta.db_table cursor = connection.cursor() field_info = connection.introspection.get_table_description(cursor, table_name) field_names = [f.name for f in field_info] return field_name in field_names
[ "def", "field_exists", "(", "app_name", ",", "model_name", ",", "field_name", ")", ":", "model", "=", "apps", ".", "get_model", "(", "app_name", ",", "model_name", ")", "table_name", "=", "model", ".", "_meta", ".", "db_table", "cursor", "=", "connection", ".", "cursor", "(", ")", "field_info", "=", "connection", ".", "introspection", ".", "get_table_description", "(", "cursor", ",", "table_name", ")", "field_names", "=", "[", "f", ".", "name", "for", "f", "in", "field_info", "]", "return", "field_name", "in", "field_names" ]
Does the FK or M2M table exist in the database already?
[ "Does", "the", "FK", "or", "M2M", "table", "exist", "in", "the", "database", "already?" ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/migration.py#L16-L25
15,041
callowayproject/django-categories
categories/migration.py
drop_field
def drop_field(app_name, model_name, field_name): """ Drop the given field from the app's model """ app_config = apps.get_app_config(app_name) model = app_config.get_model(model_name) field = model._meta.get_field(field_name) with connection.schema_editor() as schema_editor: schema_editor.remove_field(model, field)
python
def drop_field(app_name, model_name, field_name): """ Drop the given field from the app's model """ app_config = apps.get_app_config(app_name) model = app_config.get_model(model_name) field = model._meta.get_field(field_name) with connection.schema_editor() as schema_editor: schema_editor.remove_field(model, field)
[ "def", "drop_field", "(", "app_name", ",", "model_name", ",", "field_name", ")", ":", "app_config", "=", "apps", ".", "get_app_config", "(", "app_name", ")", "model", "=", "app_config", ".", "get_model", "(", "model_name", ")", "field", "=", "model", ".", "_meta", ".", "get_field", "(", "field_name", ")", "with", "connection", ".", "schema_editor", "(", ")", "as", "schema_editor", ":", "schema_editor", ".", "remove_field", "(", "model", ",", "field", ")" ]
Drop the given field from the app's model
[ "Drop", "the", "given", "field", "from", "the", "app", "s", "model" ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/migration.py#L28-L36
15,042
callowayproject/django-categories
categories/migration.py
migrate_app
def migrate_app(sender, *args, **kwargs): """ Migrate all models of this app registered """ from .registration import registry if 'app_config' not in kwargs: return app_config = kwargs['app_config'] app_name = app_config.label fields = [fld for fld in list(registry._field_registry.keys()) if fld.startswith(app_name)] sid = transaction.savepoint() for fld in fields: model_name, field_name = fld.split('.')[1:] if field_exists(app_name, model_name, field_name): continue model = app_config.get_model(model_name) try: with connection.schema_editor() as schema_editor: schema_editor.add_field(model, registry._field_registry[fld]) if sid: transaction.savepoint_commit(sid) except ProgrammingError: if sid: transaction.savepoint_rollback(sid) continue
python
def migrate_app(sender, *args, **kwargs): """ Migrate all models of this app registered """ from .registration import registry if 'app_config' not in kwargs: return app_config = kwargs['app_config'] app_name = app_config.label fields = [fld for fld in list(registry._field_registry.keys()) if fld.startswith(app_name)] sid = transaction.savepoint() for fld in fields: model_name, field_name = fld.split('.')[1:] if field_exists(app_name, model_name, field_name): continue model = app_config.get_model(model_name) try: with connection.schema_editor() as schema_editor: schema_editor.add_field(model, registry._field_registry[fld]) if sid: transaction.savepoint_commit(sid) except ProgrammingError: if sid: transaction.savepoint_rollback(sid) continue
[ "def", "migrate_app", "(", "sender", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", ".", "registration", "import", "registry", "if", "'app_config'", "not", "in", "kwargs", ":", "return", "app_config", "=", "kwargs", "[", "'app_config'", "]", "app_name", "=", "app_config", ".", "label", "fields", "=", "[", "fld", "for", "fld", "in", "list", "(", "registry", ".", "_field_registry", ".", "keys", "(", ")", ")", "if", "fld", ".", "startswith", "(", "app_name", ")", "]", "sid", "=", "transaction", ".", "savepoint", "(", ")", "for", "fld", "in", "fields", ":", "model_name", ",", "field_name", "=", "fld", ".", "split", "(", "'.'", ")", "[", "1", ":", "]", "if", "field_exists", "(", "app_name", ",", "model_name", ",", "field_name", ")", ":", "continue", "model", "=", "app_config", ".", "get_model", "(", "model_name", ")", "try", ":", "with", "connection", ".", "schema_editor", "(", ")", "as", "schema_editor", ":", "schema_editor", ".", "add_field", "(", "model", ",", "registry", ".", "_field_registry", "[", "fld", "]", ")", "if", "sid", ":", "transaction", ".", "savepoint_commit", "(", "sid", ")", "except", "ProgrammingError", ":", "if", "sid", ":", "transaction", ".", "savepoint_rollback", "(", "sid", ")", "continue" ]
Migrate all models of this app registered
[ "Migrate", "all", "models", "of", "this", "app", "registered" ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/migration.py#L39-L66
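A minimal wiring sketch, since the kwargs['app_config'] guard matches the signature of Django's post_migrate signal; the AppConfig subclass and app name below are hypothetical.

from django.apps import AppConfig
from django.db.models.signals import post_migrate

class MyAppConfig(AppConfig):  # hypothetical app config
    name = 'myapp'

    def ready(self):
        from categories.migration import migrate_app
        # Re-check registered category fields after every migrate run.
        post_migrate.connect(migrate_app)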
15,043
callowayproject/django-categories
categories/models.py
Category.get_absolute_url
def get_absolute_url(self): """Return a path""" from django.urls import NoReverseMatch if self.alternate_url: return self.alternate_url try: prefix = reverse('categories_tree_list') except NoReverseMatch: prefix = '/' ancestors = list(self.get_ancestors()) + [self, ] return prefix + '/'.join([force_text(i.slug) for i in ancestors]) + '/'
python
def get_absolute_url(self): """Return a path""" from django.urls import NoReverseMatch if self.alternate_url: return self.alternate_url try: prefix = reverse('categories_tree_list') except NoReverseMatch: prefix = '/' ancestors = list(self.get_ancestors()) + [self, ] return prefix + '/'.join([force_text(i.slug) for i in ancestors]) + '/'
[ "def", "get_absolute_url", "(", "self", ")", ":", "from", "django", ".", "urls", "import", "NoReverseMatch", "if", "self", ".", "alternate_url", ":", "return", "self", ".", "alternate_url", "try", ":", "prefix", "=", "reverse", "(", "'categories_tree_list'", ")", "except", "NoReverseMatch", ":", "prefix", "=", "'/'", "ancestors", "=", "list", "(", "self", ".", "get_ancestors", "(", ")", ")", "+", "[", "self", ",", "]", "return", "prefix", "+", "'/'", ".", "join", "(", "[", "force_text", "(", "i", ".", "slug", ")", "for", "i", "in", "ancestors", "]", ")", "+", "'/'" ]
Return a path
[ "Return", "a", "path" ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/models.py#L56-L67
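A hedged usage sketch; the category names and the '/categories/' prefix are assumptions, since the prefix is whatever 'categories_tree_list' reverses to, falling back to '/' when that URL name is not installed. Note that force_text was removed in Django 4.0, so recent versions would need force_str here.

from categories.models import Category

# Hypothetical tree: Grandparent > Parent > Child
child = Category.objects.get(slug='child')
child.get_absolute_url()
# -> '/categories/grandparent/parent/child/'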
15,044
callowayproject/django-categories
categories/models.py
CategoryRelationManager.get_content_type
def get_content_type(self, content_type): """ Get all the items of the given content type related to this item. """ qs = self.get_queryset() return qs.filter(content_type__name=content_type)
python
def get_content_type(self, content_type): """ Get all the items of the given content type related to this item. """ qs = self.get_queryset() return qs.filter(content_type__name=content_type)
[ "def", "get_content_type", "(", "self", ",", "content_type", ")", ":", "qs", "=", "self", ".", "get_queryset", "(", ")", "return", "qs", ".", "filter", "(", "content_type__name", "=", "content_type", ")" ]
Get all the items of the given content type related to this item.
[ "Get", "all", "the", "items", "of", "the", "given", "content", "type", "related", "to", "this", "item", "." ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/models.py#L109-L114
15,045
callowayproject/django-categories
categories/models.py
CategoryRelationManager.get_relation_type
def get_relation_type(self, relation_type): """ Get all the items of the given relationship type related to this item. """ qs = self.get_queryset() return qs.filter(relation_type=relation_type)
python
def get_relation_type(self, relation_type): """ Get all the items of the given relationship type related to this item. """ qs = self.get_queryset() return qs.filter(relation_type=relation_type)
[ "def", "get_relation_type", "(", "self", ",", "relation_type", ")", ":", "qs", "=", "self", ".", "get_queryset", "(", ")", "return", "qs", ".", "filter", "(", "relation_type", "=", "relation_type", ")" ]
Get all the items of the given relationship type related to this item.
[ "Get", "all", "the", "items", "of", "the", "given", "relationship", "type", "related", "to", "this", "item", "." ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/models.py#L116-L121
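Both manager helpers are thin filters over get_queryset(). A hedged sketch, assuming CategoryRelationManager is installed as the model's default manager and that 'primary' and 'image' are values that actually occur in the data:

from categories.models import CategoryRelation

CategoryRelation.objects.get_relation_type('primary')
CategoryRelation.objects.get_content_type('image')
# Caveat: ContentType.name stopped being a database field in Django 1.8,
# so get_content_type's content_type__name filter may need
# content_type__model on newer Django versions.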
15,046
callowayproject/django-categories
categories/apps.py
handle_class_prepared
def handle_class_prepared(sender, **kwargs): """ See if this class needs registering of fields """ from .settings import M2M_REGISTRY, FK_REGISTRY from .registration import registry sender_app = sender._meta.app_label sender_name = sender._meta.model_name for key, val in list(FK_REGISTRY.items()): app_name, model_name = key.split('.') if app_name == sender_app and sender_name == model_name: registry.register_model(app_name, sender, 'ForeignKey', val) for key, val in list(M2M_REGISTRY.items()): app_name, model_name = key.split('.') if app_name == sender_app and sender_name == model_name: registry.register_model(app_name, sender, 'ManyToManyField', val)
python
def handle_class_prepared(sender, **kwargs): """ See if this class needs registering of fields """ from .settings import M2M_REGISTRY, FK_REGISTRY from .registration import registry sender_app = sender._meta.app_label sender_name = sender._meta.model_name for key, val in list(FK_REGISTRY.items()): app_name, model_name = key.split('.') if app_name == sender_app and sender_name == model_name: registry.register_model(app_name, sender, 'ForeignKey', val) for key, val in list(M2M_REGISTRY.items()): app_name, model_name = key.split('.') if app_name == sender_app and sender_name == model_name: registry.register_model(app_name, sender, 'ManyToManyField', val)
[ "def", "handle_class_prepared", "(", "sender", ",", "*", "*", "kwargs", ")", ":", "from", ".", "settings", "import", "M2M_REGISTRY", ",", "FK_REGISTRY", "from", ".", "registration", "import", "registry", "sender_app", "=", "sender", ".", "_meta", ".", "app_label", "sender_name", "=", "sender", ".", "_meta", ".", "model_name", "for", "key", ",", "val", "in", "list", "(", "FK_REGISTRY", ".", "items", "(", ")", ")", ":", "app_name", ",", "model_name", "=", "key", ".", "split", "(", "'.'", ")", "if", "app_name", "==", "sender_app", "and", "sender_name", "==", "model_name", ":", "registry", ".", "register_model", "(", "app_name", ",", "sender", ",", "'ForeignKey'", ",", "val", ")", "for", "key", ",", "val", "in", "list", "(", "M2M_REGISTRY", ".", "items", "(", ")", ")", ":", "app_name", ",", "model_name", "=", "key", ".", "split", "(", "'.'", ")", "if", "app_name", "==", "sender_app", "and", "sender_name", "==", "model_name", ":", "registry", ".", "register_model", "(", "app_name", ",", "sender", ",", "'ManyToManyField'", ",", "val", ")" ]
See if this class needs registering of fields
[ "See", "if", "this", "class", "needs", "registering", "of", "fields" ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/apps.py#L20-L37
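The registries are keyed by 'app_label.modelname' strings, which is why the receiver splits each key on '.'. A wiring sketch, assuming the receiver is connected to Django's class_prepared signal; the registry contents shown are hypothetical:

from django.db.models.signals import class_prepared
from categories.apps import handle_class_prepared

# Assumed registry shape in categories.settings:
#   FK_REGISTRY  = {'blog.entry': 'category'}     # adds a ForeignKey
#   M2M_REGISTRY = {'blog.entry': 'categories'}   # adds a ManyToManyField
class_prepared.connect(handle_class_prepared)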
15,047
callowayproject/django-categories
categories/editor/tree_editor.py
TreeEditor.get_queryset
def get_queryset(self, request): """ Returns a QuerySet of all model instances that can be edited by the admin site. This is used by changelist_view. """ qs = self.model._default_manager.get_queryset() qs.__class__ = TreeEditorQuerySet return qs
python
def get_queryset(self, request): """ Returns a QuerySet of all model instances that can be edited by the admin site. This is used by changelist_view. """ qs = self.model._default_manager.get_queryset() qs.__class__ = TreeEditorQuerySet return qs
[ "def", "get_queryset", "(", "self", ",", "request", ")", ":", "qs", "=", "self", ".", "model", ".", "_default_manager", ".", "get_queryset", "(", ")", "qs", ".", "__class__", "=", "TreeEditorQuerySet", "return", "qs" ]
Returns a QuerySet of all model instances that can be edited by the admin site. This is used by changelist_view.
[ "Returns", "a", "QuerySet", "of", "all", "model", "instances", "that", "can", "be", "edited", "by", "the", "admin", "site", ".", "This", "is", "used", "by", "changelist_view", "." ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/editor/tree_editor.py#L283-L290
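The __class__ reassignment is the notable trick: it grafts extra methods onto an already-built queryset without cloning or re-querying. A standalone sketch of the same pattern with a hypothetical model and subclass:

from django.db.models.query import QuerySet

class UpperQuerySet(QuerySet):            # hypothetical subclass
    def names_upper(self):
        return [str(obj).upper() for obj in self]

qs = Article.objects.get_queryset()       # hypothetical model
qs.__class__ = UpperQuerySet              # same rows, extra methods
qs.names_upper()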
15,048
callowayproject/django-categories
categories/base.py
CategoryBaseAdmin.deactivate
def deactivate(self, request, queryset): """ Set active to False for selected items """ selected_cats = self.model.objects.filter( pk__in=[int(x) for x in request.POST.getlist('_selected_action')]) for item in selected_cats: if item.active: item.active = False item.save() item.children.all().update(active=False)
python
def deactivate(self, request, queryset): """ Set active to False for selected items """ selected_cats = self.model.objects.filter( pk__in=[int(x) for x in request.POST.getlist('_selected_action')]) for item in selected_cats: if item.active: item.active = False item.save() item.children.all().update(active=False)
[ "def", "deactivate", "(", "self", ",", "request", ",", "queryset", ")", ":", "selected_cats", "=", "self", ".", "model", ".", "objects", ".", "filter", "(", "pk__in", "=", "[", "int", "(", "x", ")", "for", "x", "in", "request", ".", "POST", ".", "getlist", "(", "'_selected_action'", ")", "]", ")", "for", "item", "in", "selected_cats", ":", "if", "item", ".", "active", ":", "item", ".", "active", "=", "False", "item", ".", "save", "(", ")", "item", ".", "children", ".", "all", "(", ")", ".", "update", "(", "active", "=", "False", ")" ]
Set active to False for selected items
[ "Set", "active", "to", "False", "for", "selected", "items" ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/base.py#L144-L155
15,049
callowayproject/django-categories
categories/management/commands/import_categories.py
Command.get_indent
def get_indent(self, string): """ Look through the string and count the spaces """ indent_amt = 0 if string[0] == '\t': return '\t' for char in string: if char == ' ': indent_amt += 1 else: return ' ' * indent_amt
python
def get_indent(self, string): """ Look through the string and count the spaces """ indent_amt = 0 if string[0] == '\t': return '\t' for char in string: if char == ' ': indent_amt += 1 else: return ' ' * indent_amt
[ "def", "get_indent", "(", "self", ",", "string", ")", ":", "indent_amt", "=", "0", "if", "string", "[", "0", "]", "==", "'\\t'", ":", "return", "'\\t'", "for", "char", "in", "string", ":", "if", "char", "==", "' '", ":", "indent_amt", "+=", "1", "else", ":", "return", "' '", "*", "indent_amt" ]
Look through the string and count the spaces
[ "Look", "through", "the", "string", "and", "count", "the", "spaces" ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/management/commands/import_categories.py#L16-L28
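A doctest-style sketch of the behaviour; note that a line consisting only of spaces would fall off the end of the loop and return None.

from categories.management.commands.import_categories import Command

cmd = Command()
cmd.get_indent('\tPhysics')     # -> '\t'
cmd.get_indent('    Physics')   # -> '    ' (four spaces)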
15,050
callowayproject/django-categories
categories/management/commands/import_categories.py
Command.make_category
def make_category(self, string, parent=None, order=1): """ Make and save a category object from a string """ cat = Category( name=string.strip(), slug=slugify(SLUG_TRANSLITERATOR(string.strip()))[:49], # arent=parent, order=order ) cat._tree_manager.insert_node(cat, parent, 'last-child', True) cat.save() if parent: parent.rght = cat.rght + 1 parent.save() return cat
python
def make_category(self, string, parent=None, order=1): """ Make and save a category object from a string """ cat = Category( name=string.strip(), slug=slugify(SLUG_TRANSLITERATOR(string.strip()))[:49], # arent=parent, order=order ) cat._tree_manager.insert_node(cat, parent, 'last-child', True) cat.save() if parent: parent.rght = cat.rght + 1 parent.save() return cat
[ "def", "make_category", "(", "self", ",", "string", ",", "parent", "=", "None", ",", "order", "=", "1", ")", ":", "cat", "=", "Category", "(", "name", "=", "string", ".", "strip", "(", ")", ",", "slug", "=", "slugify", "(", "SLUG_TRANSLITERATOR", "(", "string", ".", "strip", "(", ")", ")", ")", "[", ":", "49", "]", ",", "# arent=parent,", "order", "=", "order", ")", "cat", ".", "_tree_manager", ".", "insert_node", "(", "cat", ",", "parent", ",", "'last-child'", ",", "True", ")", "cat", ".", "save", "(", ")", "if", "parent", ":", "parent", ".", "rght", "=", "cat", ".", "rght", "+", "1", "parent", ".", "save", "(", ")", "return", "cat" ]
Make and save a category object from a string
[ "Make", "and", "save", "a", "category", "object", "from", "a", "string" ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/management/commands/import_categories.py#L31-L46
15,051
callowayproject/django-categories
categories/management/commands/import_categories.py
Command.parse_lines
def parse_lines(self, lines): """ Do the work of parsing each line """ indent = '' level = 0 if lines[0][0] == ' ' or lines[0][0] == '\t': raise CommandError("The first line in the file cannot start with a space or tab.") # This keeps track of the current parents at a given level current_parents = {0: None} for line in lines: if len(line) == 0: continue if line[0] == ' ' or line[0] == '\t': if indent == '': indent = self.get_indent(line) elif not line[0] in indent: raise CommandError("You can't mix spaces and tabs for indents") level = line.count(indent) current_parents[level] = self.make_category(line, parent=current_parents[level - 1]) else: # We are back to a zero level, so reset the whole thing current_parents = {0: self.make_category(line)} current_parents[0]._tree_manager.rebuild()
python
def parse_lines(self, lines): """ Do the work of parsing each line """ indent = '' level = 0 if lines[0][0] == ' ' or lines[0][0] == '\t': raise CommandError("The first line in the file cannot start with a space or tab.") # This keeps track of the current parents at a given level current_parents = {0: None} for line in lines: if len(line) == 0: continue if line[0] == ' ' or line[0] == '\t': if indent == '': indent = self.get_indent(line) elif not line[0] in indent: raise CommandError("You can't mix spaces and tabs for indents") level = line.count(indent) current_parents[level] = self.make_category(line, parent=current_parents[level - 1]) else: # We are back to a zero level, so reset the whole thing current_parents = {0: self.make_category(line)} current_parents[0]._tree_manager.rebuild()
[ "def", "parse_lines", "(", "self", ",", "lines", ")", ":", "indent", "=", "''", "level", "=", "0", "if", "lines", "[", "0", "]", "[", "0", "]", "==", "' '", "or", "lines", "[", "0", "]", "[", "0", "]", "==", "'\\t'", ":", "raise", "CommandError", "(", "\"The first line in the file cannot start with a space or tab.\"", ")", "# This keeps track of the current parents at a given level", "current_parents", "=", "{", "0", ":", "None", "}", "for", "line", "in", "lines", ":", "if", "len", "(", "line", ")", "==", "0", ":", "continue", "if", "line", "[", "0", "]", "==", "' '", "or", "line", "[", "0", "]", "==", "'\\t'", ":", "if", "indent", "==", "''", ":", "indent", "=", "self", ".", "get_indent", "(", "line", ")", "elif", "not", "line", "[", "0", "]", "in", "indent", ":", "raise", "CommandError", "(", "\"You can't mix spaces and tabs for indents\"", ")", "level", "=", "line", ".", "count", "(", "indent", ")", "current_parents", "[", "level", "]", "=", "self", ".", "make_category", "(", "line", ",", "parent", "=", "current_parents", "[", "level", "-", "1", "]", ")", "else", ":", "# We are back to a zero level, so reset the whole thing", "current_parents", "=", "{", "0", ":", "self", ".", "make_category", "(", "line", ")", "}", "current_parents", "[", "0", "]", ".", "_tree_manager", ".", "rebuild", "(", ")" ]
Do the work of parsing each line
[ "Do", "the", "work", "of", "parsing", "each", "line" ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/management/commands/import_categories.py#L48-L74
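A sketch of the input this parser expects, assuming a configured Django project with the Category model migrated; indentation depth maps directly to tree depth via line.count(indent).

from categories.management.commands.import_categories import Command

lines = [
    'Science',
    '    Physics',
    '        Optics',
    '    Chemistry',
    'Arts',
]
Command().parse_lines(lines)
# Builds two trees: Science > (Physics > Optics, Chemistry), and Arts.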
15,052
callowayproject/django-categories
categories/management/commands/import_categories.py
Command.handle
def handle(self, *file_paths, **options): """ Handle the basic import """ import os for file_path in file_paths: if not os.path.isfile(file_path): print("File %s not found." % file_path) continue f = open(file_path, 'r') data = f.readlines() f.close() self.parse_lines(data)
python
def handle(self, *file_paths, **options): """ Handle the basic import """ import os for file_path in file_paths: if not os.path.isfile(file_path): print("File %s not found." % file_path) continue f = open(file_path, 'r') data = f.readlines() f.close() self.parse_lines(data)
[ "def", "handle", "(", "self", ",", "*", "file_paths", ",", "*", "*", "options", ")", ":", "import", "os", "for", "file_path", "in", "file_paths", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "file_path", ")", ":", "print", "(", "\"File %s not found.\"", "%", "file_path", ")", "continue", "f", "=", "open", "(", "file_path", ",", "'r'", ")", "data", "=", "f", ".", "readlines", "(", ")", "f", ".", "close", "(", ")", "self", ".", "parse_lines", "(", "data", ")" ]
Handle the basic import
[ "Handle", "the", "basic", "import" ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/management/commands/import_categories.py#L76-L90
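Given the module path categories/management/commands/import_categories.py, the command can also be driven programmatically; the file path below is hypothetical, and whether positional arguments are accepted this way depends on the Django version's argument parsing.

from django.core.management import call_command

call_command('import_categories', '/path/to/categories.txt')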
15,053
callowayproject/django-categories
categories/templatetags/category_tags.py
get_cat_model
def get_cat_model(model): """ Return a class from a string or class """ try: if isinstance(model, string_types): model_class = apps.get_model(*model.split(".")) elif issubclass(model, CategoryBase): model_class = model if model_class is None: raise TypeError except TypeError: raise TemplateSyntaxError("Unknown model submitted: %s" % model) return model_class
python
def get_cat_model(model): """ Return a class from a string or class """ try: if isinstance(model, string_types): model_class = apps.get_model(*model.split(".")) elif issubclass(model, CategoryBase): model_class = model if model_class is None: raise TypeError except TypeError: raise TemplateSyntaxError("Unknown model submitted: %s" % model) return model_class
[ "def", "get_cat_model", "(", "model", ")", ":", "try", ":", "if", "isinstance", "(", "model", ",", "string_types", ")", ":", "model_class", "=", "apps", ".", "get_model", "(", "*", "model", ".", "split", "(", "\".\"", ")", ")", "elif", "issubclass", "(", "model", ",", "CategoryBase", ")", ":", "model_class", "=", "model", "if", "model_class", "is", "None", ":", "raise", "TypeError", "except", "TypeError", ":", "raise", "TemplateSyntaxError", "(", "\"Unknown model submitted: %s\"", "%", "model", ")", "return", "model_class" ]
Return a class from a string or class
[ "Return", "a", "class", "from", "a", "string", "or", "class" ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/templatetags/category_tags.py#L29-L42
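Both call forms the helper accepts, sketched below; an unusable value such as a model instance raises TemplateSyntaxError rather than TypeError.

from categories.models import Category
from categories.templatetags.category_tags import get_cat_model

get_cat_model('categories.Category')   # dotted 'app.Model' string
get_cat_model(Category)                # or the model class itself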
15,054
callowayproject/django-categories
categories/templatetags/category_tags.py
get_category
def get_category(category_string, model=Category): """ Convert a string, including a path, and return the Category object """ model_class = get_cat_model(model) category = str(category_string).strip("'\"") category = category.strip('/') cat_list = category.split('/') if len(cat_list) == 0: return None try: categories = model_class.objects.filter(name=cat_list[-1], level=len(cat_list) - 1) if len(cat_list) == 1 and len(categories) > 1: return None # If there is only one, use it. If there is more than one, check # if the parent matches the parent passed in the string if len(categories) == 1: return categories[0] else: for item in categories: if item.parent.name == cat_list[-2]: return item except model_class.DoesNotExist: return None
python
def get_category(category_string, model=Category): """ Convert a string, including a path, and return the Category object """ model_class = get_cat_model(model) category = str(category_string).strip("'\"") category = category.strip('/') cat_list = category.split('/') if len(cat_list) == 0: return None try: categories = model_class.objects.filter(name=cat_list[-1], level=len(cat_list) - 1) if len(cat_list) == 1 and len(categories) > 1: return None # If there is only one, use it. If there is more than one, check # if the parent matches the parent passed in the string if len(categories) == 1: return categories[0] else: for item in categories: if item.parent.name == cat_list[-2]: return item except model_class.DoesNotExist: return None
[ "def", "get_category", "(", "category_string", ",", "model", "=", "Category", ")", ":", "model_class", "=", "get_cat_model", "(", "model", ")", "category", "=", "str", "(", "category_string", ")", ".", "strip", "(", "\"'\\\"\"", ")", "category", "=", "category", ".", "strip", "(", "'/'", ")", "cat_list", "=", "category", ".", "split", "(", "'/'", ")", "if", "len", "(", "cat_list", ")", "==", "0", ":", "return", "None", "try", ":", "categories", "=", "model_class", ".", "objects", ".", "filter", "(", "name", "=", "cat_list", "[", "-", "1", "]", ",", "level", "=", "len", "(", "cat_list", ")", "-", "1", ")", "if", "len", "(", "cat_list", ")", "==", "1", "and", "len", "(", "categories", ")", ">", "1", ":", "return", "None", "# If there is only one, use it. If there is more than one, check", "# if the parent matches the parent passed in the string", "if", "len", "(", "categories", ")", "==", "1", ":", "return", "categories", "[", "0", "]", "else", ":", "for", "item", "in", "categories", ":", "if", "item", ".", "parent", ".", "name", "==", "cat_list", "[", "-", "2", "]", ":", "return", "item", "except", "model_class", ".", "DoesNotExist", ":", "return", "None" ]
Convert a string, including a path, and return the Category object
[ "Convert", "a", "string", "including", "a", "path", "and", "return", "the", "Category", "object" ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/templatetags/category_tags.py#L45-L69
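A usage sketch with hypothetical category names; the level filter means the path depth must match the tree depth exactly. One caveat: QuerySet.filter() never raises DoesNotExist, so the except clause is effectively dead, and an ambiguous match whose parents all differ simply falls through and returns None.

from categories.templatetags.category_tags import get_category

get_category('/Grandparent/Parent')
# Filters for a level-1 category named 'Parent'; among several matches,
# the one whose parent is named 'Grandparent' is returned.
get_category('"Parent"')   # surrounding quotes and slashes are stripped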
15,055
callowayproject/django-categories
categories/templatetags/category_tags.py
get_category_drilldown
def get_category_drilldown(parser, token): """ Retrieves the specified category, its ancestors and its immediate children as an iterable. Syntax:: {% get_category_drilldown "category name" [using "app.Model"] as varname %} Example:: {% get_category_drilldown "/Grandparent/Parent" [using "app.Model"] as family %} or :: {% get_category_drilldown category_obj as family %} Sets family to:: Grandparent, Parent, Child 1, Child 2, Child n """ bits = token.split_contents() error_str = '%(tagname)s tag should be in the format {%% %(tagname)s ' \ '"category name" [using "app.Model"] as varname %%} or ' \ '{%% %(tagname)s category_obj as varname %%}.' if len(bits) == 4: if bits[2] != 'as': raise template.TemplateSyntaxError(error_str % {'tagname': bits[0]}) if bits[2] == 'as': varname = bits[3].strip("'\"") model = "categories.category" if len(bits) == 6: if bits[2] not in ('using', 'as') or bits[4] not in ('using', 'as'): raise template.TemplateSyntaxError(error_str % {'tagname': bits[0]}) if bits[2] == 'as': varname = bits[3].strip("'\"") model = bits[5].strip("'\"") if bits[2] == 'using': varname = bits[5].strip("'\"") model = bits[3].strip("'\"") category = FilterExpression(bits[1], parser) return CategoryDrillDownNode(category, varname, model)
python
def get_category_drilldown(parser, token): """ Retrieves the specified category, its ancestors and its immediate children as an iterable. Syntax:: {% get_category_drilldown "category name" [using "app.Model"] as varname %} Example:: {% get_category_drilldown "/Grandparent/Parent" [using "app.Model"] as family %} or :: {% get_category_drilldown category_obj as family %} Sets family to:: Grandparent, Parent, Child 1, Child 2, Child n """ bits = token.split_contents() error_str = '%(tagname)s tag should be in the format {%% %(tagname)s ' \ '"category name" [using "app.Model"] as varname %%} or ' \ '{%% %(tagname)s category_obj as varname %%}.' if len(bits) == 4: if bits[2] != 'as': raise template.TemplateSyntaxError(error_str % {'tagname': bits[0]}) if bits[2] == 'as': varname = bits[3].strip("'\"") model = "categories.category" if len(bits) == 6: if bits[2] not in ('using', 'as') or bits[4] not in ('using', 'as'): raise template.TemplateSyntaxError(error_str % {'tagname': bits[0]}) if bits[2] == 'as': varname = bits[3].strip("'\"") model = bits[5].strip("'\"") if bits[2] == 'using': varname = bits[5].strip("'\"") model = bits[3].strip("'\"") category = FilterExpression(bits[1], parser) return CategoryDrillDownNode(category, varname, model)
[ "def", "get_category_drilldown", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "error_str", "=", "'%(tagname)s tag should be in the format {%% %(tagname)s '", "'\"category name\" [using \"app.Model\"] as varname %%} or '", "'{%% %(tagname)s category_obj as varname %%}.'", "if", "len", "(", "bits", ")", "==", "4", ":", "if", "bits", "[", "2", "]", "!=", "'as'", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "error_str", "%", "{", "'tagname'", ":", "bits", "[", "0", "]", "}", ")", "if", "bits", "[", "2", "]", "==", "'as'", ":", "varname", "=", "bits", "[", "3", "]", ".", "strip", "(", "\"'\\\"\"", ")", "model", "=", "\"categories.category\"", "if", "len", "(", "bits", ")", "==", "6", ":", "if", "bits", "[", "2", "]", "not", "in", "(", "'using'", ",", "'as'", ")", "or", "bits", "[", "4", "]", "not", "in", "(", "'using'", ",", "'as'", ")", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "error_str", "%", "{", "'tagname'", ":", "bits", "[", "0", "]", "}", ")", "if", "bits", "[", "2", "]", "==", "'as'", ":", "varname", "=", "bits", "[", "3", "]", ".", "strip", "(", "\"'\\\"\"", ")", "model", "=", "bits", "[", "5", "]", ".", "strip", "(", "\"'\\\"\"", ")", "if", "bits", "[", "2", "]", "==", "'using'", ":", "varname", "=", "bits", "[", "5", "]", ".", "strip", "(", "\"'\\\"\"", ")", "model", "=", "bits", "[", "3", "]", ".", "strip", "(", "\"'\\\"\"", ")", "category", "=", "FilterExpression", "(", "bits", "[", "1", "]", ",", "parser", ")", "return", "CategoryDrillDownNode", "(", "category", ",", "varname", ",", "model", ")" ]
Retrieves the specified category, its ancestors and its immediate children as an iterable. Syntax:: {% get_category_drilldown "category name" [using "app.Model"] as varname %} Example:: {% get_category_drilldown "/Grandparent/Parent" [using "app.Model"] as family %} or :: {% get_category_drilldown category_obj as family %} Sets family to:: Grandparent, Parent, Child 1, Child 2, Child n
[ "Retrieves", "the", "specified", "category", "its", "ancestors", "and", "its", "immediate", "children", "as", "an", "iterable", "." ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/templatetags/category_tags.py#L95-L136
15,056
callowayproject/django-categories
categories/templatetags/category_tags.py
get_top_level_categories
def get_top_level_categories(parser, token): """ Retrieves an alphabetical list of all the categories that have no parents. Syntax:: {% get_top_level_categories [using "app.Model"] as categories %} Returns an list of categories [<category>, <category>, <category, ...] """ bits = token.split_contents() usage = 'Usage: {%% %s [using "app.Model"] as <variable> %%}' % bits[0] if len(bits) == 3: if bits[1] != 'as': raise template.TemplateSyntaxError(usage) varname = bits[2] model = "categories.category" elif len(bits) == 5: if bits[1] not in ('as', 'using') and bits[3] not in ('as', 'using'): raise template.TemplateSyntaxError(usage) if bits[1] == 'using': model = bits[2].strip("'\"") varname = bits[4].strip("'\"") else: model = bits[4].strip("'\"") varname = bits[2].strip("'\"") return TopLevelCategoriesNode(varname, model)
python
def get_top_level_categories(parser, token): """ Retrieves an alphabetical list of all the categories that have no parents. Syntax:: {% get_top_level_categories [using "app.Model"] as categories %} Returns an list of categories [<category>, <category>, <category, ...] """ bits = token.split_contents() usage = 'Usage: {%% %s [using "app.Model"] as <variable> %%}' % bits[0] if len(bits) == 3: if bits[1] != 'as': raise template.TemplateSyntaxError(usage) varname = bits[2] model = "categories.category" elif len(bits) == 5: if bits[1] not in ('as', 'using') and bits[3] not in ('as', 'using'): raise template.TemplateSyntaxError(usage) if bits[1] == 'using': model = bits[2].strip("'\"") varname = bits[4].strip("'\"") else: model = bits[4].strip("'\"") varname = bits[2].strip("'\"") return TopLevelCategoriesNode(varname, model)
[ "def", "get_top_level_categories", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "usage", "=", "'Usage: {%% %s [using \"app.Model\"] as <variable> %%}'", "%", "bits", "[", "0", "]", "if", "len", "(", "bits", ")", "==", "3", ":", "if", "bits", "[", "1", "]", "!=", "'as'", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "usage", ")", "varname", "=", "bits", "[", "2", "]", "model", "=", "\"categories.category\"", "elif", "len", "(", "bits", ")", "==", "5", ":", "if", "bits", "[", "1", "]", "not", "in", "(", "'as'", ",", "'using'", ")", "and", "bits", "[", "3", "]", "not", "in", "(", "'as'", ",", "'using'", ")", ":", "raise", "template", ".", "TemplateSyntaxError", "(", "usage", ")", "if", "bits", "[", "1", "]", "==", "'using'", ":", "model", "=", "bits", "[", "2", "]", ".", "strip", "(", "\"'\\\"\"", ")", "varname", "=", "bits", "[", "4", "]", ".", "strip", "(", "\"'\\\"\"", ")", "else", ":", "model", "=", "bits", "[", "4", "]", ".", "strip", "(", "\"'\\\"\"", ")", "varname", "=", "bits", "[", "2", "]", ".", "strip", "(", "\"'\\\"\"", ")", "return", "TopLevelCategoriesNode", "(", "varname", ",", "model", ")" ]
Retrieves an alphabetical list of all the categories that have no parents. Syntax:: {% get_top_level_categories [using "app.Model"] as categories %} Returns an list of categories [<category>, <category>, <category, ...]
[ "Retrieves", "an", "alphabetical", "list", "of", "all", "the", "categories", "that", "have", "no", "parents", "." ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/templatetags/category_tags.py#L237-L264
15,057
callowayproject/django-categories
categories/templatetags/category_tags.py
tree_queryset
def tree_queryset(value): """ Converts a normal queryset from an MPTT model to include all the ancestors so a filtered subset of items can be formatted correctly """ from django.db.models.query import QuerySet from copy import deepcopy if not isinstance(value, QuerySet): return value qs = value qs2 = deepcopy(qs) # Reaching into the bowels of query sets to find out whether the qs is # actually filtered and we need to do the INCLUDE_ANCESTORS dance at all. # INCLUDE_ANCESTORS is quite expensive, so don't do it if not needed. is_filtered = bool(qs.query.where.children) if is_filtered: include_pages = set() # Order by 'rght' will return the tree deepest nodes first; # this cuts down the number of queries considerably since all ancestors # will already be in include_pages when they are checked, thus not # trigger additional queries. for p in qs2.order_by('rght').iterator(): if p.parent_id and p.parent_id not in include_pages and p.id not in include_pages: ancestor_id_list = p.get_ancestors().values_list('id', flat=True) include_pages.update(ancestor_id_list) if include_pages: qs = qs | qs.model._default_manager.filter(id__in=include_pages) qs = qs.distinct() return qs
python
def tree_queryset(value): """ Converts a normal queryset from an MPTT model to include all the ancestors so a filtered subset of items can be formatted correctly """ from django.db.models.query import QuerySet from copy import deepcopy if not isinstance(value, QuerySet): return value qs = value qs2 = deepcopy(qs) # Reaching into the bowels of query sets to find out whether the qs is # actually filtered and we need to do the INCLUDE_ANCESTORS dance at all. # INCLUDE_ANCESTORS is quite expensive, so don't do it if not needed. is_filtered = bool(qs.query.where.children) if is_filtered: include_pages = set() # Order by 'rght' will return the tree deepest nodes first; # this cuts down the number of queries considerably since all ancestors # will already be in include_pages when they are checked, thus not # trigger additional queries. for p in qs2.order_by('rght').iterator(): if p.parent_id and p.parent_id not in include_pages and p.id not in include_pages: ancestor_id_list = p.get_ancestors().values_list('id', flat=True) include_pages.update(ancestor_id_list) if include_pages: qs = qs | qs.model._default_manager.filter(id__in=include_pages) qs = qs.distinct() return qs
[ "def", "tree_queryset", "(", "value", ")", ":", "from", "django", ".", "db", ".", "models", ".", "query", "import", "QuerySet", "from", "copy", "import", "deepcopy", "if", "not", "isinstance", "(", "value", ",", "QuerySet", ")", ":", "return", "value", "qs", "=", "value", "qs2", "=", "deepcopy", "(", "qs", ")", "# Reaching into the bowels of query sets to find out whether the qs is", "# actually filtered and we need to do the INCLUDE_ANCESTORS dance at all.", "# INCLUDE_ANCESTORS is quite expensive, so don't do it if not needed.", "is_filtered", "=", "bool", "(", "qs", ".", "query", ".", "where", ".", "children", ")", "if", "is_filtered", ":", "include_pages", "=", "set", "(", ")", "# Order by 'rght' will return the tree deepest nodes first;", "# this cuts down the number of queries considerably since all ancestors", "# will already be in include_pages when they are checked, thus not", "# trigger additional queries.", "for", "p", "in", "qs2", ".", "order_by", "(", "'rght'", ")", ".", "iterator", "(", ")", ":", "if", "p", ".", "parent_id", "and", "p", ".", "parent_id", "not", "in", "include_pages", "and", "p", ".", "id", "not", "in", "include_pages", ":", "ancestor_id_list", "=", "p", ".", "get_ancestors", "(", ")", ".", "values_list", "(", "'id'", ",", "flat", "=", "True", ")", "include_pages", ".", "update", "(", "ancestor_id_list", ")", "if", "include_pages", ":", "qs", "=", "qs", "|", "qs", ".", "model", ".", "_default_manager", ".", "filter", "(", "id__in", "=", "include_pages", ")", "qs", "=", "qs", ".", "distinct", "(", ")", "return", "qs" ]
Converts a normal queryset from an MPTT model to include all the ancestors so a filtered subset of items can be formatted correctly
[ "Converts", "a", "normal", "queryset", "from", "an", "MPTT", "model", "to", "include", "all", "the", "ancestors", "so", "a", "filtered", "subset", "of", "items", "can", "be", "formatted", "correctly" ]
3765851320a79b12c6d3306f3784a2302ea64812
https://github.com/callowayproject/django-categories/blob/3765851320a79b12c6d3306f3784a2302ea64812/categories/templatetags/category_tags.py#L346-L377
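A sketch of why this exists: rendering a filtered MPTT queryset as an indented tree needs every match's ancestors present, so they are unioned back in. The filter below is hypothetical.

from categories.models import Category
from categories.templatetags.category_tags import tree_queryset

qs = Category.objects.filter(name__icontains='physics')
full = tree_queryset(qs)
# full now holds each match plus all of its ancestors, so a template
# can indent the hits under their proper parents.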
15,058
maweigert/gputools
gputools/convolve/convolve.py
convolve
def convolve(data, h, res_g=None, sub_blocks=None): """ convolves 1d-3d data with kernel h data and h can either be numpy arrays or gpu buffer objects (OCLArray, which must be float32 then) boundary conditions are clamping to zero at edge. """ if not len(data.shape) in [1, 2, 3]: raise ValueError("dim = %s not supported" % (len(data.shape))) if len(data.shape) != len(h.shape): raise ValueError("dimemnsion of data (%s) and h (%s) are different" % (len(data.shape), len(h.shape))) if isinstance(data, OCLArray) and isinstance(h, OCLArray): return _convolve_buf(data, h, res_g) elif isinstance(data, np.ndarray) and isinstance(h, np.ndarray): if sub_blocks == (1,) * len(data.shape) or sub_blocks is None: return _convolve_np(data, h) else: # cut the image into tile and operate on every of them N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)] Npads = [int(s / 2) for s in h.shape] res = np.empty(data.shape, np.float32) for data_tile, data_s_src, data_s_dest \ in tile_iterator(data, blocksize=N_sub, padsize=Npads, mode="constant"): res_tile = _convolve_np(data_tile.copy(), h) res[data_s_src] = res_tile[data_s_dest] return res else: raise TypeError("unknown types (%s, %s)" % (type(data), type(h)))
python
def convolve(data, h, res_g=None, sub_blocks=None): """ convolves 1d-3d data with kernel h data and h can either be numpy arrays or gpu buffer objects (OCLArray, which must be float32 then) boundary conditions are clamping to zero at edge. """ if not len(data.shape) in [1, 2, 3]: raise ValueError("dim = %s not supported" % (len(data.shape))) if len(data.shape) != len(h.shape): raise ValueError("dimemnsion of data (%s) and h (%s) are different" % (len(data.shape), len(h.shape))) if isinstance(data, OCLArray) and isinstance(h, OCLArray): return _convolve_buf(data, h, res_g) elif isinstance(data, np.ndarray) and isinstance(h, np.ndarray): if sub_blocks == (1,) * len(data.shape) or sub_blocks is None: return _convolve_np(data, h) else: # cut the image into tile and operate on every of them N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)] Npads = [int(s / 2) for s in h.shape] res = np.empty(data.shape, np.float32) for data_tile, data_s_src, data_s_dest \ in tile_iterator(data, blocksize=N_sub, padsize=Npads, mode="constant"): res_tile = _convolve_np(data_tile.copy(), h) res[data_s_src] = res_tile[data_s_dest] return res else: raise TypeError("unknown types (%s, %s)" % (type(data), type(h)))
[ "def", "convolve", "(", "data", ",", "h", ",", "res_g", "=", "None", ",", "sub_blocks", "=", "None", ")", ":", "if", "not", "len", "(", "data", ".", "shape", ")", "in", "[", "1", ",", "2", ",", "3", "]", ":", "raise", "ValueError", "(", "\"dim = %s not supported\"", "%", "(", "len", "(", "data", ".", "shape", ")", ")", ")", "if", "len", "(", "data", ".", "shape", ")", "!=", "len", "(", "h", ".", "shape", ")", ":", "raise", "ValueError", "(", "\"dimemnsion of data (%s) and h (%s) are different\"", "%", "(", "len", "(", "data", ".", "shape", ")", ",", "len", "(", "h", ".", "shape", ")", ")", ")", "if", "isinstance", "(", "data", ",", "OCLArray", ")", "and", "isinstance", "(", "h", ",", "OCLArray", ")", ":", "return", "_convolve_buf", "(", "data", ",", "h", ",", "res_g", ")", "elif", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", "and", "isinstance", "(", "h", ",", "np", ".", "ndarray", ")", ":", "if", "sub_blocks", "==", "(", "1", ",", ")", "*", "len", "(", "data", ".", "shape", ")", "or", "sub_blocks", "is", "None", ":", "return", "_convolve_np", "(", "data", ",", "h", ")", "else", ":", "# cut the image into tile and operate on every of them", "N_sub", "=", "[", "int", "(", "np", ".", "ceil", "(", "1.", "*", "n", "/", "s", ")", ")", "for", "n", ",", "s", "in", "zip", "(", "data", ".", "shape", ",", "sub_blocks", ")", "]", "Npads", "=", "[", "int", "(", "s", "/", "2", ")", "for", "s", "in", "h", ".", "shape", "]", "res", "=", "np", ".", "empty", "(", "data", ".", "shape", ",", "np", ".", "float32", ")", "for", "data_tile", ",", "data_s_src", ",", "data_s_dest", "in", "tile_iterator", "(", "data", ",", "blocksize", "=", "N_sub", ",", "padsize", "=", "Npads", ",", "mode", "=", "\"constant\"", ")", ":", "res_tile", "=", "_convolve_np", "(", "data_tile", ".", "copy", "(", ")", ",", "h", ")", "res", "[", "data_s_src", "]", "=", "res_tile", "[", "data_s_dest", "]", "return", "res", "else", ":", "raise", "TypeError", "(", "\"unknown types (%s, %s)\"", "%", "(", "type", "(", "data", ")", ",", "type", "(", "h", ")", ")", ")" ]
convolves 1d-3d data with kernel h data and h can either be numpy arrays or gpu buffer objects (OCLArray, which must be float32 then) boundary conditions are clamping to zero at edge.
[ "convolves", "1d", "-", "3d", "data", "with", "kernel", "h" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/convolve.py#L18-L54
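A hedged usage sketch, importing by the module path recorded above; the kernel choice is an assumption.

import numpy as np
from gputools.convolve.convolve import convolve

data = np.random.rand(256, 256).astype(np.float32)
h = np.ones((5, 5), np.float32) / 25.0            # box-blur kernel
res = convolve(data, h)                           # one pass on the GPU
res_tiled = convolve(data, h, sub_blocks=(2, 2))  # tile to bound GPU memory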
15,059
maweigert/gputools
gputools/convolve/convolve.py
_convolve3_old
def _convolve3_old(data, h, dev=None): """convolves 3d data with kernel h on the GPU Device dev boundary conditions are clamping to edge. h is converted to float32 if dev == None the default one is used """ if dev is None: dev = get_device() if dev is None: raise ValueError("no OpenCLDevice found...") dtype = data.dtype.type dtypes_options = {np.float32: "", np.uint16: "-D SHORTTYPE"} if not dtype in dtypes_options: raise TypeError("data type %s not supported yet, please convert to:" % dtype, list(dtypes_options.keys())) prog = OCLProgram(abspath("kernels/convolve3.cl"), build_options=dtypes_options[dtype]) hbuf = OCLArray.from_array(h.astype(np.float32)) img = OCLImage.from_array(data) res = OCLArray.empty(data.shape, dtype=np.float32) Ns = [np.int32(n) for n in data.shape + h.shape] prog.run_kernel("convolve3d", img.shape, None, img, hbuf.data, res.data, *Ns) return res.get()
python
def _convolve3_old(data, h, dev=None): """convolves 3d data with kernel h on the GPU Device dev boundary conditions are clamping to edge. h is converted to float32 if dev == None the default one is used """ if dev is None: dev = get_device() if dev is None: raise ValueError("no OpenCLDevice found...") dtype = data.dtype.type dtypes_options = {np.float32: "", np.uint16: "-D SHORTTYPE"} if not dtype in dtypes_options: raise TypeError("data type %s not supported yet, please convert to:" % dtype, list(dtypes_options.keys())) prog = OCLProgram(abspath("kernels/convolve3.cl"), build_options=dtypes_options[dtype]) hbuf = OCLArray.from_array(h.astype(np.float32)) img = OCLImage.from_array(data) res = OCLArray.empty(data.shape, dtype=np.float32) Ns = [np.int32(n) for n in data.shape + h.shape] prog.run_kernel("convolve3d", img.shape, None, img, hbuf.data, res.data, *Ns) return res.get()
[ "def", "_convolve3_old", "(", "data", ",", "h", ",", "dev", "=", "None", ")", ":", "if", "dev", "is", "None", ":", "dev", "=", "get_device", "(", ")", "if", "dev", "is", "None", ":", "raise", "ValueError", "(", "\"no OpenCLDevice found...\"", ")", "dtype", "=", "data", ".", "dtype", ".", "type", "dtypes_options", "=", "{", "np", ".", "float32", ":", "\"\"", ",", "np", ".", "uint16", ":", "\"-D SHORTTYPE\"", "}", "if", "not", "dtype", "in", "dtypes_options", ":", "raise", "TypeError", "(", "\"data type %s not supported yet, please convert to:\"", "%", "dtype", ",", "list", "(", "dtypes_options", ".", "keys", "(", ")", ")", ")", "prog", "=", "OCLProgram", "(", "abspath", "(", "\"kernels/convolve3.cl\"", ")", ",", "build_options", "=", "dtypes_options", "[", "dtype", "]", ")", "hbuf", "=", "OCLArray", ".", "from_array", "(", "h", ".", "astype", "(", "np", ".", "float32", ")", ")", "img", "=", "OCLImage", ".", "from_array", "(", "data", ")", "res", "=", "OCLArray", ".", "empty", "(", "data", ".", "shape", ",", "dtype", "=", "np", ".", "float32", ")", "Ns", "=", "[", "np", ".", "int32", "(", "n", ")", "for", "n", "in", "data", ".", "shape", "+", "h", ".", "shape", "]", "prog", ".", "run_kernel", "(", "\"convolve3d\"", ",", "img", ".", "shape", ",", "None", ",", "img", ",", "hbuf", ".", "data", ",", "res", ".", "data", ",", "*", "Ns", ")", "return", "res", ".", "get", "(", ")" ]
convolves 3d data with kernel h on the GPU Device dev boundary conditions are clamping to edge. h is converted to float32 if dev == None the default one is used
[ "convolves", "3d", "data", "with", "kernel", "h", "on", "the", "GPU", "Device", "dev", "boundary", "conditions", "are", "clamping", "to", "edge", ".", "h", "is", "converted", "to", "float32" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/convolve.py#L116-L151
15,060
maweigert/gputools
gputools/transforms/scale.py
_scale_shape
def _scale_shape(dshape, scale = (1,1,1)): """returns the shape after scaling (should be the same as ndimage.zoom)""" nshape = np.round(np.array(dshape) * np.array(scale)) return tuple(nshape.astype(np.int))
python
def _scale_shape(dshape, scale = (1,1,1)): """returns the shape after scaling (should be the same as ndimage.zoom)""" nshape = np.round(np.array(dshape) * np.array(scale)) return tuple(nshape.astype(np.int))
[ "def", "_scale_shape", "(", "dshape", ",", "scale", "=", "(", "1", ",", "1", ",", "1", ")", ")", ":", "nshape", "=", "np", ".", "round", "(", "np", ".", "array", "(", "dshape", ")", "*", "np", ".", "array", "(", "scale", ")", ")", "return", "tuple", "(", "nshape", ".", "astype", "(", "np", ".", "int", ")", ")" ]
returns the shape after scaling (should be the same as ndimage.zoom)
[ "returns", "the", "shape", "after", "scaling", "(", "should", "be", "the", "same", "as", "ndimage", ".", "zoom", ")" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/transforms/scale.py#L17-L20
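A tiny worked example, plus a portability note: np.int was removed in NumPy 1.24, so on current NumPy the astype call needs plain int instead.

from gputools.transforms.scale import _scale_shape

_scale_shape((100, 60, 40), scale=(0.5, 0.5, 0.5))   # -> (50, 30, 20)
_scale_shape((100, 60, 40), scale=(1.5, 1.0, 1.0))   # -> (150, 60, 40)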
15,061
maweigert/gputools
gputools/fft/fftshift.py
fftshift
def fftshift(arr_obj, axes = None, res_g = None, return_buffer = False): """ gpu version of fftshift for numpy arrays or OCLArrays Parameters ---------- arr_obj: numpy array or OCLArray (float32/complex64) the array to be fftshifted axes: list or None the axes over which to shift (like np.fft.fftshift) if None, all axes are taken res_g: if given, fills it with the result (has to be same shape and dtype as arr_obj) else internally creates a new one Returns ------- if return_buffer, returns the result as (well :) OCLArray else returns the result as numpy array """ if axes is None: axes = list(range(arr_obj.ndim)) if isinstance(arr_obj, OCLArray): if not arr_obj.dtype.type in DTYPE_KERNEL_NAMES: raise NotImplementedError("only works for float32 or complex64") elif isinstance(arr_obj, np.ndarray): if np.iscomplexobj(arr_obj): arr_obj = OCLArray.from_array(arr_obj.astype(np.complex64,copy = False)) else: arr_obj = OCLArray.from_array(arr_obj.astype(np.float32,copy = False)) else: raise ValueError("unknown type (%s)"%(type(arr_obj))) if not np.all([arr_obj.shape[a]%2==0 for a in axes]): raise NotImplementedError("only works on axes of even dimensions") if res_g is None: res_g = OCLArray.empty_like(arr_obj) # iterate over all axes # FIXME: this is still rather inefficient in_g = arr_obj for ax in axes: _fftshift_single(in_g, res_g, ax) in_g = res_g if return_buffer: return res_g else: return res_g.get()
python
def fftshift(arr_obj, axes = None, res_g = None, return_buffer = False): """ gpu version of fftshift for numpy arrays or OCLArrays Parameters ---------- arr_obj: numpy array or OCLArray (float32/complex64) the array to be fftshifted axes: list or None the axes over which to shift (like np.fft.fftshift) if None, all axes are taken res_g: if given, fills it with the result (has to be same shape and dtype as arr_obj) else internally creates a new one Returns ------- if return_buffer, returns the result as (well :) OCLArray else returns the result as numpy array """ if axes is None: axes = list(range(arr_obj.ndim)) if isinstance(arr_obj, OCLArray): if not arr_obj.dtype.type in DTYPE_KERNEL_NAMES: raise NotImplementedError("only works for float32 or complex64") elif isinstance(arr_obj, np.ndarray): if np.iscomplexobj(arr_obj): arr_obj = OCLArray.from_array(arr_obj.astype(np.complex64,copy = False)) else: arr_obj = OCLArray.from_array(arr_obj.astype(np.float32,copy = False)) else: raise ValueError("unknown type (%s)"%(type(arr_obj))) if not np.all([arr_obj.shape[a]%2==0 for a in axes]): raise NotImplementedError("only works on axes of even dimensions") if res_g is None: res_g = OCLArray.empty_like(arr_obj) # iterate over all axes # FIXME: this is still rather inefficient in_g = arr_obj for ax in axes: _fftshift_single(in_g, res_g, ax) in_g = res_g if return_buffer: return res_g else: return res_g.get()
[ "def", "fftshift", "(", "arr_obj", ",", "axes", "=", "None", ",", "res_g", "=", "None", ",", "return_buffer", "=", "False", ")", ":", "if", "axes", "is", "None", ":", "axes", "=", "list", "(", "range", "(", "arr_obj", ".", "ndim", ")", ")", "if", "isinstance", "(", "arr_obj", ",", "OCLArray", ")", ":", "if", "not", "arr_obj", ".", "dtype", ".", "type", "in", "DTYPE_KERNEL_NAMES", ":", "raise", "NotImplementedError", "(", "\"only works for float32 or complex64\"", ")", "elif", "isinstance", "(", "arr_obj", ",", "np", ".", "ndarray", ")", ":", "if", "np", ".", "iscomplexobj", "(", "arr_obj", ")", ":", "arr_obj", "=", "OCLArray", ".", "from_array", "(", "arr_obj", ".", "astype", "(", "np", ".", "complex64", ",", "copy", "=", "False", ")", ")", "else", ":", "arr_obj", "=", "OCLArray", ".", "from_array", "(", "arr_obj", ".", "astype", "(", "np", ".", "float32", ",", "copy", "=", "False", ")", ")", "else", ":", "raise", "ValueError", "(", "\"unknown type (%s)\"", "%", "(", "type", "(", "arr_obj", ")", ")", ")", "if", "not", "np", ".", "all", "(", "[", "arr_obj", ".", "shape", "[", "a", "]", "%", "2", "==", "0", "for", "a", "in", "axes", "]", ")", ":", "raise", "NotImplementedError", "(", "\"only works on axes of even dimensions\"", ")", "if", "res_g", "is", "None", ":", "res_g", "=", "OCLArray", ".", "empty_like", "(", "arr_obj", ")", "# iterate over all axes", "# FIXME: this is still rather inefficient", "in_g", "=", "arr_obj", "for", "ax", "in", "axes", ":", "_fftshift_single", "(", "in_g", ",", "res_g", ",", "ax", ")", "in_g", "=", "res_g", "if", "return_buffer", ":", "return", "res_g", "else", ":", "return", "res_g", ".", "get", "(", ")" ]
gpu version of fftshift for numpy arrays or OCLArrays Parameters ---------- arr_obj: numpy array or OCLArray (float32/complex64) the array to be fftshifted axes: list or None the axes over which to shift (like np.fft.fftshift) if None, all axes are taken res_g: if given, fills it with the result (has to be same shape and dtype as arr_obj) else internally creates a new one Returns ------- if return_buffer, returns the result as (well :) OCLArray else returns the result as numpy array
[ "gpu", "version", "of", "fftshift", "for", "numpy", "arrays", "or", "OCLArrays" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/fft/fftshift.py#L27-L80
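Since elements are only moved, never recomputed, the GPU result should match NumPy exactly on even-sized axes. A sketch, importing by the module path recorded above:

import numpy as np
from gputools.fft.fftshift import fftshift

x = np.random.rand(64, 64).astype(np.float32)
assert np.array_equal(fftshift(x), np.fft.fftshift(x))
y = fftshift(x, axes=[0])   # shift along a single axis only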
15,062
maweigert/gputools
gputools/fft/fftshift.py
_fftshift_single
def _fftshift_single(d_g, res_g, ax = 0): """ basic fftshift of an OCLArray shape(d_g) = [N_0,N_1...., N, .... N_{k-1, N_k] = [N1, N, N2] the we can address each element in the flat buffer by index = i + N2*j + N2*N*k where i = 1 .. N2 j = 1 .. N k = 1 .. N1 and the swap of elements is performed on the index j """ dtype_kernel_name = {np.float32:"fftshift_1_f", np.complex64:"fftshift_1_c" } N = d_g.shape[ax] N1 = 1 if ax==0 else np.prod(d_g.shape[:ax]) N2 = 1 if ax == len(d_g.shape)-1 else np.prod(d_g.shape[ax+1:]) dtype = d_g.dtype.type prog = OCLProgram(abspath("kernels/fftshift.cl")) prog.run_kernel(dtype_kernel_name[dtype],(N2,N//2,N1),None, d_g.data, res_g.data, np.int32(N), np.int32(N2)) return res_g
python
def _fftshift_single(d_g, res_g, ax = 0): """ basic fftshift of an OCLArray shape(d_g) = [N_0,N_1...., N, .... N_{k-1, N_k] = [N1, N, N2] the we can address each element in the flat buffer by index = i + N2*j + N2*N*k where i = 1 .. N2 j = 1 .. N k = 1 .. N1 and the swap of elements is performed on the index j """ dtype_kernel_name = {np.float32:"fftshift_1_f", np.complex64:"fftshift_1_c" } N = d_g.shape[ax] N1 = 1 if ax==0 else np.prod(d_g.shape[:ax]) N2 = 1 if ax == len(d_g.shape)-1 else np.prod(d_g.shape[ax+1:]) dtype = d_g.dtype.type prog = OCLProgram(abspath("kernels/fftshift.cl")) prog.run_kernel(dtype_kernel_name[dtype],(N2,N//2,N1),None, d_g.data, res_g.data, np.int32(N), np.int32(N2)) return res_g
[ "def", "_fftshift_single", "(", "d_g", ",", "res_g", ",", "ax", "=", "0", ")", ":", "dtype_kernel_name", "=", "{", "np", ".", "float32", ":", "\"fftshift_1_f\"", ",", "np", ".", "complex64", ":", "\"fftshift_1_c\"", "}", "N", "=", "d_g", ".", "shape", "[", "ax", "]", "N1", "=", "1", "if", "ax", "==", "0", "else", "np", ".", "prod", "(", "d_g", ".", "shape", "[", ":", "ax", "]", ")", "N2", "=", "1", "if", "ax", "==", "len", "(", "d_g", ".", "shape", ")", "-", "1", "else", "np", ".", "prod", "(", "d_g", ".", "shape", "[", "ax", "+", "1", ":", "]", ")", "dtype", "=", "d_g", ".", "dtype", ".", "type", "prog", "=", "OCLProgram", "(", "abspath", "(", "\"kernels/fftshift.cl\"", ")", ")", "prog", ".", "run_kernel", "(", "dtype_kernel_name", "[", "dtype", "]", ",", "(", "N2", ",", "N", "//", "2", ",", "N1", ")", ",", "None", ",", "d_g", ".", "data", ",", "res_g", ".", "data", ",", "np", ".", "int32", "(", "N", ")", ",", "np", ".", "int32", "(", "N2", ")", ")", "return", "res_g" ]
basic fftshift of an OCLArray shape(d_g) = [N_0,N_1...., N, .... N_{k-1, N_k] = [N1, N, N2] the we can address each element in the flat buffer by index = i + N2*j + N2*N*k where i = 1 .. N2 j = 1 .. N k = 1 .. N1 and the swap of elements is performed on the index j
[ "basic", "fftshift", "of", "an", "OCLArray" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/fft/fftshift.py#L83-L119
15,063
maweigert/gputools
gputools/fft/oclfft_convolve.py
fft_convolve
def fft_convolve(data, h, res_g = None, plan = None, inplace = False, kernel_is_fft = False, kernel_is_fftshifted = False): """ convolves data with kernel h via FFTs data should be either a numpy array or a OCLArray (see doc for fft) both data and h should be same shape if data/h are OCLArrays, then: - type should be complex64 - shape should be equal and power of two - h is assumed to be already fftshifted (otherwise set kernel_is_fftshifted to true) """ if isinstance(data,np.ndarray): return _fft_convolve_numpy(data, h, plan = plan, kernel_is_fft = kernel_is_fft, kernel_is_fftshifted = kernel_is_fftshifted) elif isinstance(data,OCLArray): return _fft_convolve_gpu(data,h, res_g = res_g, plan = plan, inplace = inplace, kernel_is_fft = kernel_is_fft) else: raise TypeError("array argument (1) has bad type: %s"%type(data))
python
def fft_convolve(data, h, res_g = None, plan = None, inplace = False, kernel_is_fft = False, kernel_is_fftshifted = False): """ convolves data with kernel h via FFTs data should be either a numpy array or a OCLArray (see doc for fft) both data and h should be same shape if data/h are OCLArrays, then: - type should be complex64 - shape should be equal and power of two - h is assumed to be already fftshifted (otherwise set kernel_is_fftshifted to true) """ if isinstance(data,np.ndarray): return _fft_convolve_numpy(data, h, plan = plan, kernel_is_fft = kernel_is_fft, kernel_is_fftshifted = kernel_is_fftshifted) elif isinstance(data,OCLArray): return _fft_convolve_gpu(data,h, res_g = res_g, plan = plan, inplace = inplace, kernel_is_fft = kernel_is_fft) else: raise TypeError("array argument (1) has bad type: %s"%type(data))
[ "def", "fft_convolve", "(", "data", ",", "h", ",", "res_g", "=", "None", ",", "plan", "=", "None", ",", "inplace", "=", "False", ",", "kernel_is_fft", "=", "False", ",", "kernel_is_fftshifted", "=", "False", ")", ":", "if", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", ":", "return", "_fft_convolve_numpy", "(", "data", ",", "h", ",", "plan", "=", "plan", ",", "kernel_is_fft", "=", "kernel_is_fft", ",", "kernel_is_fftshifted", "=", "kernel_is_fftshifted", ")", "elif", "isinstance", "(", "data", ",", "OCLArray", ")", ":", "return", "_fft_convolve_gpu", "(", "data", ",", "h", ",", "res_g", "=", "res_g", ",", "plan", "=", "plan", ",", "inplace", "=", "inplace", ",", "kernel_is_fft", "=", "kernel_is_fft", ")", "else", ":", "raise", "TypeError", "(", "\"array argument (1) has bad type: %s\"", "%", "type", "(", "data", ")", ")" ]
convolves data with kernel h via FFTs data should be either a numpy array or a OCLArray (see doc for fft) both data and h should be same shape if data/h are OCLArrays, then: - type should be complex64 - shape should be equal and power of two - h is assumed to be already fftshifted (otherwise set kernel_is_fftshifted to true)
[ "convolves", "data", "with", "kernel", "h", "via", "FFTs" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/fft/oclfft_convolve.py#L15-L45
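A sketch using a centred delta kernel, which FFT convolution should return (up to float32 error) as the input itself; note the numpy path takes abs() of the complex result, and matching power-of-two shapes keep the OpenCL FFT happy.

import numpy as np
from gputools.fft.oclfft_convolve import fft_convolve

data = np.random.rand(256, 256).astype(np.float32)
h = np.zeros_like(data)
h[128, 128] = 1.0                    # delta at the centre
out = fft_convolve(data, h)          # convolving with a delta ~ identity
assert np.allclose(out, data, atol=1e-3)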
15,064
maweigert/gputools
gputools/fft/oclfft_convolve.py
_fft_convolve_numpy
def _fft_convolve_numpy(data, h, plan = None, kernel_is_fft = False, kernel_is_fftshifted = False): """ convolving via opencl fft for numpy arrays data and h must have the same size """ if data.shape != h.shape: raise ValueError("data and kernel must have same size! %s vs %s "%(str(data.shape),str(h.shape))) data_g = OCLArray.from_array(data.astype(np.complex64)) if not kernel_is_fftshifted: h = np.fft.fftshift(h) h_g = OCLArray.from_array(h.astype(np.complex64)) res_g = OCLArray.empty_like(data_g) _fft_convolve_gpu(data_g,h_g,res_g = res_g, plan = plan, kernel_is_fft = kernel_is_fft) res = abs(res_g.get()) del data_g del h_g del res_g return res
python
def _fft_convolve_numpy(data, h, plan = None,
                        kernel_is_fft = False,
                        kernel_is_fftshifted = False):
    """ convolving via opencl fft for numpy arrays

    data and h must have the same size
    """
    if data.shape != h.shape:
        raise ValueError("data and kernel must have same size! %s vs %s" % (str(data.shape), str(h.shape)))

    data_g = OCLArray.from_array(data.astype(np.complex64))

    if not kernel_is_fftshifted:
        h = np.fft.fftshift(h)

    h_g = OCLArray.from_array(h.astype(np.complex64))
    res_g = OCLArray.empty_like(data_g)

    _fft_convolve_gpu(data_g, h_g, res_g = res_g,
                      plan = plan,
                      kernel_is_fft = kernel_is_fft)

    res = abs(res_g.get())

    del data_g
    del h_g
    del res_g

    return res
[ "def", "_fft_convolve_numpy", "(", "data", ",", "h", ",", "plan", "=", "None", ",", "kernel_is_fft", "=", "False", ",", "kernel_is_fftshifted", "=", "False", ")", ":", "if", "data", ".", "shape", "!=", "h", ".", "shape", ":", "raise", "ValueError", "(", "\"data and kernel must have same size! %s vs %s \"", "%", "(", "str", "(", "data", ".", "shape", ")", ",", "str", "(", "h", ".", "shape", ")", ")", ")", "data_g", "=", "OCLArray", ".", "from_array", "(", "data", ".", "astype", "(", "np", ".", "complex64", ")", ")", "if", "not", "kernel_is_fftshifted", ":", "h", "=", "np", ".", "fft", ".", "fftshift", "(", "h", ")", "h_g", "=", "OCLArray", ".", "from_array", "(", "h", ".", "astype", "(", "np", ".", "complex64", ")", ")", "res_g", "=", "OCLArray", ".", "empty_like", "(", "data_g", ")", "_fft_convolve_gpu", "(", "data_g", ",", "h_g", ",", "res_g", "=", "res_g", ",", "plan", "=", "plan", ",", "kernel_is_fft", "=", "kernel_is_fft", ")", "res", "=", "abs", "(", "res_g", ".", "get", "(", ")", ")", "del", "data_g", "del", "h_g", "del", "res_g", "return", "res" ]
convolving via opencl fft for numpy arrays

data and h must have the same size
[ "convolving", "via", "opencl", "fft", "for", "numpy", "arrays" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/fft/oclfft_convolve.py#L49-L80
15,065
maweigert/gputools
gputools/fft/oclfft_convolve.py
_fft_convolve_gpu
def _fft_convolve_gpu(data_g, h_g, res_g = None,
                      plan = None, inplace = False,
                      kernel_is_fft = False):
    """ fft convolve for gpu buffer
    """
    assert_bufs_type(np.complex64, data_g, h_g)

    if data_g.shape != h_g.shape:
        raise ValueError("data and kernel must have same size! %s vs %s" % (str(data_g.shape), str(h_g.shape)))

    if plan is None:
        plan = fft_plan(data_g.shape)

    if inplace:
        res_g = data_g
    else:
        if res_g is None:
            res_g = OCLArray.empty(data_g.shape, data_g.dtype)
        res_g.copy_buffer(data_g)

    if not kernel_is_fft:
        kern_g = OCLArray.empty(h_g.shape, h_g.dtype)
        kern_g.copy_buffer(h_g)
        fft(kern_g, inplace=True, plan=plan)
    else:
        kern_g = h_g

    fft(res_g, inplace=True, plan=plan)

    # multiply in fourier domain
    _complex_multiply_kernel(res_g, kern_g)

    fft(res_g, inplace=True, inverse=True, plan=plan)

    return res_g
python
def _fft_convolve_gpu(data_g, h_g, res_g = None,
                      plan = None, inplace = False,
                      kernel_is_fft = False):
    """ fft convolve for gpu buffer
    """
    assert_bufs_type(np.complex64, data_g, h_g)

    if data_g.shape != h_g.shape:
        raise ValueError("data and kernel must have same size! %s vs %s" % (str(data_g.shape), str(h_g.shape)))

    if plan is None:
        plan = fft_plan(data_g.shape)

    if inplace:
        res_g = data_g
    else:
        if res_g is None:
            res_g = OCLArray.empty(data_g.shape, data_g.dtype)
        res_g.copy_buffer(data_g)

    if not kernel_is_fft:
        kern_g = OCLArray.empty(h_g.shape, h_g.dtype)
        kern_g.copy_buffer(h_g)
        fft(kern_g, inplace=True, plan=plan)
    else:
        kern_g = h_g

    fft(res_g, inplace=True, plan=plan)

    # multiply in fourier domain
    _complex_multiply_kernel(res_g, kern_g)

    fft(res_g, inplace=True, inverse=True, plan=plan)

    return res_g
[ "def", "_fft_convolve_gpu", "(", "data_g", ",", "h_g", ",", "res_g", "=", "None", ",", "plan", "=", "None", ",", "inplace", "=", "False", ",", "kernel_is_fft", "=", "False", ")", ":", "assert_bufs_type", "(", "np", ".", "complex64", ",", "data_g", ",", "h_g", ")", "if", "data_g", ".", "shape", "!=", "h_g", ".", "shape", ":", "raise", "ValueError", "(", "\"data and kernel must have same size! %s vs %s \"", "%", "(", "str", "(", "data_g", ".", "shape", ")", ",", "str", "(", "h_g", ".", "shape", ")", ")", ")", "if", "plan", "is", "None", ":", "plan", "=", "fft_plan", "(", "data_g", ".", "shape", ")", "if", "inplace", ":", "res_g", "=", "data_g", "else", ":", "if", "res_g", "is", "None", ":", "res_g", "=", "OCLArray", ".", "empty", "(", "data_g", ".", "shape", ",", "data_g", ".", "dtype", ")", "res_g", ".", "copy_buffer", "(", "data_g", ")", "if", "not", "kernel_is_fft", ":", "kern_g", "=", "OCLArray", ".", "empty", "(", "h_g", ".", "shape", ",", "h_g", ".", "dtype", ")", "kern_g", ".", "copy_buffer", "(", "h_g", ")", "fft", "(", "kern_g", ",", "inplace", "=", "True", ",", "plan", "=", "plan", ")", "else", ":", "kern_g", "=", "h_g", "fft", "(", "res_g", ",", "inplace", "=", "True", ",", "plan", "=", "plan", ")", "#multiply in fourier domain", "_complex_multiply_kernel", "(", "res_g", ",", "kern_g", ")", "fft", "(", "res_g", ",", "inplace", "=", "True", ",", "inverse", "=", "True", ",", "plan", "=", "plan", ")", "return", "res_g" ]
fft convolve for gpu buffer
[ "fft", "convolve", "for", "gpu", "buffer" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/fft/oclfft_convolve.py#L83-L124
15,066
maweigert/gputools
gputools/convolve/median_filter.py
median_filter
def median_filter(data, size=3, cval = 0, res_g=None, sub_blocks=None):
    """
    median filter of given size

    Parameters
    ----------
    data: 2 or 3 dimensional ndarray or OCLArray of type float32
        input data
    size: scalar, tuple
        the size of the patch to consider
    cval: scalar,
        the constant value for out of border access (cf mode = "constant")
    res_g: OCLArray
        store result in buffer if given
    sub_blocks:
        perform over subblock tiling (only if data is ndarray)

    Returns
    -------
        filtered image or None (if OCLArray)
    """
    if data.ndim == 2:
        _filt = make_filter(_median_filter_gpu_2d())
    elif data.ndim == 3:
        _filt = make_filter(_median_filter_gpu_3d())
    else:
        raise ValueError("currently only 2 or 3 dimensional data is supported")

    return _filt(data=data, size=size, cval=cval, res_g=res_g, sub_blocks=sub_blocks)
python
def median_filter(data, size=3, cval = 0, res_g=None, sub_blocks=None):
    """
    median filter of given size

    Parameters
    ----------
    data: 2 or 3 dimensional ndarray or OCLArray of type float32
        input data
    size: scalar, tuple
        the size of the patch to consider
    cval: scalar,
        the constant value for out of border access (cf mode = "constant")
    res_g: OCLArray
        store result in buffer if given
    sub_blocks:
        perform over subblock tiling (only if data is ndarray)

    Returns
    -------
        filtered image or None (if OCLArray)
    """
    if data.ndim == 2:
        _filt = make_filter(_median_filter_gpu_2d())
    elif data.ndim == 3:
        _filt = make_filter(_median_filter_gpu_3d())
    else:
        raise ValueError("currently only 2 or 3 dimensional data is supported")

    return _filt(data=data, size=size, cval=cval, res_g=res_g, sub_blocks=sub_blocks)
[ "def", "median_filter", "(", "data", ",", "size", "=", "3", ",", "cval", "=", "0", ",", "res_g", "=", "None", ",", "sub_blocks", "=", "None", ")", ":", "if", "data", ".", "ndim", "==", "2", ":", "_filt", "=", "make_filter", "(", "_median_filter_gpu_2d", "(", ")", ")", "elif", "data", ".", "ndim", "==", "3", ":", "_filt", "=", "make_filter", "(", "_median_filter_gpu_3d", "(", ")", ")", "else", ":", "raise", "ValueError", "(", "\"currently only 2 or 3 dimensional data is supported\"", ")", "return", "_filt", "(", "data", "=", "data", ",", "size", "=", "size", ",", "cval", "=", "cval", ",", "res_g", "=", "res_g", ",", "sub_blocks", "=", "sub_blocks", ")" ]
median filter of given size

Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
    input data
size: scalar, tuple
    the size of the patch to consider
cval: scalar,
    the constant value for out of border access (cf mode = "constant")
res_g: OCLArray
    store result in buffer if given
sub_blocks:
    perform over subblock tiling (only if data is ndarray)

Returns
-------
    filtered image or None (if OCLArray)
[ "median", "filter", "of", "given", "size" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/median_filter.py#L112-L141
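A short usage sketch for median_filter (hedged: assumes the top-level gputools export):

import numpy as np
from gputools import median_filter

# made-up noisy image with salt-and-pepper outliers
img = np.random.uniform(0, 1, (128, 128)).astype(np.float32)
img[::17, ::13] = 10.

out = median_filter(img, size=3)   # 3x3 median suppresses the outliers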
15,067
maweigert/gputools
gputools/transforms/transformations.py
rotate
def rotate(data, axis=(1., 0, 0), angle=0., center=None, mode="constant", interpolation="linear"):
    """
    rotates data around axis by a given angle

    Parameters
    ----------
    data: ndarray
        3d array
    axis: tuple
        axis to rotate by angle about
        axis = (x,y,z)
    angle: float
    center: tuple or None
        origin of rotation (cz,cy,cx) in pixels
        if None, center is the middle of data
    mode: string
        boundary mode, one of the following:
        'constant' - pads with zeros
        'edge'     - pads with edge values
        'wrap'     - pads with the repeated version of the input
    interpolation: string
        interpolation mode, one of the following
        'linear'
        'nearest'

    Returns
    -------
    res: ndarray
        rotated array (same shape as input)
    """
    if center is None:
        center = tuple([s // 2 for s in data.shape])

    cx, cy, cz = center
    m = np.dot(mat4_translate(cx, cy, cz),
               np.dot(mat4_rotate(angle, *axis),
                      mat4_translate(-cx, -cy, -cz)))
    m = np.linalg.inv(m)
    return affine(data, m, mode=mode, interpolation=interpolation)
python
def rotate(data, axis=(1., 0, 0), angle=0., center=None, mode="constant", interpolation="linear"):
    """
    rotates data around axis by a given angle

    Parameters
    ----------
    data: ndarray
        3d array
    axis: tuple
        axis to rotate by angle about
        axis = (x,y,z)
    angle: float
    center: tuple or None
        origin of rotation (cz,cy,cx) in pixels
        if None, center is the middle of data
    mode: string
        boundary mode, one of the following:
        'constant' - pads with zeros
        'edge'     - pads with edge values
        'wrap'     - pads with the repeated version of the input
    interpolation: string
        interpolation mode, one of the following
        'linear'
        'nearest'

    Returns
    -------
    res: ndarray
        rotated array (same shape as input)
    """
    if center is None:
        center = tuple([s // 2 for s in data.shape])

    cx, cy, cz = center
    m = np.dot(mat4_translate(cx, cy, cz),
               np.dot(mat4_rotate(angle, *axis),
                      mat4_translate(-cx, -cy, -cz)))
    m = np.linalg.inv(m)
    return affine(data, m, mode=mode, interpolation=interpolation)
[ "def", "rotate", "(", "data", ",", "axis", "=", "(", "1.", ",", "0", ",", "0", ")", ",", "angle", "=", "0.", ",", "center", "=", "None", ",", "mode", "=", "\"constant\"", ",", "interpolation", "=", "\"linear\"", ")", ":", "if", "center", "is", "None", ":", "center", "=", "tuple", "(", "[", "s", "//", "2", "for", "s", "in", "data", ".", "shape", "]", ")", "cx", ",", "cy", ",", "cz", "=", "center", "m", "=", "np", ".", "dot", "(", "mat4_translate", "(", "cx", ",", "cy", ",", "cz", ")", ",", "np", ".", "dot", "(", "mat4_rotate", "(", "angle", ",", "*", "axis", ")", ",", "mat4_translate", "(", "-", "cx", ",", "-", "cy", ",", "-", "cz", ")", ")", ")", "m", "=", "np", ".", "linalg", ".", "inv", "(", "m", ")", "return", "affine", "(", "data", ",", "m", ",", "mode", "=", "mode", ",", "interpolation", "=", "interpolation", ")" ]
rotates data around axis by a given angle

Parameters
----------
data: ndarray
    3d array
axis: tuple
    axis to rotate by angle about
    axis = (x,y,z)
angle: float
center: tuple or None
    origin of rotation (cz,cy,cx) in pixels
    if None, center is the middle of data
mode: string
    boundary mode, one of the following:
    'constant' - pads with zeros
    'edge'     - pads with edge values
    'wrap'     - pads with the repeated version of the input
interpolation: string
    interpolation mode, one of the following
    'linear'
    'nearest'

Returns
-------
res: ndarray
    rotated array (same shape as input)
[ "rotates", "data", "around", "axis", "by", "a", "given", "angle" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/transforms/transformations.py#L128-L171
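A hedged usage sketch for rotate (assumptions: gputools exports it at the top level, and the angle is in radians, which is the usual convention for a mat4_rotate-style helper):

import numpy as np
from gputools import rotate

vol = np.zeros((64, 64, 64), np.float32)
vol[32, 20:44, 30:34] = 1.   # a small bar to rotate

# quarter turn about the x axis, around the volume center
rotated = rotate(vol, axis=(1., 0., 0.), angle=np.pi / 2)
print(rotated.shape)   # (64, 64, 64)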
15,068
maweigert/gputools
gputools/transforms/transformations.py
map_coordinates
def map_coordinates(data, coordinates, interpolation="linear", mode='constant'):
    """
    Map data to new coordinates by interpolation.

    The array of coordinates is used to find, for each point in the output,
    the corresponding coordinates in the input.

    should correspond to scipy.ndimage.map_coordinates

    Parameters
    ----------
    data
    coordinates
    output
    interpolation
    mode
    cval
    prefilter

    Returns
    -------
    """
    if not (isinstance(data, np.ndarray) and data.ndim in (2, 3)):
        raise ValueError("input data has to be a 2d or 3d array!")

    # keep fractional coordinates (an int32 cast here would truncate them
    # and defeat linear interpolation)
    coordinates = np.asarray(coordinates, np.float32)
    if not (coordinates.shape[0] == data.ndim):
        raise ValueError("coordinates has to be of shape (data.ndim, m)")

    interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"],
                             "nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]}

    mode_defines = {"constant": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP"],
                    "wrap": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT"],
                    "edge": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE"]}

    if interpolation not in interpolation_defines:
        raise KeyError("interpolation = '%s' not defined, valid: %s" % (interpolation, list(interpolation_defines.keys())))

    if mode not in mode_defines:
        raise KeyError("mode = '%s' not defined, valid: %s" % (mode, list(mode_defines.keys())))

    if data.dtype.type not in cl_buffer_datatype_dict:
        raise KeyError("dtype %s not supported yet (%s)" % (data.dtype.type, tuple(cl_buffer_datatype_dict.keys())))

    dtype_defines = ["-D", "DTYPE=%s" % cl_buffer_datatype_dict[data.dtype.type]]

    d_im = OCLImage.from_array(data)
    coordinates_g = OCLArray.from_array(coordinates.astype(np.float32, copy=False))
    res_g = OCLArray.empty(coordinates.shape[1], data.dtype)

    prog = OCLProgram(abspath("kernels/map_coordinates.cl"),
                      build_options=interpolation_defines[interpolation]
                                    + mode_defines[mode] + dtype_defines)

    kernel = "map_coordinates{ndim}".format(ndim=data.ndim)

    prog.run_kernel(kernel, (coordinates.shape[-1],), None,
                    d_im, res_g.data, coordinates_g.data)

    return res_g.get()
python
def map_coordinates(data, coordinates, interpolation="linear", mode='constant'):
    """
    Map data to new coordinates by interpolation.

    The array of coordinates is used to find, for each point in the output,
    the corresponding coordinates in the input.

    should correspond to scipy.ndimage.map_coordinates

    Parameters
    ----------
    data
    coordinates
    output
    interpolation
    mode
    cval
    prefilter

    Returns
    -------
    """
    if not (isinstance(data, np.ndarray) and data.ndim in (2, 3)):
        raise ValueError("input data has to be a 2d or 3d array!")

    # keep fractional coordinates (an int32 cast here would truncate them
    # and defeat linear interpolation)
    coordinates = np.asarray(coordinates, np.float32)
    if not (coordinates.shape[0] == data.ndim):
        raise ValueError("coordinates has to be of shape (data.ndim, m)")

    interpolation_defines = {"linear": ["-D", "SAMPLER_FILTER=CLK_FILTER_LINEAR"],
                             "nearest": ["-D", "SAMPLER_FILTER=CLK_FILTER_NEAREST"]}

    mode_defines = {"constant": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP"],
                    "wrap": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT"],
                    "edge": ["-D", "SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE"]}

    if interpolation not in interpolation_defines:
        raise KeyError("interpolation = '%s' not defined, valid: %s" % (interpolation, list(interpolation_defines.keys())))

    if mode not in mode_defines:
        raise KeyError("mode = '%s' not defined, valid: %s" % (mode, list(mode_defines.keys())))

    if data.dtype.type not in cl_buffer_datatype_dict:
        raise KeyError("dtype %s not supported yet (%s)" % (data.dtype.type, tuple(cl_buffer_datatype_dict.keys())))

    dtype_defines = ["-D", "DTYPE=%s" % cl_buffer_datatype_dict[data.dtype.type]]

    d_im = OCLImage.from_array(data)
    coordinates_g = OCLArray.from_array(coordinates.astype(np.float32, copy=False))
    res_g = OCLArray.empty(coordinates.shape[1], data.dtype)

    prog = OCLProgram(abspath("kernels/map_coordinates.cl"),
                      build_options=interpolation_defines[interpolation]
                                    + mode_defines[mode] + dtype_defines)

    kernel = "map_coordinates{ndim}".format(ndim=data.ndim)

    prog.run_kernel(kernel, (coordinates.shape[-1],), None,
                    d_im, res_g.data, coordinates_g.data)

    return res_g.get()
[ "def", "map_coordinates", "(", "data", ",", "coordinates", ",", "interpolation", "=", "\"linear\"", ",", "mode", "=", "'constant'", ")", ":", "if", "not", "(", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", "and", "data", ".", "ndim", "in", "(", "2", ",", "3", ")", ")", ":", "raise", "ValueError", "(", "\"input data has to be a 2d or 3d array!\"", ")", "coordinates", "=", "np", ".", "asarray", "(", "coordinates", ",", "np", ".", "int32", ")", "if", "not", "(", "coordinates", ".", "shape", "[", "0", "]", "==", "data", ".", "ndim", ")", ":", "raise", "ValueError", "(", "\"coordinate has to be of shape (data.ndim,m) \"", ")", "interpolation_defines", "=", "{", "\"linear\"", ":", "[", "\"-D\"", ",", "\"SAMPLER_FILTER=CLK_FILTER_LINEAR\"", "]", ",", "\"nearest\"", ":", "[", "\"-D\"", ",", "\"SAMPLER_FILTER=CLK_FILTER_NEAREST\"", "]", "}", "mode_defines", "=", "{", "\"constant\"", ":", "[", "\"-D\"", ",", "\"SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP\"", "]", ",", "\"wrap\"", ":", "[", "\"-D\"", ",", "\"SAMPLER_ADDRESS=CLK_ADDRESS_REPEAT\"", "]", ",", "\"edge\"", ":", "[", "\"-D\"", ",", "\"SAMPLER_ADDRESS=CLK_ADDRESS_CLAMP_TO_EDGE\"", "]", "}", "if", "not", "interpolation", "in", "interpolation_defines", ":", "raise", "KeyError", "(", "\"interpolation = '%s' not defined ,valid: %s\"", "%", "(", "interpolation", ",", "list", "(", "interpolation_defines", ".", "keys", "(", ")", ")", ")", ")", "if", "not", "mode", "in", "mode_defines", ":", "raise", "KeyError", "(", "\"mode = '%s' not defined ,valid: %s\"", "%", "(", "mode", ",", "list", "(", "mode_defines", ".", "keys", "(", ")", ")", ")", ")", "if", "not", "data", ".", "dtype", ".", "type", "in", "cl_buffer_datatype_dict", ":", "raise", "KeyError", "(", "\"dtype %s not supported yet (%s)\"", "%", "(", "data", ".", "dtype", ".", "type", ",", "tuple", "(", "cl_buffer_datatype_dict", ".", "keys", "(", ")", ")", ")", ")", "dtype_defines", "=", "[", "\"-D\"", ",", "\"DTYPE=%s\"", "%", "cl_buffer_datatype_dict", "[", "data", ".", "dtype", ".", "type", "]", "]", "d_im", "=", "OCLImage", ".", "from_array", "(", "data", ")", "coordinates_g", "=", "OCLArray", ".", "from_array", "(", "coordinates", ".", "astype", "(", "np", ".", "float32", ",", "copy", "=", "False", ")", ")", "res_g", "=", "OCLArray", ".", "empty", "(", "coordinates", ".", "shape", "[", "1", "]", ",", "data", ".", "dtype", ")", "prog", "=", "OCLProgram", "(", "abspath", "(", "\"kernels/map_coordinates.cl\"", ")", ",", "build_options", "=", "interpolation_defines", "[", "interpolation", "]", "+", "mode_defines", "[", "mode", "]", "+", "dtype_defines", ")", "kernel", "=", "\"map_coordinates{ndim}\"", ".", "format", "(", "ndim", "=", "data", ".", "ndim", ")", "prog", ".", "run_kernel", "(", "kernel", ",", "(", "coordinates", ".", "shape", "[", "-", "1", "]", ",", ")", ",", "None", ",", "d_im", ",", "res_g", ".", "data", ",", "coordinates_g", ".", "data", ")", "return", "res_g", ".", "get", "(", ")" ]
Map data to new coordinates by interpolation.

The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input.

should correspond to scipy.ndimage.map_coordinates

Parameters
----------
data
coordinates
output
interpolation
mode
cval
prefilter

Returns
-------
[ "Map", "data", "to", "new", "coordinates", "by", "interpolation", ".", "The", "array", "of", "coordinates", "is", "used", "to", "find", "for", "each", "point", "in", "the", "output", "the", "corresponding", "coordinates", "in", "the", "input", "." ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/transforms/transformations.py#L174-L237
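A usage sketch mirroring scipy.ndimage.map_coordinates (assuming the top-level export; the expected values rely on fractional coordinates being preserved, per the cast note in the code above):

import numpy as np
from gputools import map_coordinates

a = np.arange(12.).reshape((4, 3)).astype(np.float32)

# sample at (row, col) = (0.5, 0.5) and (2, 1); the first point is
# linearly interpolated between its four neighbours
coords = np.array([[0.5, 2.0],
                   [0.5, 1.0]])
print(map_coordinates(a, coords))   # approximately [2., 7.]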
15,069
maweigert/gputools
gputools/utils/utils.py
pad_to_shape
def pad_to_shape(d, dshape, mode = "constant"):
    """
    pad array d to shape dshape
    """
    if d.shape == dshape:
        return d

    diff = np.array(dshape) - np.array(d.shape)

    # first shrink along the axes where dshape is smaller
    slices = tuple(slice(-x // 2, x // 2) if x < 0 else slice(None, None) for x in diff)
    res = d[slices]

    # then pad symmetrically along the axes where dshape is larger
    # (the loop variable is named n so it does not shadow the input array d)
    return np.pad(res, [(int(np.ceil(n / 2.)), n - int(np.ceil(n / 2.))) if n > 0 else (0, 0)
                        for n in diff], mode=mode)
python
def pad_to_shape(d, dshape, mode = "constant"):
    """
    pad array d to shape dshape
    """
    if d.shape == dshape:
        return d

    diff = np.array(dshape) - np.array(d.shape)

    # first shrink along the axes where dshape is smaller
    slices = tuple(slice(-x // 2, x // 2) if x < 0 else slice(None, None) for x in diff)
    res = d[slices]

    # then pad symmetrically along the axes where dshape is larger
    # (the loop variable is named n so it does not shadow the input array d)
    return np.pad(res, [(int(np.ceil(n / 2.)), n - int(np.ceil(n / 2.))) if n > 0 else (0, 0)
                        for n in diff], mode=mode)
[ "def", "pad_to_shape", "(", "d", ",", "dshape", ",", "mode", "=", "\"constant\"", ")", ":", "if", "d", ".", "shape", "==", "dshape", ":", "return", "d", "diff", "=", "np", ".", "array", "(", "dshape", ")", "-", "np", ".", "array", "(", "d", ".", "shape", ")", "#first shrink", "slices", "=", "tuple", "(", "slice", "(", "-", "x", "//", "2", ",", "x", "//", "2", ")", "if", "x", "<", "0", "else", "slice", "(", "None", ",", "None", ")", "for", "x", "in", "diff", ")", "res", "=", "d", "[", "slices", "]", "#then pad", "# return np.pad(res,[(n/2,n-n/2) if n>0 else (0,0) for n in diff],mode=mode)", "return", "np", ".", "pad", "(", "res", ",", "[", "(", "int", "(", "np", ".", "ceil", "(", "d", "/", "2.", ")", ")", ",", "d", "-", "int", "(", "np", ".", "ceil", "(", "d", "/", "2.", ")", ")", ")", "if", "d", ">", "0", "else", "(", "0", ",", "0", ")", "for", "d", "in", "diff", "]", ",", "mode", "=", "mode", ")" ]
pad array d to shape dshape
[ "pad", "array", "d", "to", "shape", "dshape" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/utils/utils.py#L4-L17
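A worked example of the shrink-then-pad behavior (a sketch, assuming pad_to_shape is in scope, e.g. from gputools.utils.utils):

import numpy as np

a = np.ones((3, 5))
b = pad_to_shape(a, (5, 4))
# rows: diff = +2 -> pad one row of zeros on each side (mode="constant")
# cols: diff = -1 -> slice(0, -1) drops the last column
print(b.shape)   # (5, 4)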
15,070
maweigert/gputools
gputools/utils/utils.py
pad_to_power2
def pad_to_power2(data, axis = None, mode="constant"):
    """
    pad data to a shape of power 2
    if axis is None, all axes are padded
    """
    if axis is None:
        axis = list(range(data.ndim))

    if np.all([_is_power2(n) for i, n in enumerate(data.shape) if i in axis]):
        return data
    else:
        return pad_to_shape(data, [(_next_power_of_2(n) if i in axis else n)
                                   for i, n in enumerate(data.shape)], mode)
python
def pad_to_power2(data, axis = None, mode="constant"):
    """
    pad data to a shape of power 2
    if axis is None, all axes are padded
    """
    if axis is None:
        axis = list(range(data.ndim))

    if np.all([_is_power2(n) for i, n in enumerate(data.shape) if i in axis]):
        return data
    else:
        return pad_to_shape(data, [(_next_power_of_2(n) if i in axis else n)
                                   for i, n in enumerate(data.shape)], mode)
[ "def", "pad_to_power2", "(", "data", ",", "axis", "=", "None", ",", "mode", "=", "\"constant\"", ")", ":", "if", "axis", "is", "None", ":", "axis", "=", "list", "(", "range", "(", "data", ".", "ndim", ")", ")", "if", "np", ".", "all", "(", "[", "_is_power2", "(", "n", ")", "for", "i", ",", "n", "in", "enumerate", "(", "data", ".", "shape", ")", "if", "i", "in", "axis", "]", ")", ":", "return", "data", "else", ":", "return", "pad_to_shape", "(", "data", ",", "[", "(", "_next_power_of_2", "(", "n", ")", "if", "i", "in", "axis", "else", "n", ")", "for", "i", ",", "n", "in", "enumerate", "(", "data", ".", "shape", ")", "]", ",", "mode", ")" ]
pad data to a shape of power 2
if axis is None, all axes are padded
[ "pad", "data", "to", "a", "shape", "of", "power", "2", "if", "axis", "==", "None", "all", "axis", "are", "padded" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/utils/utils.py#L27-L38
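A small sketch of the padding behavior (assumptions: pad_to_power2 is in scope, and _next_power_of_2(n) returns the smallest power of two >= n):

import numpy as np

a = np.zeros((3, 100, 7))
print(pad_to_power2(a).shape)            # (4, 128, 8)
print(pad_to_power2(a, axis=[1]).shape)  # (3, 128, 7)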
15,071
maweigert/gputools
gputools/convolve/generic_separable_filters.py
max_filter
def max_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1)):
    """
    maximum filter of given size

    Parameters
    ----------
    data: 2 or 3 dimensional ndarray or OCLArray of type float32
        input data
    size: scalar, tuple
        the size of the patch to consider
    res_g: OCLArray
        store result in buffer if given
    sub_blocks:
        perform over subblock tiling (only if data is ndarray)

    Returns
    -------
        filtered image or None (if OCLArray)
    """
    if data.ndim == 2:
        _filt = make_filter(_generic_filter_gpu_2d(FUNC="(val>res?val:res)", DEFAULT="-INFINITY"))
    elif data.ndim == 3:
        _filt = make_filter(_generic_filter_gpu_3d(FUNC="(val>res?val:res)", DEFAULT="-INFINITY"))
    else:
        # mirrors min_filter: fail early instead of hitting a NameError below
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    return _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
python
def max_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1)):
    """
    maximum filter of given size

    Parameters
    ----------
    data: 2 or 3 dimensional ndarray or OCLArray of type float32
        input data
    size: scalar, tuple
        the size of the patch to consider
    res_g: OCLArray
        store result in buffer if given
    sub_blocks:
        perform over subblock tiling (only if data is ndarray)

    Returns
    -------
        filtered image or None (if OCLArray)
    """
    if data.ndim == 2:
        _filt = make_filter(_generic_filter_gpu_2d(FUNC="(val>res?val:res)", DEFAULT="-INFINITY"))
    elif data.ndim == 3:
        _filt = make_filter(_generic_filter_gpu_3d(FUNC="(val>res?val:res)", DEFAULT="-INFINITY"))
    else:
        # mirrors min_filter: fail early instead of hitting a NameError below
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    return _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
[ "def", "max_filter", "(", "data", ",", "size", "=", "7", ",", "res_g", "=", "None", ",", "sub_blocks", "=", "(", "1", ",", "1", ",", "1", ")", ")", ":", "if", "data", ".", "ndim", "==", "2", ":", "_filt", "=", "make_filter", "(", "_generic_filter_gpu_2d", "(", "FUNC", "=", "\"(val>res?val:res)\"", ",", "DEFAULT", "=", "\"-INFINITY\"", ")", ")", "elif", "data", ".", "ndim", "==", "3", ":", "_filt", "=", "make_filter", "(", "_generic_filter_gpu_3d", "(", "FUNC", "=", "\"(val>res?val:res)\"", ",", "DEFAULT", "=", "\"-INFINITY\"", ")", ")", "return", "_filt", "(", "data", "=", "data", ",", "size", "=", "size", ",", "res_g", "=", "res_g", ",", "sub_blocks", "=", "sub_blocks", ")" ]
maximum filter of given size

Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
    input data
size: scalar, tuple
    the size of the patch to consider
res_g: OCLArray
    store result in buffer if given
sub_blocks:
    perform over subblock tiling (only if data is ndarray)

Returns
-------
    filtered image or None (if OCLArray)
[ "maximum", "filter", "of", "given", "size" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/generic_separable_filters.py#L115-L139
15,072
maweigert/gputools
gputools/convolve/generic_separable_filters.py
min_filter
def min_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1)):
    """
    minimum filter of given size

    Parameters
    ----------
    data: 2 or 3 dimensional ndarray or OCLArray of type float32
        input data
    size: scalar, tuple
        the size of the patch to consider
    res_g: OCLArray
        store result in buffer if given
    sub_blocks:
        perform over subblock tiling (only if data is ndarray)

    Returns
    -------
        filtered image or None (if OCLArray)
    """
    if data.ndim == 2:
        _filt = make_filter(_generic_filter_gpu_2d(FUNC="(val<res?val:res)", DEFAULT="INFINITY"))
    elif data.ndim == 3:
        _filt = make_filter(_generic_filter_gpu_3d(FUNC="(val<res?val:res)", DEFAULT="INFINITY"))
    else:
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    return _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
python
def min_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1)):
    """
    minimum filter of given size

    Parameters
    ----------
    data: 2 or 3 dimensional ndarray or OCLArray of type float32
        input data
    size: scalar, tuple
        the size of the patch to consider
    res_g: OCLArray
        store result in buffer if given
    sub_blocks:
        perform over subblock tiling (only if data is ndarray)

    Returns
    -------
        filtered image or None (if OCLArray)
    """
    if data.ndim == 2:
        _filt = make_filter(_generic_filter_gpu_2d(FUNC="(val<res?val:res)", DEFAULT="INFINITY"))
    elif data.ndim == 3:
        _filt = make_filter(_generic_filter_gpu_3d(FUNC="(val<res?val:res)", DEFAULT="INFINITY"))
    else:
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    return _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
[ "def", "min_filter", "(", "data", ",", "size", "=", "7", ",", "res_g", "=", "None", ",", "sub_blocks", "=", "(", "1", ",", "1", ",", "1", ")", ")", ":", "if", "data", ".", "ndim", "==", "2", ":", "_filt", "=", "make_filter", "(", "_generic_filter_gpu_2d", "(", "FUNC", "=", "\"(val<res?val:res)\"", ",", "DEFAULT", "=", "\"INFINITY\"", ")", ")", "elif", "data", ".", "ndim", "==", "3", ":", "_filt", "=", "make_filter", "(", "_generic_filter_gpu_3d", "(", "FUNC", "=", "\"(val<res?val:res)\"", ",", "DEFAULT", "=", "\"INFINITY\"", ")", ")", "else", ":", "raise", "ValueError", "(", "\"currently only 2 or 3 dimensional data is supported\"", ")", "return", "_filt", "(", "data", "=", "data", ",", "size", "=", "size", ",", "res_g", "=", "res_g", ",", "sub_blocks", "=", "sub_blocks", ")" ]
minimum filter of given size

Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
    input data
size: scalar, tuple
    the size of the patch to consider
res_g: OCLArray
    store result in buffer if given
sub_blocks:
    perform over subblock tiling (only if data is ndarray)

Returns
-------
    filtered image or None (if OCLArray)
[ "minimum", "filter", "of", "given", "size" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/generic_separable_filters.py#L142-L167
15,073
maweigert/gputools
gputools/convolve/generic_separable_filters.py
uniform_filter
def uniform_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1), normalized = True):
    """
    mean filter of given size

    Parameters
    ----------
    data: 2 or 3 dimensional ndarray or OCLArray of type float32
        input data
    size: scalar, tuple
        the size of the patch to consider
    res_g: OCLArray
        store result in buffer if given
    sub_blocks:
        perform over subblock tiling (only if data is ndarray)
    normalized: bool
        if True, the filter corresponds to mean
        if False, the filter corresponds to sum

    Returns
    -------
        filtered image or None (if OCLArray)
    """
    if normalized:
        if np.isscalar(size):
            norm = size
        else:
            # per-axis norm, so the separable passes divide by prod(size) in total
            norm = np.int32(np.prod(size)) ** (1. / len(size))
        FUNC = "res+val/%s" % norm
    else:
        FUNC = "res+val"

    if data.ndim == 2:
        _filt = make_filter(_generic_filter_gpu_2d(FUNC=FUNC, DEFAULT="0"))
    elif data.ndim == 3:
        _filt = make_filter(_generic_filter_gpu_3d(FUNC=FUNC, DEFAULT="0"))
    else:
        # mirrors min_filter: fail early instead of hitting a NameError below
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    res = _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
    return res
python
def uniform_filter(data, size=7, res_g=None, sub_blocks=(1, 1, 1), normalized = True):
    """
    mean filter of given size

    Parameters
    ----------
    data: 2 or 3 dimensional ndarray or OCLArray of type float32
        input data
    size: scalar, tuple
        the size of the patch to consider
    res_g: OCLArray
        store result in buffer if given
    sub_blocks:
        perform over subblock tiling (only if data is ndarray)
    normalized: bool
        if True, the filter corresponds to mean
        if False, the filter corresponds to sum

    Returns
    -------
        filtered image or None (if OCLArray)
    """
    if normalized:
        if np.isscalar(size):
            norm = size
        else:
            # per-axis norm, so the separable passes divide by prod(size) in total
            norm = np.int32(np.prod(size)) ** (1. / len(size))
        FUNC = "res+val/%s" % norm
    else:
        FUNC = "res+val"

    if data.ndim == 2:
        _filt = make_filter(_generic_filter_gpu_2d(FUNC=FUNC, DEFAULT="0"))
    elif data.ndim == 3:
        _filt = make_filter(_generic_filter_gpu_3d(FUNC=FUNC, DEFAULT="0"))
    else:
        # mirrors min_filter: fail early instead of hitting a NameError below
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    res = _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
    return res
[ "def", "uniform_filter", "(", "data", ",", "size", "=", "7", ",", "res_g", "=", "None", ",", "sub_blocks", "=", "(", "1", ",", "1", ",", "1", ")", ",", "normalized", "=", "True", ")", ":", "if", "normalized", ":", "if", "np", ".", "isscalar", "(", "size", ")", ":", "norm", "=", "size", "else", ":", "norm", "=", "np", ".", "int32", "(", "np", ".", "prod", "(", "size", ")", ")", "**", "(", "1.", "/", "len", "(", "size", ")", ")", "FUNC", "=", "\"res+val/%s\"", "%", "norm", "else", ":", "FUNC", "=", "\"res+val\"", "if", "data", ".", "ndim", "==", "2", ":", "_filt", "=", "make_filter", "(", "_generic_filter_gpu_2d", "(", "FUNC", "=", "FUNC", ",", "DEFAULT", "=", "\"0\"", ")", ")", "elif", "data", ".", "ndim", "==", "3", ":", "_filt", "=", "make_filter", "(", "_generic_filter_gpu_3d", "(", "FUNC", "=", "FUNC", ",", "DEFAULT", "=", "\"0\"", ")", ")", "res", "=", "_filt", "(", "data", "=", "data", ",", "size", "=", "size", ",", "res_g", "=", "res_g", ",", "sub_blocks", "=", "sub_blocks", ")", "return", "res" ]
mean filter of given size

Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
    input data
size: scalar, tuple
    the size of the patch to consider
res_g: OCLArray
    store result in buffer if given
sub_blocks:
    perform over subblock tiling (only if data is ndarray)
normalized: bool
    if True, the filter corresponds to mean
    if False, the filter corresponds to sum

Returns
-------
    filtered image or None (if OCLArray)
[ "mean", "filter", "of", "given", "size" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/generic_separable_filters.py#L171-L210
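A sketch tying the three separable filters above together (hedged: assumes the top-level gputools exports):

import numpy as np
from gputools import max_filter, min_filter, uniform_filter

x = np.random.uniform(0, 1, (64, 64, 64)).astype(np.float32)

dilated = max_filter(x, size=5)        # grayscale dilation
eroded = min_filter(x, size=5)         # grayscale erosion
mean = uniform_filter(x, size=5)       # local mean (normalized=True)
total = uniform_filter(x, size=5, normalized=False)   # local sum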
15,074
maweigert/gputools
gputools/convolve/generic_separable_filters.py
_gauss_filter
def _gauss_filter(data, sigma=4, res_g=None, sub_blocks=(1, 1, 1)):
    """
    gaussian filter of given sigma

    Parameters
    ----------
    data: 2 or 3 dimensional ndarray or OCLArray of type float32
        input data
    sigma: tuple
        the standard deviation per axis (a sequence is expected here,
        as the code iterates over it)
    res_g: OCLArray
        store result in buffer if given
    sub_blocks:
        perform over subblock tiling (only if data is ndarray)

    Returns
    -------
        filtered image or None (if OCLArray)
    """
    truncate = 4.
    radius = tuple(int(truncate * s + 0.5) for s in sigma)
    size = tuple(2 * r + 1 for r in radius)

    # note: only sigma[0] and size[0] enter the kernel expression below,
    # so the same 1d profile is applied along every axis
    s = sigma[0]

    if data.ndim == 2:
        _filt = make_filter(_generic_filter_gpu_2d(
            FUNC="res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))" % (size[0] // 2, size[0] // 2, s, s),
            DEFAULT="0.f"))
    elif data.ndim == 3:
        _filt = make_filter(_generic_filter_gpu_3d(
            FUNC="res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))" % (size[0] // 2, size[0] // 2, s, s),
            DEFAULT="0.f"))
    else:
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    return _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
python
def _gauss_filter(data, sigma=4, res_g=None, sub_blocks=(1, 1, 1)):
    """
    gaussian filter of given sigma

    Parameters
    ----------
    data: 2 or 3 dimensional ndarray or OCLArray of type float32
        input data
    sigma: tuple
        the standard deviation per axis (a sequence is expected here,
        as the code iterates over it)
    res_g: OCLArray
        store result in buffer if given
    sub_blocks:
        perform over subblock tiling (only if data is ndarray)

    Returns
    -------
        filtered image or None (if OCLArray)
    """
    truncate = 4.
    radius = tuple(int(truncate * s + 0.5) for s in sigma)
    size = tuple(2 * r + 1 for r in radius)

    # note: only sigma[0] and size[0] enter the kernel expression below,
    # so the same 1d profile is applied along every axis
    s = sigma[0]

    if data.ndim == 2:
        _filt = make_filter(_generic_filter_gpu_2d(
            FUNC="res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))" % (size[0] // 2, size[0] // 2, s, s),
            DEFAULT="0.f"))
    elif data.ndim == 3:
        _filt = make_filter(_generic_filter_gpu_3d(
            FUNC="res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))" % (size[0] // 2, size[0] // 2, s, s),
            DEFAULT="0.f"))
    else:
        raise ValueError("currently only 2 or 3 dimensional data is supported")
    return _filt(data=data, size=size, res_g=res_g, sub_blocks=sub_blocks)
[ "def", "_gauss_filter", "(", "data", ",", "sigma", "=", "4", ",", "res_g", "=", "None", ",", "sub_blocks", "=", "(", "1", ",", "1", ",", "1", ")", ")", ":", "truncate", "=", "4.", "radius", "=", "tuple", "(", "int", "(", "truncate", "*", "s", "+", "0.5", ")", "for", "s", "in", "sigma", ")", "size", "=", "tuple", "(", "2", "*", "r", "+", "1", "for", "r", "in", "radius", ")", "s", "=", "sigma", "[", "0", "]", "if", "data", ".", "ndim", "==", "2", ":", "_filt", "=", "make_filter", "(", "_generic_filter_gpu_2d", "(", "FUNC", "=", "\"res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))\"", "%", "(", "size", "[", "0", "]", "//", "2", ",", "size", "[", "0", "]", "//", "2", ",", "s", ",", "s", ")", ",", "DEFAULT", "=", "\"0.f\"", ")", ")", "elif", "data", ".", "ndim", "==", "3", ":", "_filt", "=", "make_filter", "(", "_generic_filter_gpu_3d", "(", "FUNC", "=", "\"res+(val*native_exp((float)(-(ht-%s)*(ht-%s)/2/%s/%s)))\"", "%", "(", "size", "[", "0", "]", "//", "2", ",", "size", "[", "0", "]", "//", "2", ",", "s", ",", "s", ")", ",", "DEFAULT", "=", "\"0.f\"", ")", ")", "else", ":", "raise", "ValueError", "(", "\"currently only 2 or 3 dimensional data is supported\"", ")", "return", "_filt", "(", "data", "=", "data", ",", "size", "=", "size", ",", "res_g", "=", "res_g", ",", "sub_blocks", "=", "sub_blocks", ")" ]
gaussian filter of given sigma

Parameters
----------
data: 2 or 3 dimensional ndarray or OCLArray of type float32
    input data
sigma: tuple
    the standard deviation per axis (a sequence is expected here,
    as the code iterates over it)
res_g: OCLArray
    store result in buffer if given
sub_blocks:
    perform over subblock tiling (only if data is ndarray)

Returns
-------
    filtered image or None (if OCLArray)
[ "gaussian", "filter", "of", "given", "size" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/convolve/generic_separable_filters.py#L216-L248
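The truncation arithmetic in _gauss_filter, worked through for sigma=(2., 2.) (the kernel is cut off at four standard deviations per axis):

truncate = 4.
sigma = (2., 2.)
radius = tuple(int(truncate * s + 0.5) for s in sigma)   # (8, 8)
size = tuple(2 * r + 1 for r in radius)                  # (17, 17) sample window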
15,075
maweigert/gputools
gputools/separable/separable_approx.py
_separable_series2
def _separable_series2(h, N=1):
    """ finds separable approximations to the 2d function h

    returns res = (hx, hy)[N]
    s.t. h \approx sum_i outer(res[i,0], res[i,1])
    """
    if min(h.shape) < N:
        raise ValueError("smallest dimension of h is smaller than approximation order! (%s < %s)" % (min(h.shape), N))

    U, S, V = linalg.svd(h)

    hx = [-U[:, n] * np.sqrt(S[n]) for n in range(N)]
    hy = [-V[n, :] * np.sqrt(S[n]) for n in range(N)]
    return np.array(list(zip(hx, hy)))
python
def _separable_series2(h, N=1):
    """ finds separable approximations to the 2d function h

    returns res = (hx, hy)[N]
    s.t. h \approx sum_i outer(res[i,0], res[i,1])
    """
    if min(h.shape) < N:
        raise ValueError("smallest dimension of h is smaller than approximation order! (%s < %s)" % (min(h.shape), N))

    U, S, V = linalg.svd(h)

    hx = [-U[:, n] * np.sqrt(S[n]) for n in range(N)]
    hy = [-V[n, :] * np.sqrt(S[n]) for n in range(N)]
    return np.array(list(zip(hx, hy)))
[ "def", "_separable_series2", "(", "h", ",", "N", "=", "1", ")", ":", "if", "min", "(", "h", ".", "shape", ")", "<", "N", ":", "raise", "ValueError", "(", "\"smallest dimension of h is smaller than approximation order! (%s < %s)\"", "%", "(", "min", "(", "h", ".", "shape", ")", ",", "N", ")", ")", "U", ",", "S", ",", "V", "=", "linalg", ".", "svd", "(", "h", ")", "hx", "=", "[", "-", "U", "[", ":", ",", "n", "]", "*", "np", ".", "sqrt", "(", "S", "[", "n", "]", ")", "for", "n", "in", "range", "(", "N", ")", "]", "hy", "=", "[", "-", "V", "[", "n", ",", ":", "]", "*", "np", ".", "sqrt", "(", "S", "[", "n", "]", ")", "for", "n", "in", "range", "(", "N", ")", "]", "return", "np", ".", "array", "(", "list", "(", "zip", "(", "hx", ",", "hy", ")", ")", ")" ]
finds separable approximations to the 2d function h

returns res = (hx, hy)[N]
s.t. h \approx sum_i outer(res[i,0], res[i,1])
[ "finds", "separable", "approximations", "to", "the", "2d", "function", "2d", "h" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/separable/separable_approx.py#L16-L29
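A numeric check of the SVD-based separation (a sketch that calls the module-private helper directly; the sign flips on the two factors cancel in the outer product):

import numpy as np

gx = np.exp(-np.linspace(-2, 2, 9) ** 2)
h = np.outer(gx, gx)   # an exactly separable, rank-1 kernel

(fy, fx), = _separable_series2(h, N=1)
print(np.allclose(np.outer(fy, fx), h))   # True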
15,076
maweigert/gputools
gputools/separable/separable_approx.py
_separable_approx2
def _separable_approx2(h, N=1):
    """ returns the N first approximations to the 2d function h
    whose sum should be h
    """
    return np.cumsum([np.outer(fy, fx) for fy, fx in _separable_series2(h, N)], 0)
python
def _separable_approx2(h, N=1):
    """ returns the N first approximations to the 2d function h
    whose sum should be h
    """
    return np.cumsum([np.outer(fy, fx) for fy, fx in _separable_series2(h, N)], 0)
[ "def", "_separable_approx2", "(", "h", ",", "N", "=", "1", ")", ":", "return", "np", ".", "cumsum", "(", "[", "np", ".", "outer", "(", "fy", ",", "fx", ")", "for", "fy", ",", "fx", "in", "_separable_series2", "(", "h", ",", "N", ")", "]", ",", "0", ")" ]
returns the N first approximations to the 2d function h whose sum should be h
[ "returns", "the", "N", "first", "approximations", "to", "the", "2d", "function", "h", "whose", "sum", "should", "be", "h" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/separable/separable_approx.py#L32-L36
15,077
maweigert/gputools
gputools/separable/separable_approx.py
_separable_approx3
def _separable_approx3(h, N=1):
    """ returns the N first approximations to the 3d function h
    """
    return np.cumsum([np.einsum("i,j,k", fz, fy, fx)
                      for fz, fy, fx in _separable_series3(h, N)], 0)
python
def _separable_approx3(h, N=1):
    """ returns the N first approximations to the 3d function h
    """
    return np.cumsum([np.einsum("i,j,k", fz, fy, fx)
                      for fz, fy, fx in _separable_series3(h, N)], 0)
[ "def", "_separable_approx3", "(", "h", ",", "N", "=", "1", ")", ":", "return", "np", ".", "cumsum", "(", "[", "np", ".", "einsum", "(", "\"i,j,k\"", ",", "fz", ",", "fy", ",", "fx", ")", "for", "fz", ",", "fy", ",", "fx", "in", "_separable_series3", "(", "h", ",", "N", ")", "]", ",", "0", ")" ]
returns the N first approximations to the 3d function h
[ "returns", "the", "N", "first", "approximations", "to", "the", "3d", "function", "h" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/separable/separable_approx.py#L85-L88
15,078
maweigert/gputools
gputools/separable/separable_approx.py
separable_approx
def separable_approx(h, N=1):
    """ finds the k-th rank approximation to h, where k = 1..N

    similar to separable_series

    Parameters
    ----------
    h: ndarray
        input array (2 or 3 dimensional)
    N: int
        order of approximation

    Returns
    -------
        all N approximations res[i], the i-th approximation
    """
    if h.ndim == 2:
        return _separable_approx2(h, N)
    elif h.ndim == 3:
        return _separable_approx3(h, N)
    else:
        raise ValueError("unsupported array dimension: %s (only 2d or 3d)" % h.ndim)
python
def separable_approx(h, N=1):
    """ finds the k-th rank approximation to h, where k = 1..N

    similar to separable_series

    Parameters
    ----------
    h: ndarray
        input array (2 or 3 dimensional)
    N: int
        order of approximation

    Returns
    -------
        all N approximations res[i], the i-th approximation
    """
    if h.ndim == 2:
        return _separable_approx2(h, N)
    elif h.ndim == 3:
        return _separable_approx3(h, N)
    else:
        raise ValueError("unsupported array dimension: %s (only 2d or 3d)" % h.ndim)
[ "def", "separable_approx", "(", "h", ",", "N", "=", "1", ")", ":", "if", "h", ".", "ndim", "==", "2", ":", "return", "_separable_approx2", "(", "h", ",", "N", ")", "elif", "h", ".", "ndim", "==", "3", ":", "return", "_separable_approx3", "(", "h", ",", "N", ")", "else", ":", "raise", "ValueError", "(", "\"unsupported array dimension: %s (only 2d or 3d) \"", "%", "h", ".", "ndim", ")" ]
finds the k-th rank approximation to h, where k = 1..N

similar to separable_series

Parameters
----------
h: ndarray
    input array (2 or 3 dimensional)
N: int
    order of approximation

Returns
-------
    all N approximations res[i], the i-th approximation
[ "finds", "the", "k", "-", "th", "rank", "approximation", "to", "h", "where", "k", "=", "1", "..", "N" ]
6ab26efeb05dceef74cf13aadeeeb9b009b529dd
https://github.com/maweigert/gputools/blob/6ab26efeb05dceef74cf13aadeeeb9b009b529dd/gputools/separable/separable_approx.py#L127-L150
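A usage sketch for separable_approx (hedged: assumes it is importable from gputools):

import numpy as np
from gputools import separable_approx

# an exactly separable (rank-1) anisotropic Gaussian
gx = np.exp(-np.linspace(-2, 2, 17) ** 2)
gy = np.exp(-np.linspace(-3, 3, 17) ** 2)
h = np.outer(gy, gx)

approx = separable_approx(h, N=2)
print(approx.shape)               # (2, 17, 17)
print(np.allclose(approx[0], h))  # True, since h is rank-1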
15,079
alculquicondor/psqlparse
psqlparse/nodes/nodes.py
Node.tables
def tables(self):
    """
    Generic method that does a depth-first search on the
    node attributes.

    Child classes should override this method for better
    performance.
    """
    _tables = set()
    for attr in six.itervalues(self.__dict__):
        if isinstance(attr, list):
            for item in attr:
                if isinstance(item, Node):
                    _tables |= item.tables()
        elif isinstance(attr, Node):
            _tables |= attr.tables()
    return _tables
python
def tables(self):
    """
    Generic method that does a depth-first search on the
    node attributes.

    Child classes should override this method for better
    performance.
    """
    _tables = set()
    for attr in six.itervalues(self.__dict__):
        if isinstance(attr, list):
            for item in attr:
                if isinstance(item, Node):
                    _tables |= item.tables()
        elif isinstance(attr, Node):
            _tables |= attr.tables()
    return _tables
[ "def", "tables", "(", "self", ")", ":", "_tables", "=", "set", "(", ")", "for", "attr", "in", "six", ".", "itervalues", "(", "self", ".", "__dict__", ")", ":", "if", "isinstance", "(", "attr", ",", "list", ")", ":", "for", "item", "in", "attr", ":", "if", "isinstance", "(", "item", ",", "Node", ")", ":", "_tables", "|=", "item", ".", "tables", "(", ")", "elif", "isinstance", "(", "attr", ",", "Node", ")", ":", "_tables", "|=", "attr", ".", "tables", "(", ")", "return", "_tables" ]
Generic method that does a depth-first search on the node attributes. Child classes should override this method for better performance.
[ "Generic", "method", "that", "does", "a", "depth", "-", "first", "search", "on", "the", "node", "attributes", "." ]
9c2af04f45ddc4068d7fd87580612457d374e97d
https://github.com/alculquicondor/psqlparse/blob/9c2af04f45ddc4068d7fd87580612457d374e97d/psqlparse/nodes/nodes.py#L6-L22
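A hedged usage sketch of how this depth-first search surfaces table names (assuming psqlparse.parse returns statement nodes derived from Node):

import psqlparse

stmts = psqlparse.parse("SELECT a.x FROM a JOIN b ON a.id = b.id")
print(stmts[0].tables())   # {'a', 'b'}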
15,080
sloria/konch
docopt.py
Pattern.fix_identities
def fix_identities(self, uniq=None):
    """Make pattern-tree tips point to same object if they are equal."""
    if not hasattr(self, 'children'):
        return self
    uniq = list(set(self.flat())) if uniq is None else uniq
    for i, child in enumerate(self.children):
        if not hasattr(child, 'children'):
            assert child in uniq
            self.children[i] = uniq[uniq.index(child)]
        else:
            child.fix_identities(uniq)
python
def fix_identities(self, uniq=None):
    """Make pattern-tree tips point to same object if they are equal."""
    if not hasattr(self, 'children'):
        return self
    uniq = list(set(self.flat())) if uniq is None else uniq
    for i, child in enumerate(self.children):
        if not hasattr(child, 'children'):
            assert child in uniq
            self.children[i] = uniq[uniq.index(child)]
        else:
            child.fix_identities(uniq)
[ "def", "fix_identities", "(", "self", ",", "uniq", "=", "None", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'children'", ")", ":", "return", "self", "uniq", "=", "list", "(", "set", "(", "self", ".", "flat", "(", ")", ")", ")", "if", "uniq", "is", "None", "else", "uniq", "for", "i", ",", "child", "in", "enumerate", "(", "self", ".", "children", ")", ":", "if", "not", "hasattr", "(", "child", ",", "'children'", ")", ":", "assert", "child", "in", "uniq", "self", ".", "children", "[", "i", "]", "=", "uniq", "[", "uniq", ".", "index", "(", "child", ")", "]", "else", ":", "child", ".", "fix_identities", "(", "uniq", ")" ]
Make pattern-tree tips point to same object if they are equal.
[ "Make", "pattern", "-", "tree", "tips", "point", "to", "same", "object", "if", "they", "are", "equal", "." ]
15160bd0a0cac967eeeab84794bd6cdd0b5b637d
https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/docopt.py#L46-L56
15,081
sloria/konch
setup.py
find_version
def find_version(fname):
    """Attempts to find the version number in the file named fname.
    Raises RuntimeError if not found.
    """
    version = ""
    with open(fname, "r") as fp:
        reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
        for line in fp:
            m = reg.match(line)
            if m:
                version = m.group(1)
                break
    if not version:
        raise RuntimeError("Cannot find version information")
    return version
python
def find_version(fname):
    """Attempts to find the version number in the file named fname.
    Raises RuntimeError if not found.
    """
    version = ""
    with open(fname, "r") as fp:
        reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
        for line in fp:
            m = reg.match(line)
            if m:
                version = m.group(1)
                break
    if not version:
        raise RuntimeError("Cannot find version information")
    return version
[ "def", "find_version", "(", "fname", ")", ":", "version", "=", "\"\"", "with", "open", "(", "fname", ",", "\"r\"", ")", "as", "fp", ":", "reg", "=", "re", ".", "compile", "(", "r'__version__ = [\\'\"]([^\\'\"]*)[\\'\"]'", ")", "for", "line", "in", "fp", ":", "m", "=", "reg", ".", "match", "(", "line", ")", "if", "m", ":", "version", "=", "m", ".", "group", "(", "1", ")", "break", "if", "not", "version", ":", "raise", "RuntimeError", "(", "\"Cannot find version information\"", ")", "return", "version" ]
Attempts to find the version number in the file named fname.
Raises RuntimeError if not found.
[ "Attempts", "to", "find", "the", "version", "number", "in", "the", "file", "names", "fname", ".", "Raises", "RuntimeError", "if", "not", "found", "." ]
15160bd0a0cac967eeeab84794bd6cdd0b5b637d
https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/setup.py#L46-L60
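What the regex in find_version actually matches, as a self-contained check:

import re

reg = re.compile(r'__version__ = [\'"]([^\'"]*)[\'"]')
m = reg.match('__version__ = "4.2.0"\n')   # a made-up module line
print(m.group(1))   # 4.2.0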
15,082
sloria/konch
konch.py
format_context
def format_context(
    context: Context, formatter: typing.Union[str, Formatter] = "full"
) -> str:
    """Output a context dictionary as a string."""
    if not context:
        return ""
    if callable(formatter):
        formatter_func = formatter
    else:
        if formatter in CONTEXT_FORMATTERS:
            formatter_func = CONTEXT_FORMATTERS[formatter]
        else:
            raise ValueError(f'Invalid context format: "{formatter}"')

    return formatter_func(context)
python
def format_context(
    context: Context, formatter: typing.Union[str, Formatter] = "full"
) -> str:
    """Output a context dictionary as a string."""
    if not context:
        return ""
    if callable(formatter):
        formatter_func = formatter
    else:
        if formatter in CONTEXT_FORMATTERS:
            formatter_func = CONTEXT_FORMATTERS[formatter]
        else:
            raise ValueError(f'Invalid context format: "{formatter}"')

    return formatter_func(context)
[ "def", "format_context", "(", "context", ":", "Context", ",", "formatter", ":", "typing", ".", "Union", "[", "str", ",", "Formatter", "]", "=", "\"full\"", ")", "->", "str", ":", "if", "not", "context", ":", "return", "\"\"", "if", "callable", "(", "formatter", ")", ":", "formatter_func", "=", "formatter", "else", ":", "if", "formatter", "in", "CONTEXT_FORMATTERS", ":", "formatter_func", "=", "CONTEXT_FORMATTERS", "[", "formatter", "]", "else", ":", "raise", "ValueError", "(", "f'Invalid context format: \"{formatter}\"'", ")", "return", "formatter_func", "(", "context", ")" ]
Output a context dictionary as a string.
[ "Output", "the", "a", "context", "dictionary", "as", "a", "string", "." ]
15160bd0a0cac967eeeab84794bd6cdd0b5b637d
https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L245-L259
15,083
sloria/konch
konch.py
make_banner
def make_banner(
    text: typing.Optional[str] = None,
    context: typing.Optional[Context] = None,
    banner_template: typing.Optional[str] = None,
    context_format: ContextFormat = "full",
) -> str:
    """Generates a full banner with version info, the given text, and a
    formatted list of context variables.
    """
    banner_text = text or speak()
    banner_template = banner_template or BANNER_TEMPLATE
    ctx = format_context(context or {}, formatter=context_format)
    out = banner_template.format(version=sys.version, text=banner_text, context=ctx)
    return out
python
def make_banner(
    text: typing.Optional[str] = None,
    context: typing.Optional[Context] = None,
    banner_template: typing.Optional[str] = None,
    context_format: ContextFormat = "full",
) -> str:
    """Generates a full banner with version info, the given text, and a
    formatted list of context variables.
    """
    banner_text = text or speak()
    banner_template = banner_template or BANNER_TEMPLATE
    ctx = format_context(context or {}, formatter=context_format)
    out = banner_template.format(version=sys.version, text=banner_text, context=ctx)
    return out
[ "def", "make_banner", "(", "text", ":", "typing", ".", "Optional", "[", "str", "]", "=", "None", ",", "context", ":", "typing", ".", "Optional", "[", "Context", "]", "=", "None", ",", "banner_template", ":", "typing", ".", "Optional", "[", "str", "]", "=", "None", ",", "context_format", ":", "ContextFormat", "=", "\"full\"", ",", ")", "->", "str", ":", "banner_text", "=", "text", "or", "speak", "(", ")", "banner_template", "=", "banner_template", "or", "BANNER_TEMPLATE", "ctx", "=", "format_context", "(", "context", "or", "{", "}", ",", "formatter", "=", "context_format", ")", "out", "=", "banner_template", ".", "format", "(", "version", "=", "sys", ".", "version", ",", "text", "=", "banner_text", ",", "context", "=", "ctx", ")", "return", "out" ]
Generates a full banner with version info, the given text, and a formatted list of context variables.
[ "Generates", "a", "full", "banner", "with", "version", "info", "the", "given", "text", "and", "a", "formatted", "list", "of", "context", "variables", "." ]
15160bd0a0cac967eeeab84794bd6cdd0b5b637d
https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L269-L282
15,084
sloria/konch
konch.py
config
def config(config_dict: typing.Mapping) -> Config:
    """Configures the konch shell. This function should be called in a
    .konchrc file.

    :param dict config_dict: Dict that may contain 'context', 'banner',
        and/or 'shell' (default shell class to use).
    """
    logger.debug(f"Updating with {config_dict}")
    _cfg.update(config_dict)
    return _cfg
python
def config(config_dict: typing.Mapping) -> Config:
    """Configures the konch shell. This function should be called in a
    .konchrc file.

    :param dict config_dict: Dict that may contain 'context', 'banner',
        and/or 'shell' (default shell class to use).
    """
    logger.debug(f"Updating with {config_dict}")
    _cfg.update(config_dict)
    return _cfg
[ "def", "config", "(", "config_dict", ":", "typing", ".", "Mapping", ")", "->", "Config", ":", "logger", ".", "debug", "(", "f\"Updating with {config_dict}\"", ")", "_cfg", ".", "update", "(", "config_dict", ")", "return", "_cfg" ]
Configures the konch shell. This function should be called in a
.konchrc file.

:param dict config_dict: Dict that may contain 'context', 'banner',
    and/or 'shell' (default shell class to use).
[ "Configures", "the", "konch", "shell", ".", "This", "function", "should", "be", "called", "in", "a", ".", "konchrc", "file", "." ]
15160bd0a0cac967eeeab84794bd6cdd0b5b637d
https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L828-L837
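A minimal .konchrc sketch showing how config is meant to be called (the context entries here are hypothetical):

# .konchrc
import konch

konch.config({
    "context": {"speak": konch.speak},
    "banner": "A konch shell with a custom context",
})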
15,085
sloria/konch
konch.py
named_config
def named_config(name: str, config_dict: typing.Mapping) -> None:
    """Adds a named config to the config registry. The first argument
    may either be a string or a collection of strings.

    This function should be called in a .konchrc file.
    """
    names = (
        name
        if isinstance(name, Iterable) and not isinstance(name, (str, bytes))
        else [name]
    )
    for each in names:
        _config_registry[each] = Config(**config_dict)
python
def named_config(name: str, config_dict: typing.Mapping) -> None:
    """Adds a named config to the config registry. The first argument
    may either be a string or a collection of strings.

    This function should be called in a .konchrc file.
    """
    names = (
        name
        if isinstance(name, Iterable) and not isinstance(name, (str, bytes))
        else [name]
    )
    for each in names:
        _config_registry[each] = Config(**config_dict)
[ "def", "named_config", "(", "name", ":", "str", ",", "config_dict", ":", "typing", ".", "Mapping", ")", "->", "None", ":", "names", "=", "(", "name", "if", "isinstance", "(", "name", ",", "Iterable", ")", "and", "not", "isinstance", "(", "name", ",", "(", "str", ",", "bytes", ")", ")", "else", "[", "name", "]", ")", "for", "each", "in", "names", ":", "_config_registry", "[", "each", "]", "=", "Config", "(", "*", "*", "config_dict", ")" ]
Adds a named config to the config registry. The first argument may either be a string or a collection of strings. This function should be called in a .konchrc file.
[ "Adds", "a", "named", "config", "to", "the", "config", "registry", ".", "The", "first", "argument", "may", "either", "be", "a", "string", "or", "a", "collection", "of", "strings", "." ]
15160bd0a0cac967eeeab84794bd6cdd0b5b637d
https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L840-L852
15,086
sloria/konch
konch.py
__ensure_directory_in_path
def __ensure_directory_in_path(filename: Path) -> None:
    """Ensures that a file's directory is in the Python path.
    """
    directory = Path(filename).parent.resolve()
    # compare as str: sys.path holds strings, so a Path object would never match
    if str(directory) not in sys.path:
        logger.debug(f"Adding {directory} to sys.path")
        sys.path.insert(0, str(directory))
python
def __ensure_directory_in_path(filename: Path) -> None:
    """Ensures that a file's directory is in the Python path.
    """
    directory = Path(filename).parent.resolve()
    # compare as str: sys.path holds strings, so a Path object would never match
    if str(directory) not in sys.path:
        logger.debug(f"Adding {directory} to sys.path")
        sys.path.insert(0, str(directory))
[ "def", "__ensure_directory_in_path", "(", "filename", ":", "Path", ")", "->", "None", ":", "directory", "=", "Path", "(", "filename", ")", ".", "parent", ".", "resolve", "(", ")", "if", "directory", "not", "in", "sys", ".", "path", ":", "logger", ".", "debug", "(", "f\"Adding {directory} to sys.path\"", ")", "sys", ".", "path", ".", "insert", "(", "0", ",", "str", "(", "directory", ")", ")" ]
Ensures that a file's directory is in the Python path.
[ "Ensures", "that", "a", "file", "s", "directory", "is", "in", "the", "Python", "path", "." ]
15160bd0a0cac967eeeab84794bd6cdd0b5b637d
https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L861-L867
15,087
sloria/konch
konch.py
use_file
def use_file( filename: typing.Union[Path, str, None], trust: bool = False ) -> typing.Union[types.ModuleType, None]: """Load filename as a python file. Import ``filename`` and return it as a module. """ config_file = filename or resolve_path(CONFIG_FILE) def preview_unauthorized() -> None: if not config_file: return None print(SEPARATOR, file=sys.stderr) with Path(config_file).open("r", encoding="utf-8") as fp: for line in fp: print(line, end="", file=sys.stderr) print(SEPARATOR, file=sys.stderr) if config_file and not Path(config_file).exists(): print_error(f'"{filename}" not found.') sys.exit(1) if config_file and Path(config_file).exists(): if not trust: with AuthFile.load() as authfile: try: authfile.check(Path(config_file)) except KonchrcChangedError: print_error(f'"{config_file}" has changed since you last used it.') preview_unauthorized() if confirm("Would you like to authorize it?"): authfile.allow(Path(config_file)) print() else: sys.exit(1) except KonchrcNotAuthorizedError: print_error(f'"{config_file}" is blocked.') preview_unauthorized() if confirm("Would you like to authorize it?"): authfile.allow(Path(config_file)) print() else: sys.exit(1) logger.info(f"Using {config_file}") # Ensure that relative imports are possible __ensure_directory_in_path(Path(config_file)) mod = None try: mod = imp.load_source("konchrc", str(config_file)) except UnboundLocalError: # File not found pass else: return mod if not config_file: print_warning("No konch config file found.") else: print_warning(f'"{config_file}" not found.') return None
python
def use_file( filename: typing.Union[Path, str, None], trust: bool = False ) -> typing.Union[types.ModuleType, None]: """Load filename as a python file. Import ``filename`` and return it as a module. """ config_file = filename or resolve_path(CONFIG_FILE) def preview_unauthorized() -> None: if not config_file: return None print(SEPARATOR, file=sys.stderr) with Path(config_file).open("r", encoding="utf-8") as fp: for line in fp: print(line, end="", file=sys.stderr) print(SEPARATOR, file=sys.stderr) if config_file and not Path(config_file).exists(): print_error(f'"{filename}" not found.') sys.exit(1) if config_file and Path(config_file).exists(): if not trust: with AuthFile.load() as authfile: try: authfile.check(Path(config_file)) except KonchrcChangedError: print_error(f'"{config_file}" has changed since you last used it.') preview_unauthorized() if confirm("Would you like to authorize it?"): authfile.allow(Path(config_file)) print() else: sys.exit(1) except KonchrcNotAuthorizedError: print_error(f'"{config_file}" is blocked.') preview_unauthorized() if confirm("Would you like to authorize it?"): authfile.allow(Path(config_file)) print() else: sys.exit(1) logger.info(f"Using {config_file}") # Ensure that relative imports are possible __ensure_directory_in_path(Path(config_file)) mod = None try: mod = imp.load_source("konchrc", str(config_file)) except UnboundLocalError: # File not found pass else: return mod if not config_file: print_warning("No konch config file found.") else: print_warning(f'"{config_file}" not found.') return None
[ "def", "use_file", "(", "filename", ":", "typing", ".", "Union", "[", "Path", ",", "str", ",", "None", "]", ",", "trust", ":", "bool", "=", "False", ")", "->", "typing", ".", "Union", "[", "types", ".", "ModuleType", ",", "None", "]", ":", "config_file", "=", "filename", "or", "resolve_path", "(", "CONFIG_FILE", ")", "def", "preview_unauthorized", "(", ")", "->", "None", ":", "if", "not", "config_file", ":", "return", "None", "print", "(", "SEPARATOR", ",", "file", "=", "sys", ".", "stderr", ")", "with", "Path", "(", "config_file", ")", ".", "open", "(", "\"r\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "fp", ":", "for", "line", "in", "fp", ":", "print", "(", "line", ",", "end", "=", "\"\"", ",", "file", "=", "sys", ".", "stderr", ")", "print", "(", "SEPARATOR", ",", "file", "=", "sys", ".", "stderr", ")", "if", "config_file", "and", "not", "Path", "(", "config_file", ")", ".", "exists", "(", ")", ":", "print_error", "(", "f'\"{filename}\" not found.'", ")", "sys", ".", "exit", "(", "1", ")", "if", "config_file", "and", "Path", "(", "config_file", ")", ".", "exists", "(", ")", ":", "if", "not", "trust", ":", "with", "AuthFile", ".", "load", "(", ")", "as", "authfile", ":", "try", ":", "authfile", ".", "check", "(", "Path", "(", "config_file", ")", ")", "except", "KonchrcChangedError", ":", "print_error", "(", "f'\"{config_file}\" has changed since you last used it.'", ")", "preview_unauthorized", "(", ")", "if", "confirm", "(", "\"Would you like to authorize it?\"", ")", ":", "authfile", ".", "allow", "(", "Path", "(", "config_file", ")", ")", "print", "(", ")", "else", ":", "sys", ".", "exit", "(", "1", ")", "except", "KonchrcNotAuthorizedError", ":", "print_error", "(", "f'\"{config_file}\" is blocked.'", ")", "preview_unauthorized", "(", ")", "if", "confirm", "(", "\"Would you like to authorize it?\"", ")", ":", "authfile", ".", "allow", "(", "Path", "(", "config_file", ")", ")", "print", "(", ")", "else", ":", "sys", ".", "exit", "(", "1", ")", "logger", ".", "info", "(", "f\"Using {config_file}\"", ")", "# Ensure that relative imports are possible", "__ensure_directory_in_path", "(", "Path", "(", "config_file", ")", ")", "mod", "=", "None", "try", ":", "mod", "=", "imp", ".", "load_source", "(", "\"konchrc\"", ",", "str", "(", "config_file", ")", ")", "except", "UnboundLocalError", ":", "# File not found", "pass", "else", ":", "return", "mod", "if", "not", "config_file", ":", "print_warning", "(", "\"No konch config file found.\"", ")", "else", ":", "print_warning", "(", "f'\"{config_file}\" not found.'", ")", "return", "None" ]
Load filename as a python file. Import ``filename`` and return it as a module.
[ "Load", "filename", "as", "a", "python", "file", ".", "Import", "filename", "and", "return", "it", "as", "a", "module", "." ]
15160bd0a0cac967eeeab84794bd6cdd0b5b637d
https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L898-L954
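use_file loads the config through imp.load_source, and the imp module is deprecated (removed in Python 3.12). The sketch below is an assumed importlib-based replacement, not konch's actual code; the .konchrc path is hypothetical:

import importlib.util

def load_source(module_name: str, path: str):
    # Build a module spec from the file path and execute the module,
    # mirroring what imp.load_source used to do.
    spec = importlib.util.spec_from_file_location(module_name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

mod = load_source("konchrc", "./.konchrc")  # hypothetical path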
15,088
sloria/konch
konch.py
resolve_path
def resolve_path(filename: Path) -> typing.Union[Path, None]: """Find a file by walking up parent directories until the file is found. Return the absolute path of the file. """ current = Path.cwd() # Stop search at home directory sentinel_dir = Path.home().parent.resolve() while current != sentinel_dir: target = Path(current) / Path(filename) if target.exists(): return target.resolve() else: current = current.parent.resolve() return None
python
def resolve_path(filename: Path) -> typing.Union[Path, None]: """Find a file by walking up parent directories until the file is found. Return the absolute path of the file. """ current = Path.cwd() # Stop search at home directory sentinel_dir = Path.home().parent.resolve() while current != sentinel_dir: target = Path(current) / Path(filename) if target.exists(): return target.resolve() else: current = current.parent.resolve() return None
[ "def", "resolve_path", "(", "filename", ":", "Path", ")", "->", "typing", ".", "Union", "[", "Path", ",", "None", "]", ":", "current", "=", "Path", ".", "cwd", "(", ")", "# Stop search at home directory", "sentinel_dir", "=", "Path", ".", "home", "(", ")", ".", "parent", ".", "resolve", "(", ")", "while", "current", "!=", "sentinel_dir", ":", "target", "=", "Path", "(", "current", ")", "/", "Path", "(", "filename", ")", "if", "target", ".", "exists", "(", ")", ":", "return", "target", ".", "resolve", "(", ")", "else", ":", "current", "=", "current", ".", "parent", ".", "resolve", "(", ")", "return", "None" ]
Find a file by walking up parent directories until the file is found. Return the absolute path of the file.
[ "Find", "a", "file", "by", "walking", "up", "parent", "directories", "until", "the", "file", "is", "found", ".", "Return", "the", "absolute", "path", "of", "the", "file", "." ]
15160bd0a0cac967eeeab84794bd6cdd0b5b637d
https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L957-L971
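A worked example of the search order, assuming resolve_path from the record above is in scope and the directories are hypothetical: starting from /home/user/project/src, resolve_path(Path(".konchrc")) checks /home/user/project/src, then /home/user/project, then /home/user, and stops once the parent of the home directory is reached.

from pathlib import Path

found = resolve_path(Path(".konchrc"))
if found is None:
    print("no .konchrc between here and home")
else:
    print(f"using config at {found}")

Note that the sentinel is the parent of the home directory, so a search started outside the home tree (say, under /tmp) never meets the stopping condition.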
15,089
sloria/konch
konch.py
parse_args
def parse_args(argv: typing.Optional[typing.Sequence] = None) -> typing.Dict[str, str]: """Exposes the docopt command-line arguments parser. Return a dictionary of arguments. """ return docopt(__doc__, argv=argv, version=__version__)
python
def parse_args(argv: typing.Optional[typing.Sequence] = None) -> typing.Dict[str, str]: """Exposes the docopt command-line arguments parser. Return a dictionary of arguments. """ return docopt(__doc__, argv=argv, version=__version__)
[ "def", "parse_args", "(", "argv", ":", "typing", ".", "Optional", "[", "typing", ".", "Sequence", "]", "=", "None", ")", "->", "typing", ".", "Dict", "[", "str", ",", "str", "]", ":", "return", "docopt", "(", "__doc__", ",", "argv", "=", "argv", ",", "version", "=", "__version__", ")" ]
Exposes the docopt command-line arguments parser. Return a dictionary of arguments.
[ "Exposes", "the", "docopt", "command", "-", "line", "arguments", "parser", ".", "Return", "a", "dictionary", "of", "arguments", "." ]
15160bd0a0cac967eeeab84794bd6cdd0b5b637d
https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L1132-L1136
15,090
sloria/konch
konch.py
main
def main(argv: typing.Optional[typing.Sequence] = None) -> typing.NoReturn: """Main entry point for the konch CLI.""" args = parse_args(argv) if args["--debug"]: logging.basicConfig( format="%(levelname)s %(filename)s: %(message)s", level=logging.DEBUG ) logger.debug(args) config_file: typing.Union[Path, None] if args["init"]: config_file = Path(args["<config_file>"] or CONFIG_FILE) init_config(config_file) else: config_file = Path(args["<config_file>"]) if args["<config_file>"] else None if args["edit"]: edit_config(config_file) elif args["allow"]: allow_config(config_file) elif args["deny"]: deny_config(config_file) mod = use_file(Path(args["--file"]) if args["--file"] else None) if hasattr(mod, "setup"): mod.setup() # type: ignore if args["--name"]: if args["--name"] not in _config_registry: print_error(f'Invalid --name: "{args["--name"]}"') sys.exit(1) config_dict = _config_registry[args["--name"]] logger.debug(f'Using named config: "{args["--name"]}"') logger.debug(config_dict) else: config_dict = _cfg # Allow default shell to be overridden by command-line argument shell_name = args["--shell"] if shell_name: config_dict["shell"] = SHELL_MAP.get(shell_name.lower(), AutoShell) logger.debug(f"Starting with config {config_dict}") start(**config_dict) if hasattr(mod, "teardown"): mod.teardown() # type: ignore sys.exit(0)
python
def main(argv: typing.Optional[typing.Sequence] = None) -> typing.NoReturn: """Main entry point for the konch CLI.""" args = parse_args(argv) if args["--debug"]: logging.basicConfig( format="%(levelname)s %(filename)s: %(message)s", level=logging.DEBUG ) logger.debug(args) config_file: typing.Union[Path, None] if args["init"]: config_file = Path(args["<config_file>"] or CONFIG_FILE) init_config(config_file) else: config_file = Path(args["<config_file>"]) if args["<config_file>"] else None if args["edit"]: edit_config(config_file) elif args["allow"]: allow_config(config_file) elif args["deny"]: deny_config(config_file) mod = use_file(Path(args["--file"]) if args["--file"] else None) if hasattr(mod, "setup"): mod.setup() # type: ignore if args["--name"]: if args["--name"] not in _config_registry: print_error(f'Invalid --name: "{args["--name"]}"') sys.exit(1) config_dict = _config_registry[args["--name"]] logger.debug(f'Using named config: "{args["--name"]}"') logger.debug(config_dict) else: config_dict = _cfg # Allow default shell to be overridden by command-line argument shell_name = args["--shell"] if shell_name: config_dict["shell"] = SHELL_MAP.get(shell_name.lower(), AutoShell) logger.debug(f"Starting with config {config_dict}") start(**config_dict) if hasattr(mod, "teardown"): mod.teardown() # type: ignore sys.exit(0)
[ "def", "main", "(", "argv", ":", "typing", ".", "Optional", "[", "typing", ".", "Sequence", "]", "=", "None", ")", "->", "typing", ".", "NoReturn", ":", "args", "=", "parse_args", "(", "argv", ")", "if", "args", "[", "\"--debug\"", "]", ":", "logging", ".", "basicConfig", "(", "format", "=", "\"%(levelname)s %(filename)s: %(message)s\"", ",", "level", "=", "logging", ".", "DEBUG", ")", "logger", ".", "debug", "(", "args", ")", "config_file", ":", "typing", ".", "Union", "[", "Path", ",", "None", "]", "if", "args", "[", "\"init\"", "]", ":", "config_file", "=", "Path", "(", "args", "[", "\"<config_file>\"", "]", "or", "CONFIG_FILE", ")", "init_config", "(", "config_file", ")", "else", ":", "config_file", "=", "Path", "(", "args", "[", "\"<config_file>\"", "]", ")", "if", "args", "[", "\"<config_file>\"", "]", "else", "None", "if", "args", "[", "\"edit\"", "]", ":", "edit_config", "(", "config_file", ")", "elif", "args", "[", "\"allow\"", "]", ":", "allow_config", "(", "config_file", ")", "elif", "args", "[", "\"deny\"", "]", ":", "deny_config", "(", "config_file", ")", "mod", "=", "use_file", "(", "Path", "(", "args", "[", "\"--file\"", "]", ")", "if", "args", "[", "\"--file\"", "]", "else", "None", ")", "if", "hasattr", "(", "mod", ",", "\"setup\"", ")", ":", "mod", ".", "setup", "(", ")", "# type: ignore", "if", "args", "[", "\"--name\"", "]", ":", "if", "args", "[", "\"--name\"", "]", "not", "in", "_config_registry", ":", "print_error", "(", "f'Invalid --name: \"{args[\"--name\"]}\"'", ")", "sys", ".", "exit", "(", "1", ")", "config_dict", "=", "_config_registry", "[", "args", "[", "\"--name\"", "]", "]", "logger", ".", "debug", "(", "f'Using named config: \"{args[\"--name\"]}\"'", ")", "logger", ".", "debug", "(", "config_dict", ")", "else", ":", "config_dict", "=", "_cfg", "# Allow default shell to be overriden by command-line argument", "shell_name", "=", "args", "[", "\"--shell\"", "]", "if", "shell_name", ":", "config_dict", "[", "\"shell\"", "]", "=", "SHELL_MAP", ".", "get", "(", "shell_name", ".", "lower", "(", ")", ",", "AutoShell", ")", "logger", ".", "debug", "(", "f\"Starting with config {config_dict}\"", ")", "start", "(", "*", "*", "config_dict", ")", "if", "hasattr", "(", "mod", ",", "\"teardown\"", ")", ":", "mod", ".", "teardown", "(", ")", "# type: ignore", "sys", ".", "exit", "(", "0", ")" ]
Main entry point for the konch CLI.
[ "Main", "entry", "point", "for", "the", "konch", "CLI", "." ]
15160bd0a0cac967eeeab84794bd6cdd0b5b637d
https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L1139-L1184
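A programmatic invocation sketch, assuming konch is importable; main() always finishes with sys.exit, so SystemExit is caught here, and the flags mirror the options parsed above:

import konch

try:
    # roughly equivalent to running `konch --shell=ipy --debug`;
    # this launches an interactive shell
    konch.main(["--shell=ipy", "--debug"])
except SystemExit:
    pass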
15,091
sloria/konch
konch.py
IPythonShell.init_autoreload
def init_autoreload(mode: int) -> None: """Load and initialize the IPython autoreload extension.""" from IPython.extensions import autoreload ip = get_ipython() # type: ignore # noqa: F821 autoreload.load_ipython_extension(ip) ip.magics_manager.magics["line"]["autoreload"](str(mode))
python
def init_autoreload(mode: int) -> None: """Load and initialize the IPython autoreload extension.""" from IPython.extensions import autoreload ip = get_ipython() # type: ignore # noqa: F821 autoreload.load_ipython_extension(ip) ip.magics_manager.magics["line"]["autoreload"](str(mode))
[ "def", "init_autoreload", "(", "mode", ":", "int", ")", "->", "None", ":", "from", "IPython", ".", "extensions", "import", "autoreload", "ip", "=", "get_ipython", "(", ")", "# type: ignore # noqa: F821", "autoreload", ".", "load_ipython_extension", "(", "ip", ")", "ip", ".", "magics_manager", ".", "magics", "[", "\"line\"", "]", "[", "\"autoreload\"", "]", "(", "str", "(", "mode", ")", ")" ]
Load and initialize the IPython autoreload extension.
[ "Load", "and", "initialize", "the", "IPython", "autoreload", "extension", "." ]
15160bd0a0cac967eeeab84794bd6cdd0b5b637d
https://github.com/sloria/konch/blob/15160bd0a0cac967eeeab84794bd6cdd0b5b637d/konch.py#L427-L433
15,092
JamesPHoughton/pysd
pysd/py_backend/vensim/table2py.py
read_tabular
def read_tabular(table_file, sheetname='Sheet1'): """ Reads a vensim syntax model which has been formatted as a table. This is useful in contexts where model building is performed without the aid of Vensim. Parameters ---------- table_file: .csv, .tab or .xls(x) file Table should have columns titled as in the table below | Variable | Equation | Units | Min | Max | Comment | | :------- | :------- | :---- | :-- | :-- | :--------------- | | Age | 5 | Yrs | 0 | inf | How old are you? | | ... | ... | ... | ... | ... | ... | sheetname: basestring if the model is specified in an excel file, what sheet? Returns ------- PySD Model Object Notes ----- Creates an intermediate file in vensim `.mdl` syntax, just so that the existing vensim parsing machinery can be used. """ if isinstance(table_file, str): extension = table_file.split('.')[-1] if extension in ['xls', 'xlsx']: table = pd.read_excel(table_file, sheetname=sheetname) elif extension == 'csv': table = pd.read_csv(table_file, encoding='UTF-8') elif extension == 'tab': table = pd.read_csv(table_file, sep='\t', encoding='UTF-8') else: raise ValueError('Unknown file or table type') else: raise ValueError('Unknown file or table type') if not set(table.columns).issuperset({'Variable', 'Equation'}): raise ValueError('Table must contain at least columns "Variable" and "Equation"') if "Units" not in set(table.columns): warnings.warn('Column for "Units" not found', RuntimeWarning, stacklevel=2) table['Units'] = '' if "Min" not in set(table.columns): warnings.warn('Column for "Min" not found', RuntimeWarning, stacklevel=2) table['Min'] = '' if "Max" not in set(table.columns): warnings.warn('Column for "Max" not found', RuntimeWarning, stacklevel=2) table['Max'] = '' mdl_file = table_file.replace(extension, 'mdl') with open(mdl_file, 'w', encoding='UTF-8') as outfile: for element in table.to_dict(orient='records'): outfile.write( "%(Variable)s = \n" "\t %(Equation)s \n" "\t~\t %(Units)s [%(Min)s, %(Max)s] \n" "\t~\t %(Comment)s \n\t|\n\n" % element ) outfile.write(u'\\\---/// Sketch information - this is where sketch stuff would go.') return read_vensim(mdl_file)
python
def read_tabular(table_file, sheetname='Sheet1'): """ Reads a vensim syntax model which has been formatted as a table. This is useful in contexts where model building is performed without the aid of Vensim. Parameters ---------- table_file: .csv, .tab or .xls(x) file Table should have columns titled as in the table below | Variable | Equation | Units | Min | Max | Comment | | :------- | :------- | :---- | :-- | :-- | :--------------- | | Age | 5 | Yrs | 0 | inf | How old are you? | | ... | ... | ... | ... | ... | ... | sheetname: basestring if the model is specified in an excel file, what sheet? Returns ------- PySD Model Object Notes ----- Creates an intermediate file in vensim `.mdl` syntax, just so that the existing vensim parsing machinery can be used. """ if isinstance(table_file, str): extension = table_file.split('.')[-1] if extension in ['xls', 'xlsx']: table = pd.read_excel(table_file, sheetname=sheetname) elif extension == 'csv': table = pd.read_csv(table_file, encoding='UTF-8') elif extension == 'tab': table = pd.read_csv(table_file, sep='\t', encoding='UTF-8') else: raise ValueError('Unknown file or table type') else: raise ValueError('Unknown file or table type') if not set(table.columns).issuperset({'Variable', 'Equation'}): raise ValueError('Table must contain at least columns "Variable" and "Equation"') if "Units" not in set(table.columns): warnings.warn('Column for "Units" not found', RuntimeWarning, stacklevel=2) table['Units'] = '' if "Min" not in set(table.columns): warnings.warn('Column for "Min" not found', RuntimeWarning, stacklevel=2) table['Min'] = '' if "Max" not in set(table.columns): warnings.warn('Column for "Max" not found', RuntimeWarning, stacklevel=2) table['Max'] = '' mdl_file = table_file.replace(extension, 'mdl') with open(mdl_file, 'w', encoding='UTF-8') as outfile: for element in table.to_dict(orient='records'): outfile.write( "%(Variable)s = \n" "\t %(Equation)s \n" "\t~\t %(Units)s [%(Min)s, %(Max)s] \n" "\t~\t %(Comment)s \n\t|\n\n" % element ) outfile.write(u'\\\---/// Sketch information - this is where sketch stuff would go.') return read_vensim(mdl_file)
[ "def", "read_tabular", "(", "table_file", ",", "sheetname", "=", "'Sheet1'", ")", ":", "if", "isinstance", "(", "table_file", ",", "str", ")", ":", "extension", "=", "table_file", ".", "split", "(", "'.'", ")", "[", "-", "1", "]", "if", "extension", "in", "[", "'xls'", ",", "'xlsx'", "]", ":", "table", "=", "pd", ".", "read_excel", "(", "table_file", ",", "sheetname", "=", "sheetname", ")", "elif", "extension", "==", "'csv'", ":", "table", "=", "pd", ".", "read_csv", "(", "table_file", ",", "encoding", "=", "'UTF-8'", ")", "elif", "extension", "==", "'tab'", ":", "table", "=", "pd", ".", "read_csv", "(", "table_file", ",", "sep", "=", "'\\t'", ",", "encoding", "=", "'UTF-8'", ")", "else", ":", "raise", "ValueError", "(", "'Unknown file or table type'", ")", "else", ":", "raise", "ValueError", "(", "'Unknown file or table type'", ")", "if", "not", "set", "(", "table", ".", "columns", ")", ".", "issuperset", "(", "{", "'Variable'", ",", "'Equation'", "}", ")", ":", "raise", "ValueError", "(", "'Table must contain at least columns \"Variable\" and \"Equation\"'", ")", "if", "\"Units\"", "not", "in", "set", "(", "table", ".", "columns", ")", ":", "warnings", ".", "warn", "(", "'Column for \"Units\" not found'", ",", "RuntimeWarning", ",", "stacklevel", "=", "2", ")", "table", "[", "'Units'", "]", "=", "''", "if", "\"Min\"", "not", "in", "set", "(", "table", ".", "columns", ")", ":", "warnings", ".", "warn", "(", "'Column for \"Min\" not found'", ",", "RuntimeWarning", ",", "stacklevel", "=", "2", ")", "table", "[", "'Min'", "]", "=", "''", "if", "\"Max\"", "not", "in", "set", "(", "table", ".", "columns", ")", ":", "warnings", ".", "warn", "(", "'Column for \"Max\" not found'", ",", "RuntimeWarning", ",", "stacklevel", "=", "2", ")", "table", "[", "'Max'", "]", "=", "''", "mdl_file", "=", "table_file", ".", "replace", "(", "extension", ",", "'mdl'", ")", "with", "open", "(", "mdl_file", ",", "'w'", ",", "encoding", "=", "'UTF-8'", ")", "as", "outfile", ":", "for", "element", "in", "table", ".", "to_dict", "(", "orient", "=", "'records'", ")", ":", "outfile", ".", "write", "(", "\"%(Variable)s = \\n\"", "\"\\t %(Equation)s \\n\"", "\"\\t~\\t %(Units)s [%(Min)s, %(Max)s] \\n\"", "\"\\t~\\t %(Comment)s \\n\\t|\\n\\n\"", "%", "element", ")", "outfile", ".", "write", "(", "u'\\\\\\---/// Sketch information - this is where sketch stuff would go.'", ")", "return", "read_vensim", "(", "mdl_file", ")" ]
Reads a vensim syntax model which has been formatted as a table. This is useful in contexts where model building is performed without the aid of Vensim. Parameters ---------- table_file: .csv, .tab or .xls(x) file Table should have columns titled as in the table below | Variable | Equation | Units | Min | Max | Comment | | :------- | :------- | :---- | :-- | :-- | :--------------- | | Age | 5 | Yrs | 0 | inf | How old are you? | | ... | ... | ... | ... | ... | ... | sheetname: basestring if the model is specified in an excel file, what sheet? Returns ------- PySD Model Object Notes ----- Creates an intermediate file in vensim `.mdl` syntax, just so that the existing vensim parsing machinery can be used.
[ "Reads", "a", "vensim", "syntax", "model", "which", "has", "been", "formatted", "as", "a", "table", "." ]
bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda
https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/vensim/table2py.py#L6-L80
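A hypothetical round trip through read_tabular; the filename and row are invented, and the column names follow the docstring above. Note that the writer interpolates %(Comment)s, so a Comment column is effectively required even though only Units, Min and Max are defaulted with a warning:

import pandas as pd

pd.DataFrame({
    "Variable": ["Age"],
    "Equation": ["5"],
    "Units": ["Yrs"],
    "Min": ["0"],
    "Max": ["inf"],
    "Comment": ["How old are you?"],
}).to_csv("model.csv", index=False)  # made-up filename

model = read_tabular("model.csv")  # writes model.mdl, then parses it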
15,093
JamesPHoughton/pysd
pysd/pysd.py
read_xmile
def read_xmile(xmile_file): """ Construct a model object from a `.xmile` file. """ from . import py_backend from .py_backend.xmile.xmile2py import translate_xmile py_model_file = translate_xmile(xmile_file) model = load(py_model_file) model.xmile_file = xmile_file return model
python
def read_xmile(xmile_file): """ Construct a model object from a `.xmile` file. """ from . import py_backend from .py_backend.xmile.xmile2py import translate_xmile py_model_file = translate_xmile(xmile_file) model = load(py_model_file) model.xmile_file = xmile_file return model
[ "def", "read_xmile", "(", "xmile_file", ")", ":", "from", ".", "import", "py_backend", "from", ".", "py_backend", ".", "xmile", ".", "xmile2py", "import", "translate_xmile", "py_model_file", "=", "translate_xmile", "(", "xmile_file", ")", "model", "=", "load", "(", "py_model_file", ")", "model", ".", "xmile_file", "=", "xmile_file", "return", "model" ]
Construct a model object from a `.xmile` file.
[ "Construct", "a", "model", "object", "from", "a", ".", "xmile", "file", "." ]
bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda
https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/pysd.py#L16-L23
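Minimal usage sketch, assuming pysd is importable; 'teacup.xmile' is a placeholder path:

import pysd

model = pysd.read_xmile("teacup.xmile")  # translate to python, then load
results = model.run()                    # run the translated model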
15,094
JamesPHoughton/pysd
pysd/pysd.py
read_vensim
def read_vensim(mdl_file): """ Construct a model from a Vensim `.mdl` file. Parameters ---------- mdl_file : <string> The relative path filename for a raw Vensim `.mdl` file Returns ------- model: a PySD class object Elements from the python model are loaded into the PySD class and ready to run Examples -------- >>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl') """ from .py_backend.vensim.vensim2py import translate_vensim from .py_backend import functions py_model_file = translate_vensim(mdl_file) model = functions.Model(py_model_file) model.mdl_file = mdl_file return model
python
def read_vensim(mdl_file): """ Construct a model from a Vensim `.mdl` file. Parameters ---------- mdl_file : <string> The relative path filename for a raw Vensim `.mdl` file Returns ------- model: a PySD class object Elements from the python model are loaded into the PySD class and ready to run Examples -------- >>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl') """ from .py_backend.vensim.vensim2py import translate_vensim from .py_backend import functions py_model_file = translate_vensim(mdl_file) model = functions.Model(py_model_file) model.mdl_file = mdl_file return model
[ "def", "read_vensim", "(", "mdl_file", ")", ":", "from", ".", "py_backend", ".", "vensim", ".", "vensim2py", "import", "translate_vensim", "from", ".", "py_backend", "import", "functions", "py_model_file", "=", "translate_vensim", "(", "mdl_file", ")", "model", "=", "functions", ".", "Model", "(", "py_model_file", ")", "model", ".", "mdl_file", "=", "mdl_file", "return", "model" ]
Construct a model from a Vensim `.mdl` file. Parameters ---------- mdl_file : <string> The relative path filename for a raw Vensim `.mdl` file Returns ------- model: a PySD class object Elements from the python model are loaded into the PySD class and ready to run Examples -------- >>> model = read_vensim('../tests/test-models/samples/teacup/teacup.mdl')
[ "Construct", "a", "model", "from", "a", "Vensim", ".", "mdl", "file", "." ]
bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda
https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/pysd.py#L25-L49
15,095
JamesPHoughton/pysd
pysd/py_backend/functions.py
cache
def cache(horizon): """ Put a wrapper around a model function Decorators with parameters are tricky: you have to essentially create a decorator that returns a decorator, which itself then returns the function wrapper. Parameters ---------- horizon: string - 'step' means cache just until the next timestep - 'run' means cache until the next initialization of the model Returns ------- new_func: decorated function function wrapping the original function, handling caching """ def cache_step(func): """ Decorator for caching at a step level""" @wraps(func) def cached(*args): """Step wise cache function""" try: # fails if cache is out of date or not instantiated data = func.__globals__['__data'] assert cached.cache_t == data['time']() assert hasattr(cached, 'cache_val') assert cached.cache_val is not None except (AssertionError, AttributeError): cached.cache_val = func(*args) data = func.__globals__['__data'] cached.cache_t = data['time']() return cached.cache_val return cached def cache_run(func): """ Decorator for caching at the run level""" @wraps(func) def cached(*args): """Run wise cache function""" try: # fails if cache is not instantiated return cached.cache_val except AttributeError: cached.cache_val = func(*args) return cached.cache_val return cached if horizon == 'step': return cache_step elif horizon == 'run': return cache_run else: raise (AttributeError('Bad horizon for cache decorator'))
python
def cache(horizon): """ Put a wrapper around a model function Decorators with parameters are tricky: you have to essentially create a decorator that returns a decorator, which itself then returns the function wrapper. Parameters ---------- horizon: string - 'step' means cache just until the next timestep - 'run' means cache until the next initialization of the model Returns ------- new_func: decorated function function wrapping the original function, handling caching """ def cache_step(func): """ Decorator for caching at a step level""" @wraps(func) def cached(*args): """Step wise cache function""" try: # fails if cache is out of date or not instantiated data = func.__globals__['__data'] assert cached.cache_t == data['time']() assert hasattr(cached, 'cache_val') assert cached.cache_val is not None except (AssertionError, AttributeError): cached.cache_val = func(*args) data = func.__globals__['__data'] cached.cache_t = data['time']() return cached.cache_val return cached def cache_run(func): """ Decorator for caching at the run level""" @wraps(func) def cached(*args): """Run wise cache function""" try: # fails if cache is not instantiated return cached.cache_val except AttributeError: cached.cache_val = func(*args) return cached.cache_val return cached if horizon == 'step': return cache_step elif horizon == 'run': return cache_run else: raise (AttributeError('Bad horizon for cache decorator'))
[ "def", "cache", "(", "horizon", ")", ":", "def", "cache_step", "(", "func", ")", ":", "\"\"\" Decorator for caching at a step level\"\"\"", "@", "wraps", "(", "func", ")", "def", "cached", "(", "*", "args", ")", ":", "\"\"\"Step wise cache function\"\"\"", "try", ":", "# fails if cache is out of date or not instantiated", "data", "=", "func", ".", "__globals__", "[", "'__data'", "]", "assert", "cached", ".", "cache_t", "==", "data", "[", "'time'", "]", "(", ")", "assert", "hasattr", "(", "cached", ",", "'cache_val'", ")", "assert", "cached", ".", "cache_val", "is", "not", "None", "except", "(", "AssertionError", ",", "AttributeError", ")", ":", "cached", ".", "cache_val", "=", "func", "(", "*", "args", ")", "data", "=", "func", ".", "__globals__", "[", "'__data'", "]", "cached", ".", "cache_t", "=", "data", "[", "'time'", "]", "(", ")", "return", "cached", ".", "cache_val", "return", "cached", "def", "cache_run", "(", "func", ")", ":", "\"\"\" Decorator for caching at the run level\"\"\"", "@", "wraps", "(", "func", ")", "def", "cached", "(", "*", "args", ")", ":", "\"\"\"Run wise cache function\"\"\"", "try", ":", "# fails if cache is not instantiated", "return", "cached", ".", "cache_val", "except", "AttributeError", ":", "cached", ".", "cache_val", "=", "func", "(", "*", "args", ")", "return", "cached", ".", "cache_val", "return", "cached", "if", "horizon", "==", "'step'", ":", "return", "cache_step", "elif", "horizon", "==", "'run'", ":", "return", "cache_run", "else", ":", "raise", "(", "AttributeError", "(", "'Bad horizon for cache decorator'", ")", ")" ]
Put a wrapper around a model function Decorators with parameters are tricky: you have to essentially create a decorator that returns a decorator, which itself then returns the function wrapper. Parameters ---------- horizon: string - 'step' means cache just until the next timestep - 'run' means cache until the next initialization of the model Returns ------- new_func: decorated function function wrapping the original function, handling caching
[ "Put", "a", "wrapper", "around", "a", "model", "function" ]
bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda
https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L44-L105
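A small sketch of the decorator at run scope, assuming cache from the record above is in scope; the function and value are invented:

@cache('run')
def land_area():
    print("computed once per run")
    return 51.0

land_area()  # computes and stores cache_val on the wrapper
land_area()  # served from cache_val, no recomputation

The 'step' horizon additionally reads func.__globals__['__data']['time'], which PySD injects into generated model modules, so cache('step') only works inside such a module.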
15,096
JamesPHoughton/pysd
pysd/py_backend/functions.py
ramp
def ramp(time, slope, start, finish=0): """ Implements vensim's and xmile's RAMP function Parameters ---------- time: function The current time of modelling slope: float The slope of the ramp starting at zero at time start start: float Time at which the ramp begins finish: float Optional. Time at which the ramp ends Returns ------- response: float If prior to ramp start, returns zero If after ramp ends, returns top of ramp Examples -------- """ t = time() if t < start: return 0 else: if finish <= 0: return slope * (t - start) elif t > finish: return slope * (finish - start) else: return slope * (t - start)
python
def ramp(time, slope, start, finish=0): """ Implements vensim's and xmile's RAMP function Parameters ---------- time: function The current time of modelling slope: float The slope of the ramp starting at zero at time start start: float Time at which the ramp begins finish: float Optional. Time at which the ramp ends Returns ------- response: float If prior to ramp start, returns zero If after ramp ends, returns top of ramp Examples -------- """ t = time() if t < start: return 0 else: if finish <= 0: return slope * (t - start) elif t > finish: return slope * (finish - start) else: return slope * (t - start)
[ "def", "ramp", "(", "time", ",", "slope", ",", "start", ",", "finish", "=", "0", ")", ":", "t", "=", "time", "(", ")", "if", "t", "<", "start", ":", "return", "0", "else", ":", "if", "finish", "<=", "0", ":", "return", "slope", "*", "(", "t", "-", "start", ")", "elif", "t", ">", "finish", ":", "return", "slope", "*", "(", "finish", "-", "start", ")", "else", ":", "return", "slope", "*", "(", "t", "-", "start", ")" ]
Implements vensim's and xmile's RAMP function Parameters ---------- time: function The current time of modelling slope: float The slope of the ramp starting at zero at time start start: float Time at which the ramp begins finish: float Optional. Time at which the ramp ends Returns ------- response: float If prior to ramp start, returns zero If after ramp ends, returns top of ramp Examples --------
[ "Implements", "vensim", "s", "and", "xmile", "s", "RAMP", "function" ]
bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda
https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L803-L837
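Worked values, assuming ramp from the record above is in scope; the time argument is a zero-argument callable:

ramp(lambda: 3, 2, 5, 10)   # 0  -- before the ramp starts
ramp(lambda: 7, 2, 5, 10)   # 4  -- on the ramp: 2 * (7 - 5)
ramp(lambda: 12, 2, 5, 10)  # 10 -- held at the top: 2 * (10 - 5)
ramp(lambda: 12, 2, 5)      # 14 -- finish <= 0 means the ramp never levels off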
15,097
JamesPHoughton/pysd
pysd/py_backend/functions.py
pulse
def pulse(time, start, duration): """ Implements vensim's PULSE function In range (-inf, start) returns 0 In range [start, start + duration) returns 1 In range [start + duration, +inf) returns 0 """ t = time() return 1 if start <= t < start + duration else 0
python
def pulse(time, start, duration): """ Implements vensim's PULSE function In range (-inf, start) returns 0 In range [start, start + duration) returns 1 In range [start + duration, +inf) returns 0 """ t = time() return 1 if start <= t < start + duration else 0
[ "def", "pulse", "(", "time", ",", "start", ",", "duration", ")", ":", "t", "=", "time", "(", ")", "return", "1", "if", "start", "<=", "t", "<", "start", "+", "duration", "else", "0" ]
Implements vensim's PULSE function In range (-inf, start) returns 0 In range [start, start + duration) returns 1 In range [start + duration, +inf) returns 0
[ "Implements", "vensim", "s", "PULSE", "function" ]
bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda
https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L859-L867
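Worked values, assuming pulse from the record above is in scope; with start=2 and duration=3 the pulse covers [2, 5):

pulse(lambda: 1.0, 2, 3)  # 0 -- before the pulse
pulse(lambda: 2.0, 2, 3)  # 1 -- start is inclusive
pulse(lambda: 4.9, 2, 3)  # 1 -- still inside [start, start + duration)
pulse(lambda: 5.0, 2, 3)  # 0 -- start + duration is exclusive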
15,098
JamesPHoughton/pysd
pysd/py_backend/functions.py
pulse_train
def pulse_train(time, start, duration, repeat_time, end): """ Implements vensim's PULSE TRAIN function In range (-inf, start) returns 0 In range [start + n * repeat_time, start + n * repeat_time + duration) returns 1 In range [start + n * repeat_time + duration, start + (n+1) * repeat_time) returns 0 """ t = time() if start <= t < end: return 1 if (t - start) % repeat_time < duration else 0 else: return 0
python
def pulse_train(time, start, duration, repeat_time, end): """ Implements vensim's PULSE TRAIN function In range (-inf, start) returns 0 In range [start + n * repeat_time, start + n * repeat_time + duration) returns 1 In range [start + n * repeat_time + duration, start + (n+1) * repeat_time) returns 0 """ t = time() if start <= t < end: return 1 if (t - start) % repeat_time < duration else 0 else: return 0
[ "def", "pulse_train", "(", "time", ",", "start", ",", "duration", ",", "repeat_time", ",", "end", ")", ":", "t", "=", "time", "(", ")", "if", "start", "<=", "t", "<", "end", ":", "return", "1", "if", "(", "t", "-", "start", ")", "%", "repeat_time", "<", "duration", "else", "0", "else", ":", "return", "0" ]
Implements vensim's PULSE TRAIN function In range (-inf, start) returns 0 In range [start + n * repeat_time, start + n * repeat_time + duration) returns 1 In range [start + n * repeat_time + duration, start + (n+1) * repeat_time) returns 0
[ "Implements", "vensim", "s", "PULSE", "TRAIN", "function" ]
bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda
https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L870-L881
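Worked values, assuming pulse_train from the record above is in scope; with start=1, duration=2, repeat_time=5, end=12 the pulses cover [1, 3), [6, 8) and [11, 12):

pulse_train(lambda: 0.0, 1, 2, 5, 12)   # 0 -- before start
pulse_train(lambda: 1.5, 1, 2, 5, 12)   # 1 -- first pulse, [1, 3)
pulse_train(lambda: 4.0, 1, 2, 5, 12)   # 0 -- gap between pulses
pulse_train(lambda: 6.5, 1, 2, 5, 12)   # 1 -- second pulse, [6, 8)
pulse_train(lambda: 12.0, 1, 2, 5, 12)  # 0 -- end is exclusive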
15,099
JamesPHoughton/pysd
pysd/py_backend/functions.py
lookup_extrapolation
def lookup_extrapolation(x, xs, ys): """ Intermediate values are calculated with linear interpolation between the intermediate points. Out-of-range values are calculated with linear extrapolation from the last two values at either end. """ length = len(xs) if x < xs[0]: dx = xs[1] - xs[0] dy = ys[1] - ys[0] k = dy / dx return ys[0] + (x - xs[0]) * k if x > xs[length - 1]: dx = xs[length - 1] - xs[length - 2] dy = ys[length - 1] - ys[length - 2] k = dy / dx return ys[length - 1] + (x - xs[length - 1]) * k return np.interp(x, xs, ys)
python
def lookup_extrapolation(x, xs, ys): """ Intermediate values are calculated with linear interpolation between the intermediate points. Out-of-range values are calculated with linear extrapolation from the last two values at either end. """ length = len(xs) if x < xs[0]: dx = xs[1] - xs[0] dy = ys[1] - ys[0] k = dy / dx return ys[0] + (x - xs[0]) * k if x > xs[length - 1]: dx = xs[length - 1] - xs[length - 2] dy = ys[length - 1] - ys[length - 2] k = dy / dx return ys[length - 1] + (x - xs[length - 1]) * k return np.interp(x, xs, ys)
[ "def", "lookup_extrapolation", "(", "x", ",", "xs", ",", "ys", ")", ":", "length", "=", "len", "(", "xs", ")", "if", "x", "<", "xs", "[", "0", "]", ":", "dx", "=", "xs", "[", "1", "]", "-", "xs", "[", "0", "]", "dy", "=", "ys", "[", "1", "]", "-", "ys", "[", "0", "]", "k", "=", "dy", "/", "dx", "return", "ys", "[", "0", "]", "+", "(", "x", "-", "xs", "[", "0", "]", ")", "*", "k", "if", "x", ">", "xs", "[", "length", "-", "1", "]", ":", "dx", "=", "xs", "[", "length", "-", "1", "]", "-", "xs", "[", "length", "-", "2", "]", "dy", "=", "ys", "[", "length", "-", "1", "]", "-", "ys", "[", "length", "-", "2", "]", "k", "=", "dy", "/", "dx", "return", "ys", "[", "length", "-", "1", "]", "+", "(", "x", "-", "xs", "[", "length", "-", "1", "]", ")", "*", "k", "return", "np", ".", "interp", "(", "x", ",", "xs", ",", "ys", ")" ]
Intermediate values are calculated with linear interpolation between the intermediate points. Out-of-range values are calculated with linear extrapolation from the last two values at either end.
[ "Intermediate", "values", "are", "calculated", "with", "linear", "interpolation", "between", "the", "intermediate", "points", ".", "Out", "-", "of", "-", "range", "values", "are", "calculated", "with", "linear", "extrapolation", "from", "the", "last", "two", "values", "at", "either", "end", "." ]
bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda
https://github.com/JamesPHoughton/pysd/blob/bf1b1d03954e9ba5acac9ba4f1ada7cd93352eda/pysd/py_backend/functions.py#L917-L933
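Worked values for made-up data points xs=[0, 1, 2], ys=[0, 10, 30], assuming lookup_extrapolation from the record above is in scope:

lookup_extrapolation(0.5, [0, 1, 2], [0, 10, 30])  # 5.0   -- np.interp between (0, 0) and (1, 10)
lookup_extrapolation(-1, [0, 1, 2], [0, 10, 30])   # -10.0 -- extrapolated with the first segment's slope, 10
lookup_extrapolation(3, [0, 1, 2], [0, 10, 30])    # 50.0  -- extrapolated with the last segment's slope, 20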