Schema (field: type, value range):
id: int32, 0-252k
repo: string, lengths 7-55
path: string, lengths 4-127
func_name: string, lengths 1-88
original_string: string, lengths 75-19.8k
language: string, 1 class
code: string, lengths 75-19.8k
code_tokens: list
docstring: string, lengths 3-17.3k
docstring_tokens: list
sha: string, lengths 40-40
url: string, lengths 87-242
13,700
tmr232/Sark
sark/code/function.py
Function.set_name
def set_name(self, name, anyway=False):
    """Set Function Name.

    Default behavior throws an exception when setting to a name
    that already exists in the IDB. To make IDA automatically add
    a counter to the name (like in the GUI), use `anyway=True`.

    Args:
        name: Desired name.
        anyway: `True` to set anyway.
    """
    set_name(self.startEA, name, anyway=anyway)
python
def set_name(self, name, anyway=False):
    """Set Function Name.

    Default behavior throws an exception when setting to a name
    that already exists in the IDB. To make IDA automatically add
    a counter to the name (like in the GUI), use `anyway=True`.

    Args:
        name: Desired name.
        anyway: `True` to set anyway.
    """
    set_name(self.startEA, name, anyway=anyway)
[ "def", "set_name", "(", "self", ",", "name", ",", "anyway", "=", "False", ")", ":", "set_name", "(", "self", ".", "startEA", ",", "name", ",", "anyway", "=", "anyway", ")" ]
Set Function Name.

Default behavior throws an exception when setting to a name that already exists in the IDB. To make IDA automatically add a counter to the name (like in the GUI), use `anyway=True`.

Args:
    name: Desired name.
    anyway: `True` to set anyway.
[ "Set", "Function", "Name", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/function.py#L308-L319
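A minimal usage sketch of `set_name`, assuming an IDA session with Sark loaded; the function object and name are hypothetical:

import sark

# `Function()` with no arguments wraps the function at the current address.
func = sark.Function()
func.set_name("parse_header")               # raises if the name already exists in the IDB
func.set_name("parse_header", anyway=True)  # let IDA append a counter instead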
13,701
tmr232/Sark
sark/code/function.py
Function.color
def color(self):
    """Function color in IDA View"""
    color = idc.GetColor(self.ea, idc.CIC_FUNC)

    if color == 0xFFFFFFFF:
        return None

    return color
python
def color(self):
    """Function color in IDA View"""
    color = idc.GetColor(self.ea, idc.CIC_FUNC)

    if color == 0xFFFFFFFF:
        return None

    return color
[ "def", "color", "(", "self", ")", ":", "color", "=", "idc", ".", "GetColor", "(", "self", ".", "ea", ",", "idc", ".", "CIC_FUNC", ")", "if", "color", "==", "0xFFFFFFFF", ":", "return", "None", "return", "color" ]
Function color in IDA View
[ "Function", "color", "in", "IDA", "View" ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/function.py#L336-L342
13,702
tmr232/Sark
sark/code/function.py
Function.color
def color(self, color):
    """Function Color in IDA View.

    Set color to `None` to clear the color.
    """
    if color is None:
        color = 0xFFFFFFFF

    idc.SetColor(self.ea, idc.CIC_FUNC, color)
python
def color(self, color):
    """Function Color in IDA View.

    Set color to `None` to clear the color.
    """
    if color is None:
        color = 0xFFFFFFFF

    idc.SetColor(self.ea, idc.CIC_FUNC, color)
[ "def", "color", "(", "self", ",", "color", ")", ":", "if", "color", "is", "None", ":", "color", "=", "0xFFFFFFFF", "idc", ".", "SetColor", "(", "self", ".", "ea", ",", "idc", ".", "CIC_FUNC", ",", "color", ")" ]
Function Color in IDA View. Set color to `None` to clear the color.
[ "Function", "Color", "in", "IDA", "View", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/function.py#L346-L354
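A short sketch of the `color` getter/setter pair above, reusing the hypothetical `func` object from the earlier example:

func.color = 0x2020C0   # paint the function in IDA View (BGR value)
print(func.color)       # reads the color back, or None when unset
func.color = None       # clearing maps to 0xFFFFFFFF internally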
13,703
tmr232/Sark
sark/code/line.py
lines
def lines(start=None, end=None, reverse=False, selection=False):
    """Iterate lines in range.

    Args:
        start: Starting address, start of IDB if `None`.
        end: End address, end of IDB if `None`.
        reverse: Set to true to iterate in reverse order.
        selection: If set to True, replaces start and end with current selection.

    Returns:
        iterator of `Line` objects.
    """
    if selection:
        start, end = get_selection()
    else:
        start, end = fix_addresses(start, end)

    if not reverse:
        item = idaapi.get_item_head(start)
        while item < end:
            yield Line(item)
            item += idaapi.get_item_size(item)
    else:  # if reverse:
        item = idaapi.get_item_head(end - 1)
        while item >= start:
            yield Line(item)
            item = idaapi.get_item_head(item - 1)
python
def lines(start=None, end=None, reverse=False, selection=False):
    """Iterate lines in range.

    Args:
        start: Starting address, start of IDB if `None`.
        end: End address, end of IDB if `None`.
        reverse: Set to true to iterate in reverse order.
        selection: If set to True, replaces start and end with current selection.

    Returns:
        iterator of `Line` objects.
    """
    if selection:
        start, end = get_selection()
    else:
        start, end = fix_addresses(start, end)

    if not reverse:
        item = idaapi.get_item_head(start)
        while item < end:
            yield Line(item)
            item += idaapi.get_item_size(item)
    else:  # if reverse:
        item = idaapi.get_item_head(end - 1)
        while item >= start:
            yield Line(item)
            item = idaapi.get_item_head(item - 1)
[ "def", "lines", "(", "start", "=", "None", ",", "end", "=", "None", ",", "reverse", "=", "False", ",", "selection", "=", "False", ")", ":", "if", "selection", ":", "start", ",", "end", "=", "get_selection", "(", ")", "else", ":", "start", ",", "end", "=", "fix_addresses", "(", "start", ",", "end", ")", "if", "not", "reverse", ":", "item", "=", "idaapi", ".", "get_item_head", "(", "start", ")", "while", "item", "<", "end", ":", "yield", "Line", "(", "item", ")", "item", "+=", "idaapi", ".", "get_item_size", "(", "item", ")", "else", ":", "# if reverse:", "item", "=", "idaapi", ".", "get_item_head", "(", "end", "-", "1", ")", "while", "item", ">=", "start", ":", "yield", "Line", "(", "item", ")", "item", "=", "idaapi", ".", "get_item_head", "(", "item", "-", "1", ")" ]
Iterate lines in range.

Args:
    start: Starting address, start of IDB if `None`.
    end: End address, end of IDB if `None`.
    reverse: Set to true to iterate in reverse order.
    selection: If set to True, replaces start and end with current selection.

Returns:
    iterator of `Line` objects.
[ "Iterate", "lines", "in", "range", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/line.py#L327-L355
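A usage sketch for `lines`, assuming Sark re-exports it at package level as `sark.lines`:

import sark

# Count code lines across the whole IDB; pass reverse=True to walk backwards,
# or selection=True to restrict the walk to the current selection.
code_lines = sum(1 for line in sark.lines() if line.is_code)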
13,704
tmr232/Sark
sark/code/line.py
Line.type
def type(self):
    """Return the type of the Line."""
    properties = {self.is_code: "code",
                  self.is_data: "data",
                  self.is_string: "string",
                  self.is_tail: "tail",
                  self.is_unknown: "unknown"}

    for k, v in properties.items():
        if k:
            return v
python
def type(self):
    """Return the type of the Line."""
    properties = {self.is_code: "code",
                  self.is_data: "data",
                  self.is_string: "string",
                  self.is_tail: "tail",
                  self.is_unknown: "unknown"}

    for k, v in properties.items():
        if k:
            return v
[ "def", "type", "(", "self", ")", ":", "properties", "=", "{", "self", ".", "is_code", ":", "\"code\"", ",", "self", ".", "is_data", ":", "\"data\"", ",", "self", ".", "is_string", ":", "\"string\"", ",", "self", ".", "is_tail", ":", "\"tail\"", ",", "self", ".", "is_unknown", ":", "\"unknown\"", "}", "for", "k", ",", "v", "in", "properties", ".", "items", "(", ")", ":", "if", "k", ":", "return", "v" ]
Return the type of the Line.
[ "return", "the", "type", "of", "the", "Line" ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/line.py#L195-L203
13,705
tmr232/Sark
sark/code/line.py
Line.color
def color(self):
    """Line color in IDA View"""
    color = idc.GetColor(self.ea, idc.CIC_ITEM)

    if color == 0xFFFFFFFF:
        return None

    return color
python
def color(self):
    """Line color in IDA View"""
    color = idc.GetColor(self.ea, idc.CIC_ITEM)

    if color == 0xFFFFFFFF:
        return None

    return color
[ "def", "color", "(", "self", ")", ":", "color", "=", "idc", ".", "GetColor", "(", "self", ".", "ea", ",", "idc", ".", "CIC_ITEM", ")", "if", "color", "==", "0xFFFFFFFF", ":", "return", "None", "return", "color" ]
Line color in IDA View
[ "Line", "color", "in", "IDA", "View" ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/line.py#L274-L280
13,706
tmr232/Sark
sark/code/line.py
Line.color
def color(self, color):
    """Line Color in IDA View.

    Set color to `None` to clear the color.
    """
    if color is None:
        color = 0xFFFFFFFF

    idc.SetColor(self.ea, idc.CIC_ITEM, color)
python
def color(self, color):
    """Line Color in IDA View.

    Set color to `None` to clear the color.
    """
    if color is None:
        color = 0xFFFFFFFF

    idc.SetColor(self.ea, idc.CIC_ITEM, color)
[ "def", "color", "(", "self", ",", "color", ")", ":", "if", "color", "is", "None", ":", "color", "=", "0xFFFFFFFF", "idc", ".", "SetColor", "(", "self", ".", "ea", ",", "idc", ".", "CIC_ITEM", ",", "color", ")" ]
Line Color in IDA View. Set color to `None` to clear the color.
[ "Line", "Color", "in", "IDA", "View", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/line.py#L284-L292
13,707
tmr232/Sark
sark/qt.py
capture_widget
def capture_widget(widget, path=None):
    """Grab an image of a Qt widget

    Args:
        widget: The Qt Widget to capture
        path (optional): The path to save to. If not provided - will return image data.

    Returns:
        If a path is provided, the image will be saved to it.
        If not, the PNG buffer will be returned.
    """
    if use_qt5:
        pixmap = widget.grab()
    else:
        pixmap = QtGui.QPixmap.grabWidget(widget)

    if path:
        pixmap.save(path)
    else:
        image_buffer = QtCore.QBuffer()
        image_buffer.open(QtCore.QIODevice.ReadWrite)
        pixmap.save(image_buffer, "PNG")
        return image_buffer.data().data()
python
def capture_widget(widget, path=None):
    """Grab an image of a Qt widget

    Args:
        widget: The Qt Widget to capture
        path (optional): The path to save to. If not provided - will return image data.

    Returns:
        If a path is provided, the image will be saved to it.
        If not, the PNG buffer will be returned.
    """
    if use_qt5:
        pixmap = widget.grab()
    else:
        pixmap = QtGui.QPixmap.grabWidget(widget)

    if path:
        pixmap.save(path)
    else:
        image_buffer = QtCore.QBuffer()
        image_buffer.open(QtCore.QIODevice.ReadWrite)
        pixmap.save(image_buffer, "PNG")
        return image_buffer.data().data()
[ "def", "capture_widget", "(", "widget", ",", "path", "=", "None", ")", ":", "if", "use_qt5", ":", "pixmap", "=", "widget", ".", "grab", "(", ")", "else", ":", "pixmap", "=", "QtGui", ".", "QPixmap", ".", "grabWidget", "(", "widget", ")", "if", "path", ":", "pixmap", ".", "save", "(", "path", ")", "else", ":", "image_buffer", "=", "QtCore", ".", "QBuffer", "(", ")", "image_buffer", ".", "open", "(", "QtCore", ".", "QIODevice", ".", "ReadWrite", ")", "pixmap", ".", "save", "(", "image_buffer", ",", "\"PNG\"", ")", "return", "image_buffer", ".", "data", "(", ")", ".", "data", "(", ")" ]
Grab an image of a Qt widget

Args:
    widget: The Qt Widget to capture
    path (optional): The path to save to. If not provided - will return image data.

Returns:
    If a path is provided, the image will be saved to it.
    If not, the PNG buffer will be returned.
[ "Grab", "an", "image", "of", "a", "Qt", "widget" ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/qt.py#L14-L39
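A sketch of how `capture_widget` might be used together with `get_widget` (shown in the next record, same sark/qt.py module); the window title is a hypothetical example:

from sark.qt import get_widget, capture_widget

widget = get_widget("IDA View-A")        # title assumed to exist in this session
capture_widget(widget, "ida_view.png")   # save a screenshot to disk
png_bytes = capture_widget(widget)       # or get the raw PNG buffer instead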
13,708
tmr232/Sark
sark/qt.py
get_widget
def get_widget(title):
    """Get the Qt widget of the IDA window with the given title."""
    tform = idaapi.find_tform(title)
    if not tform:
        raise exceptions.FormNotFound("No form titled {!r} found.".format(title))

    return form_to_widget(tform)
python
def get_widget(title):
    """Get the Qt widget of the IDA window with the given title."""
    tform = idaapi.find_tform(title)
    if not tform:
        raise exceptions.FormNotFound("No form titled {!r} found.".format(title))

    return form_to_widget(tform)
[ "def", "get_widget", "(", "title", ")", ":", "tform", "=", "idaapi", ".", "find_tform", "(", "title", ")", "if", "not", "tform", ":", "raise", "exceptions", ".", "FormNotFound", "(", "\"No form titled {!r} found.\"", ".", "format", "(", "title", ")", ")", "return", "form_to_widget", "(", "tform", ")" ]
Get the Qt widget of the IDA window with the given title.
[ "Get", "the", "Qt", "widget", "of", "the", "IDA", "window", "with", "the", "given", "title", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/qt.py#L42-L48
13,709
tmr232/Sark
sark/qt.py
get_window
def get_window():
    """Get IDA's top level window."""
    tform = idaapi.get_current_tform()

    # Required sometimes when closing IDBs and not IDA.
    if not tform:
        tform = idaapi.find_tform("Output window")

    widget = form_to_widget(tform)
    window = widget.window()
    return window
python
def get_window():
    """Get IDA's top level window."""
    tform = idaapi.get_current_tform()

    # Required sometimes when closing IDBs and not IDA.
    if not tform:
        tform = idaapi.find_tform("Output window")

    widget = form_to_widget(tform)
    window = widget.window()
    return window
[ "def", "get_window", "(", ")", ":", "tform", "=", "idaapi", ".", "get_current_tform", "(", ")", "# Required sometimes when closing IDBs and not IDA.", "if", "not", "tform", ":", "tform", "=", "idaapi", ".", "find_tform", "(", "\"Output window\"", ")", "widget", "=", "form_to_widget", "(", "tform", ")", "window", "=", "widget", ".", "window", "(", ")", "return", "window" ]
Get IDA's top level window.
[ "Get", "IDA", "s", "top", "level", "window", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/qt.py#L56-L66
13,710
tmr232/Sark
sark/qt.py
MenuManager.add_menu
def add_menu(self, name):
    """Add a top-level menu.

    The menu manager only allows one menu of the same name. However, it
    does not make sure that there are no pre-existing menus of that name.
    """
    if name in self._menus:
        raise exceptions.MenuAlreadyExists("Menu name {!r} already exists.".format(name))

    menu = self._menu.addMenu(name)
    self._menus[name] = menu
python
def add_menu(self, name):
    """Add a top-level menu.

    The menu manager only allows one menu of the same name. However, it
    does not make sure that there are no pre-existing menus of that name.
    """
    if name in self._menus:
        raise exceptions.MenuAlreadyExists("Menu name {!r} already exists.".format(name))

    menu = self._menu.addMenu(name)
    self._menus[name] = menu
[ "def", "add_menu", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ".", "_menus", ":", "raise", "exceptions", ".", "MenuAlreadyExists", "(", "\"Menu name {!r} already exists.\"", ".", "format", "(", "name", ")", ")", "menu", "=", "self", ".", "_menu", ".", "addMenu", "(", "name", ")", "self", ".", "_menus", "[", "name", "]", "=", "menu" ]
Add a top-level menu. The menu manager only allows one menu of the same name. However, it does not make sure that there are no pre-existing menus of that name.
[ "Add", "a", "top", "-", "level", "menu", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/qt.py#L109-L118
13,711
tmr232/Sark
sark/qt.py
MenuManager.remove_menu
def remove_menu(self, name):
    """Remove a top-level menu.

    Only removes menus created by the same menu manager.
    """
    if name not in self._menus:
        raise exceptions.MenuNotFound(
            "Menu {!r} was not found. It might be deleted, or belong to another menu manager.".format(name))

    self._menu.removeAction(self._menus[name].menuAction())
    del self._menus[name]
python
def remove_menu(self, name):
    """Remove a top-level menu.

    Only removes menus created by the same menu manager.
    """
    if name not in self._menus:
        raise exceptions.MenuNotFound(
            "Menu {!r} was not found. It might be deleted, or belong to another menu manager.".format(name))

    self._menu.removeAction(self._menus[name].menuAction())
    del self._menus[name]
[ "def", "remove_menu", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "_menus", ":", "raise", "exceptions", ".", "MenuNotFound", "(", "\"Menu {!r} was not found. It might be deleted, or belong to another menu manager.\"", ".", "format", "(", "name", ")", ")", "self", ".", "_menu", ".", "removeAction", "(", "self", ".", "_menus", "[", "name", "]", ".", "menuAction", "(", ")", ")", "del", "self", ".", "_menus", "[", "name", "]" ]
Remove a top-level menu. Only removes menus created by the same menu manager.
[ "Remove", "a", "top", "-", "level", "menu", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/qt.py#L120-L130
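A hedged sketch of the MenuManager workflow covered by `add_menu` and `remove_menu` above; the menu name is illustrative:

from sark.qt import MenuManager

menu_manager = MenuManager()
menu_manager.add_menu("MyPlugin")     # raises MenuAlreadyExists on a duplicate name
menu_manager.remove_menu("MyPlugin")  # only removes menus this manager created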
13,712
tmr232/Sark
sark/qt.py
MenuManager.clear
def clear(self):
    """Clear all menus created by this manager."""
    for menu in self._menus.itervalues():
        self._menu.removeAction(menu.menuAction())

    self._menus = {}
python
def clear(self):
    """Clear all menus created by this manager."""
    for menu in self._menus.itervalues():
        self._menu.removeAction(menu.menuAction())

    self._menus = {}
[ "def", "clear", "(", "self", ")", ":", "for", "menu", "in", "self", ".", "_menus", ".", "itervalues", "(", ")", ":", "self", ".", "_menu", ".", "removeAction", "(", "menu", ".", "menuAction", "(", ")", ")", "self", ".", "_menus", "=", "{", "}" ]
Clear all menus created by this manager.
[ "Clear", "all", "menus", "created", "by", "this", "manager", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/qt.py#L132-L136
13,713
tmr232/Sark
sark/debug.py
Registers.get_by_flags
def get_by_flags(self, flags):
    """Iterate all register infos matching the given flags."""
    for reg in self._reg_infos:
        if reg.flags & flags == flags:
            yield reg
python
def get_by_flags(self, flags):
    """Iterate all register infos matching the given flags."""
    for reg in self._reg_infos:
        if reg.flags & flags == flags:
            yield reg
[ "def", "get_by_flags", "(", "self", ",", "flags", ")", ":", "for", "reg", "in", "self", ".", "_reg_infos", ":", "if", "reg", ".", "flags", "&", "flags", "==", "flags", ":", "yield", "reg" ]
Iterate all register infos matching the given flags.
[ "Iterate", "all", "register", "infos", "matching", "the", "given", "flags", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/debug.py#L39-L43
13,714
tmr232/Sark
sark/debug.py
Registers.get_single_by_flags
def get_single_by_flags(self, flags):
    """Get the register info matching the flags. Raises ValueError if not exactly one is found."""
    regs = list(self.get_by_flags(flags))
    if len(regs) != 1:
        raise ValueError("Flags do not return a unique register. {!r}", regs)
    return regs[0]
python
def get_single_by_flags(self, flags):
    """Get the register info matching the flags. Raises ValueError if not exactly one is found."""
    regs = list(self.get_by_flags(flags))
    if len(regs) != 1:
        raise ValueError("Flags do not return a unique register. {!r}", regs)
    return regs[0]
[ "def", "get_single_by_flags", "(", "self", ",", "flags", ")", ":", "regs", "=", "list", "(", "self", ".", "get_by_flags", "(", "flags", ")", ")", "if", "len", "(", "regs", ")", "!=", "1", ":", "raise", "ValueError", "(", "\"Flags do not return unique resigter. {!r}\"", ",", "regs", ")", "return", "regs", "[", "0", "]" ]
Get the register info matching the flags. Raises ValueError if not exactly one is found.
[ "Get", "the", "register", "info", "matching", "the", "flag", ".", "Raises", "ValueError", "if", "more", "than", "one", "are", "found", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/debug.py#L45-L51
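A hedged sketch of the two flag lookups above; `registers` is assumed to be an instance of the Registers class, and `REGISTER_SP` stands in for a real flag constant:

stack_regs = list(registers.get_by_flags(REGISTER_SP))  # zero or more matches
sp = registers.get_single_by_flags(REGISTER_SP)         # exactly one, else ValueError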
13,715
tmr232/Sark
sark/code/segment.py
segments
def segments(seg_type=None):
    """Iterate segments based on type.

    Args:
        seg_type: Type of segment, e.g. SEG_CODE.

    Returns:
        Iterator of `Segment` objects. If seg_type is None, returns all segments;
        otherwise returns only the relevant ones.
    """
    for index in xrange(idaapi.get_segm_qty()):
        seg = Segment(index=index)
        if (seg_type is None) or (seg.type == seg_type):
            yield Segment(index=index)
python
def segments(seg_type=None):
    """Iterate segments based on type.

    Args:
        seg_type: Type of segment, e.g. SEG_CODE.

    Returns:
        Iterator of `Segment` objects. If seg_type is None, returns all segments;
        otherwise returns only the relevant ones.
    """
    for index in xrange(idaapi.get_segm_qty()):
        seg = Segment(index=index)
        if (seg_type is None) or (seg.type == seg_type):
            yield Segment(index=index)
[ "def", "segments", "(", "seg_type", "=", "None", ")", ":", "for", "index", "in", "xrange", "(", "idaapi", ".", "get_segm_qty", "(", ")", ")", ":", "seg", "=", "Segment", "(", "index", "=", "index", ")", "if", "(", "seg_type", "is", "None", ")", "or", "(", "seg", ".", "type", "==", "seg_type", ")", ":", "yield", "Segment", "(", "index", "=", "index", ")" ]
Iterate segments based on type.

Args:
    seg_type: Type of segment, e.g. SEG_CODE.

Returns:
    Iterator of `Segment` objects. If seg_type is None, returns all segments;
    otherwise returns only the relevant ones.
[ "Iterate", "segments", "based", "on", "type" ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/segment.py#L250-L264
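A usage sketch for `segments`, assuming it is imported from sark.code.segment and that `Segment` exposes a `name` property:

import idaapi
from sark.code.segment import segments

# Iterate only code segments; omit seg_type to get every segment.
for seg in segments(seg_type=idaapi.SEG_CODE):
    print(seg.name)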
13,716
tmr232/Sark
sark/code/segment.py
Segment.next
def next(self):
    """Get the next segment."""
    seg = Segment(segment_t=idaapi.get_next_seg(self.ea))

    if seg.ea <= self.ea:
        raise exceptions.NoMoreSegments("This is the last segment. No segments exist after it.")

    return seg
python
def next(self):
    """Get the next segment."""
    seg = Segment(segment_t=idaapi.get_next_seg(self.ea))

    if seg.ea <= self.ea:
        raise exceptions.NoMoreSegments("This is the last segment. No segments exist after it.")

    return seg
[ "def", "next", "(", "self", ")", ":", "seg", "=", "Segment", "(", "segment_t", "=", "idaapi", ".", "get_next_seg", "(", "self", ".", "ea", ")", ")", "if", "seg", ".", "ea", "<=", "self", ".", "ea", ":", "raise", "exceptions", ".", "NoMoreSegments", "(", "\"This is the last segment. No segments exist after it.\"", ")", "return", "seg" ]
Get the next segment.
[ "Get", "the", "next", "segment", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/segment.py#L211-L218
13,717
tmr232/Sark
sark/code/segment.py
Segment.prev
def prev(self):
    """Get the previous segment."""
    seg = Segment(segment_t=idaapi.get_prev_seg(self.ea))

    if seg.ea >= self.ea:
        raise exceptions.NoMoreSegments("This is the first segment. No segments exist before it.")

    return seg
python
def prev(self):
    """Get the previous segment."""
    seg = Segment(segment_t=idaapi.get_prev_seg(self.ea))

    if seg.ea >= self.ea:
        raise exceptions.NoMoreSegments("This is the first segment. No segments exist before it.")

    return seg
[ "def", "prev", "(", "self", ")", ":", "seg", "=", "Segment", "(", "segment_t", "=", "idaapi", ".", "get_prev_seg", "(", "self", ".", "ea", ")", ")", "if", "seg", ".", "ea", ">=", "self", ".", "ea", ":", "raise", "exceptions", ".", "NoMoreSegments", "(", "\"This is the first segment. no segments exist before it.\"", ")", "return", "seg" ]
Get the previous segment.
[ "Get", "the", "previous", "segment", "." ]
bee62879c2aea553a3924d887e2b30f2a6008581
https://github.com/tmr232/Sark/blob/bee62879c2aea553a3924d887e2b30f2a6008581/sark/code/segment.py#L221-L228
13,718
thoth-station/solver
thoth/solver/python/base.py
get_ecosystem_solver
def get_ecosystem_solver(ecosystem_name, parser_kwargs=None, fetcher_kwargs=None):
    """Get Solver subclass instance for particular ecosystem.

    :param ecosystem_name: name of the ecosystem for which the solver should be obtained
    :param parser_kwargs: parser key-value arguments for constructor
    :param fetcher_kwargs: fetcher key-value arguments for constructor
    :return: Solver
    """
    from .python import PythonSolver

    if ecosystem_name.lower() == "pypi":
        source = Source(url="https://pypi.org/simple", warehouse_api_url="https://pypi.org/pypi", warehouse=True)
        return PythonSolver(parser_kwargs, fetcher_kwargs={"source": source})

    raise NotImplementedError("Unknown ecosystem: {}".format(ecosystem_name))
python
def get_ecosystem_solver(ecosystem_name, parser_kwargs=None, fetcher_kwargs=None):
    """Get Solver subclass instance for particular ecosystem.

    :param ecosystem_name: name of the ecosystem for which the solver should be obtained
    :param parser_kwargs: parser key-value arguments for constructor
    :param fetcher_kwargs: fetcher key-value arguments for constructor
    :return: Solver
    """
    from .python import PythonSolver

    if ecosystem_name.lower() == "pypi":
        source = Source(url="https://pypi.org/simple", warehouse_api_url="https://pypi.org/pypi", warehouse=True)
        return PythonSolver(parser_kwargs, fetcher_kwargs={"source": source})

    raise NotImplementedError("Unknown ecosystem: {}".format(ecosystem_name))
[ "def", "get_ecosystem_solver", "(", "ecosystem_name", ",", "parser_kwargs", "=", "None", ",", "fetcher_kwargs", "=", "None", ")", ":", "from", ".", "python", "import", "PythonSolver", "if", "ecosystem_name", ".", "lower", "(", ")", "==", "\"pypi\"", ":", "source", "=", "Source", "(", "url", "=", "\"https://pypi.org/simple\"", ",", "warehouse_api_url", "=", "\"https://pypi.org/pypi\"", ",", "warehouse", "=", "True", ")", "return", "PythonSolver", "(", "parser_kwargs", ",", "fetcher_kwargs", "=", "{", "\"source\"", ":", "source", "}", ")", "raise", "NotImplementedError", "(", "\"Unknown ecosystem: {}\"", ".", "format", "(", "ecosystem_name", ")", ")" ]
Get Solver subclass instance for particular ecosystem.

:param ecosystem_name: name of the ecosystem for which the solver should be obtained
:param parser_kwargs: parser key-value arguments for constructor
:param fetcher_kwargs: fetcher key-value arguments for constructor
:return: Solver
[ "Get", "Solver", "subclass", "instance", "for", "particular", "ecosystem", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/base.py#L297-L311
13,719
thoth-station/solver
thoth/solver/python/base.py
Dependency.check
def check(self, version):  # Ignore PyDocStyleBear
    """Check if `version` fits into our dependency specification.

    :param version: str
    :return: bool
    """
    def _compare_spec(spec):
        if len(spec) == 1:
            spec = ("=", spec[0])

        token = Tokens.operators.index(spec[0])
        comparison = compare_version(version, spec[1])
        if token in [Tokens.EQ1, Tokens.EQ2]:
            return comparison == 0
        elif token == Tokens.GT:
            return comparison == 1
        elif token == Tokens.LT:
            return comparison == -1
        elif token == Tokens.GTE:
            return comparison >= 0
        elif token == Tokens.LTE:
            return comparison <= 0
        elif token == Tokens.NEQ:
            return comparison != 0
        else:
            raise ValueError("Invalid comparison token")

    results, intermediaries = False, False
    for spec in self.spec:
        if isinstance(spec, list):
            intermediary = True
            for sub in spec:
                intermediary &= _compare_spec(sub)
            intermediaries |= intermediary
        elif isinstance(spec, tuple):
            results |= _compare_spec(spec)

    return results or intermediaries
python
def check(self, version):  # Ignore PyDocStyleBear
    """Check if `version` fits into our dependency specification.

    :param version: str
    :return: bool
    """
    def _compare_spec(spec):
        if len(spec) == 1:
            spec = ("=", spec[0])

        token = Tokens.operators.index(spec[0])
        comparison = compare_version(version, spec[1])
        if token in [Tokens.EQ1, Tokens.EQ2]:
            return comparison == 0
        elif token == Tokens.GT:
            return comparison == 1
        elif token == Tokens.LT:
            return comparison == -1
        elif token == Tokens.GTE:
            return comparison >= 0
        elif token == Tokens.LTE:
            return comparison <= 0
        elif token == Tokens.NEQ:
            return comparison != 0
        else:
            raise ValueError("Invalid comparison token")

    results, intermediaries = False, False
    for spec in self.spec:
        if isinstance(spec, list):
            intermediary = True
            for sub in spec:
                intermediary &= _compare_spec(sub)
            intermediaries |= intermediary
        elif isinstance(spec, tuple):
            results |= _compare_spec(spec)

    return results or intermediaries
[ "def", "check", "(", "self", ",", "version", ")", ":", "# Ignore PyDocStyleBear", "def", "_compare_spec", "(", "spec", ")", ":", "if", "len", "(", "spec", ")", "==", "1", ":", "spec", "=", "(", "\"=\"", ",", "spec", "[", "0", "]", ")", "token", "=", "Tokens", ".", "operators", ".", "index", "(", "spec", "[", "0", "]", ")", "comparison", "=", "compare_version", "(", "version", ",", "spec", "[", "1", "]", ")", "if", "token", "in", "[", "Tokens", ".", "EQ1", ",", "Tokens", ".", "EQ2", "]", ":", "return", "comparison", "==", "0", "elif", "token", "==", "Tokens", ".", "GT", ":", "return", "comparison", "==", "1", "elif", "token", "==", "Tokens", ".", "LT", ":", "return", "comparison", "==", "-", "1", "elif", "token", "==", "Tokens", ".", "GTE", ":", "return", "comparison", ">=", "0", "elif", "token", "==", "Tokens", ".", "LTE", ":", "return", "comparison", "<=", "0", "elif", "token", "==", "Tokens", ".", "NEQ", ":", "return", "comparison", "!=", "0", "else", ":", "raise", "ValueError", "(", "\"Invalid comparison token\"", ")", "results", ",", "intermediaries", "=", "False", ",", "False", "for", "spec", "in", "self", ".", "spec", ":", "if", "isinstance", "(", "spec", ",", "list", ")", ":", "intermediary", "=", "True", "for", "sub", "in", "spec", ":", "intermediary", "&=", "_compare_spec", "(", "sub", ")", "intermediaries", "|=", "intermediary", "elif", "isinstance", "(", "spec", ",", "tuple", ")", ":", "results", "|=", "_compare_spec", "(", "spec", ")", "return", "results", "or", "intermediaries" ]
Check if `version` fits into our dependency specification.

:param version: str
:return: bool
[ "Check", "if", "version", "fits", "into", "our", "dependency", "specification", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/base.py#L143-L181
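A sketch of the spec semantics `check` implements: top-level tuples are OR-ed, while a nested list is AND-ed into a conjunctive range. Assigning `spec` directly and the values used are purely illustrative:

# Meaning of this spec: ==1.0 OR (>=2.0 AND <3.0); `dep` is assumed
# to be a Dependency instance.
dep.spec = [("==", "1.0"), [(">=", "2.0"), ("<", "3.0")]]
assert dep.check("2.5")      # inside the AND-ed range
assert not dep.check("1.5")  # matches neither branch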
13,720
thoth-station/solver
thoth/solver/python/base.py
Solver.solve
def solve(self, dependencies, graceful=True, all_versions=False):  # Ignore PyDocStyleBear
    """Solve `dependencies` against upstream repository.

    :param dependencies: List, List of dependencies in native format
    :param graceful: bool, Print info output to stdout
    :param all_versions: bool, Return all matched versions instead of the latest
    :return: Dict[str, str], Matched versions
    """
    def _compare_version_index_url(v1, v2):
        """Get a wrapper around compare version to omit index url when sorting."""
        return compare_version(v1[0], v2[0])

    solved = {}
    for dep in self.dependency_parser.parse(dependencies):
        _LOGGER.debug("Fetching releases for: {}".format(dep))

        name, releases = self.release_fetcher.fetch_releases(dep.name)

        if name in solved:
            raise SolverException("Dependency: {} is listed multiple times".format(name))

        if not releases:
            if graceful:
                _LOGGER.info("No releases found for package %s", dep.name)
            else:
                raise SolverException("No releases found for package {}".format(dep.name))

        releases = [release for release in releases if release in dep]
        matching = sorted(releases, key=cmp_to_key(_compare_version_index_url))

        _LOGGER.debug("  matching: %s", matching)

        if all_versions:
            solved[name] = matching
        else:
            if not matching:
                solved[name] = None
            else:
                if self._highest_dependency_version:
                    solved[name] = matching[-1]
                else:
                    solved[name] = matching[0]

    return solved
python
def solve(self, dependencies, graceful=True, all_versions=False):  # Ignore PyDocStyleBear
    """Solve `dependencies` against upstream repository.

    :param dependencies: List, List of dependencies in native format
    :param graceful: bool, Print info output to stdout
    :param all_versions: bool, Return all matched versions instead of the latest
    :return: Dict[str, str], Matched versions
    """
    def _compare_version_index_url(v1, v2):
        """Get a wrapper around compare version to omit index url when sorting."""
        return compare_version(v1[0], v2[0])

    solved = {}
    for dep in self.dependency_parser.parse(dependencies):
        _LOGGER.debug("Fetching releases for: {}".format(dep))

        name, releases = self.release_fetcher.fetch_releases(dep.name)

        if name in solved:
            raise SolverException("Dependency: {} is listed multiple times".format(name))

        if not releases:
            if graceful:
                _LOGGER.info("No releases found for package %s", dep.name)
            else:
                raise SolverException("No releases found for package {}".format(dep.name))

        releases = [release for release in releases if release in dep]
        matching = sorted(releases, key=cmp_to_key(_compare_version_index_url))

        _LOGGER.debug("  matching: %s", matching)

        if all_versions:
            solved[name] = matching
        else:
            if not matching:
                solved[name] = None
            else:
                if self._highest_dependency_version:
                    solved[name] = matching[-1]
                else:
                    solved[name] = matching[0]

    return solved
[ "def", "solve", "(", "self", ",", "dependencies", ",", "graceful", "=", "True", ",", "all_versions", "=", "False", ")", ":", "# Ignore PyDocStyleBear", "def", "_compare_version_index_url", "(", "v1", ",", "v2", ")", ":", "\"\"\"Get a wrapper around compare version to omit index url when sorting.\"\"\"", "return", "compare_version", "(", "v1", "[", "0", "]", ",", "v2", "[", "0", "]", ")", "solved", "=", "{", "}", "for", "dep", "in", "self", ".", "dependency_parser", ".", "parse", "(", "dependencies", ")", ":", "_LOGGER", ".", "debug", "(", "\"Fetching releases for: {}\"", ".", "format", "(", "dep", ")", ")", "name", ",", "releases", "=", "self", ".", "release_fetcher", ".", "fetch_releases", "(", "dep", ".", "name", ")", "if", "name", "in", "solved", ":", "raise", "SolverException", "(", "\"Dependency: {} is listed multiple times\"", ".", "format", "(", "name", ")", ")", "if", "not", "releases", ":", "if", "graceful", ":", "_LOGGER", ".", "info", "(", "\"No releases found for package %s\"", ",", "dep", ".", "name", ")", "else", ":", "raise", "SolverException", "(", "\"No releases found for package {}\"", ".", "format", "(", "dep", ".", "name", ")", ")", "releases", "=", "[", "release", "for", "release", "in", "releases", "if", "release", "in", "dep", "]", "matching", "=", "sorted", "(", "releases", ",", "key", "=", "cmp_to_key", "(", "_compare_version_index_url", ")", ")", "_LOGGER", ".", "debug", "(", "\" matching: %s\"", ",", "matching", ")", "if", "all_versions", ":", "solved", "[", "name", "]", "=", "matching", "else", ":", "if", "not", "matching", ":", "solved", "[", "name", "]", "=", "None", "else", ":", "if", "self", ".", "_highest_dependency_version", ":", "solved", "[", "name", "]", "=", "matching", "[", "-", "1", "]", "else", ":", "solved", "[", "name", "]", "=", "matching", "[", "0", "]", "return", "solved" ]
Solve `dependencies` against upstream repository.

:param dependencies: List, List of dependencies in native format
:param graceful: bool, Print info output to stdout
:param all_versions: bool, Return all matched versions instead of the latest
:return: Dict[str, str], Matched versions
[ "Solve", "dependencies", "against", "upstream", "repository", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/base.py#L250-L294
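A minimal usage sketch combining `get_ecosystem_solver` (above) with `solve`; the requirement string format is assumed to be what the dependency parser accepts:

solver = get_ecosystem_solver("pypi")

# Maps each package name to its latest matching release;
# all_versions=True would return every matching release instead.
print(solver.solve(["requests>=2.0"], all_versions=False))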
13,721
thoth-station/solver
thoth/solver/compile.py
pip_compile
def pip_compile(*packages: str):
    """Run pip-compile to pin down packages, also resolve their transitive dependencies."""
    result = None
    packages = "\n".join(packages)

    with tempfile.TemporaryDirectory() as tmp_dirname, cwd(tmp_dirname):
        with open("requirements.in", "w") as requirements_file:
            requirements_file.write(packages)

        runner = CliRunner()

        try:
            result = runner.invoke(cli, ["requirements.in"], catch_exceptions=False)
        except Exception as exc:
            raise ThothPipCompileError(str(exc)) from exc

        if result.exit_code != 0:
            error_msg = (
                f"pip-compile returned non-zero ({result.exit_code:d}) "
                f"output: {result.output_bytes.decode():s}"
            )
            raise ThothPipCompileError(error_msg)

    return result.output_bytes.decode()
python
def pip_compile(*packages: str):
    """Run pip-compile to pin down packages, also resolve their transitive dependencies."""
    result = None
    packages = "\n".join(packages)

    with tempfile.TemporaryDirectory() as tmp_dirname, cwd(tmp_dirname):
        with open("requirements.in", "w") as requirements_file:
            requirements_file.write(packages)

        runner = CliRunner()

        try:
            result = runner.invoke(cli, ["requirements.in"], catch_exceptions=False)
        except Exception as exc:
            raise ThothPipCompileError(str(exc)) from exc

        if result.exit_code != 0:
            error_msg = (
                f"pip-compile returned non-zero ({result.exit_code:d}) "
                f"output: {result.output_bytes.decode():s}"
            )
            raise ThothPipCompileError(error_msg)

    return result.output_bytes.decode()
[ "def", "pip_compile", "(", "*", "packages", ":", "str", ")", ":", "result", "=", "None", "packages", "=", "\"\\n\"", ".", "join", "(", "packages", ")", "with", "tempfile", ".", "TemporaryDirectory", "(", ")", "as", "tmp_dirname", ",", "cwd", "(", "tmp_dirname", ")", ":", "with", "open", "(", "\"requirements.in\"", ",", "\"w\"", ")", "as", "requirements_file", ":", "requirements_file", ".", "write", "(", "packages", ")", "runner", "=", "CliRunner", "(", ")", "try", ":", "result", "=", "runner", ".", "invoke", "(", "cli", ",", "[", "\"requirements.in\"", "]", ",", "catch_exceptions", "=", "False", ")", "except", "Exception", "as", "exc", ":", "raise", "ThothPipCompileError", "(", "str", "(", "exc", ")", ")", "from", "exc", "if", "result", ".", "exit_code", "!=", "0", ":", "error_msg", "=", "(", "f\"pip-compile returned non-zero ({result.exit_code:d}) \"", "f\"output: {result.output_bytes.decode():s}\"", ")", "raise", "ThothPipCompileError", "(", "error_msg", ")", "return", "result", ".", "output_bytes", ".", "decode", "(", ")" ]
Run pip-compile to pin down packages, also resolve their transitive dependencies.
[ "Run", "pip", "-", "compile", "to", "pin", "down", "packages", "also", "resolve", "their", "transitive", "dependencies", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/compile.py#L30-L52
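A short usage sketch for `pip_compile`; the package specifiers are illustrative:

from thoth.solver.compile import pip_compile

# Returns the pinned requirements.txt content as a string;
# raises ThothPipCompileError when pip-compile fails.
pinned = pip_compile("flask>=1.0", "click")
print(pinned)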
13,722
thoth-station/solver
thoth/solver/cli.py
_print_version
def _print_version(ctx, _, value):
    """Print solver version and exit."""
    if not value or ctx.resilient_parsing:
        return
    click.echo(analyzer_version)
    ctx.exit()
python
def _print_version(ctx, _, value):
    """Print solver version and exit."""
    if not value or ctx.resilient_parsing:
        return
    click.echo(analyzer_version)
    ctx.exit()
[ "def", "_print_version", "(", "ctx", ",", "_", ",", "value", ")", ":", "if", "not", "value", "or", "ctx", ".", "resilient_parsing", ":", "return", "click", ".", "echo", "(", "analyzer_version", ")", "ctx", ".", "exit", "(", ")" ]
Print solver version and exit.
[ "Print", "solver", "version", "and", "exit", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/cli.py#L37-L42
13,723
thoth-station/solver
thoth/solver/cli.py
cli
def cli(ctx=None, verbose=0):
    """Thoth solver command line interface."""
    if ctx:
        ctx.auto_envvar_prefix = "THOTH_SOLVER"

    if verbose:
        _LOG.setLevel(logging.DEBUG)
        _LOG.debug("Debug mode is on")
python
def cli(ctx=None, verbose=0):
    """Thoth solver command line interface."""
    if ctx:
        ctx.auto_envvar_prefix = "THOTH_SOLVER"

    if verbose:
        _LOG.setLevel(logging.DEBUG)
        _LOG.debug("Debug mode is on")
[ "def", "cli", "(", "ctx", "=", "None", ",", "verbose", "=", "0", ")", ":", "if", "ctx", ":", "ctx", ".", "auto_envvar_prefix", "=", "\"THOTH_SOLVER\"", "if", "verbose", ":", "_LOG", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "_LOG", ".", "debug", "(", "\"Debug mode is on\"", ")" ]
Thoth solver command line interface.
[ "Thoth", "solver", "command", "line", "interface", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/cli.py#L56-L64
13,724
thoth-station/solver
thoth/solver/cli.py
pypi
def pypi(
    click_ctx,
    requirements,
    index=None,
    python_version=3,
    exclude_packages=None,
    output=None,
    subgraph_check_api=None,
    no_transitive=True,
    no_pretty=False,
):
    """Manipulate dependency requirements using PyPI."""
    requirements = [requirement.strip() for requirement in requirements.split("\\n") if requirement]

    if not requirements:
        _LOG.error("No requirements specified, exiting")
        sys.exit(1)

    if not subgraph_check_api:
        _LOG.info(
            "No subgraph check API provided, no queries will be done for dependency subgraphs that should be avoided"
        )  # Ignore PycodestyleBear (E501)

    result = resolve_python(
        requirements,
        index_urls=index.split(",") if index else ("https://pypi.org/simple",),
        python_version=int(python_version),
        transitive=not no_transitive,
        exclude_packages=set(map(str.strip, (exclude_packages or "").split(","))),
        subgraph_check_api=subgraph_check_api,
    )

    print_command_result(
        click_ctx,
        result,
        analyzer=analyzer_name,
        analyzer_version=analyzer_version,
        output=output or "-",
        pretty=not no_pretty,
    )
python
def pypi(
    click_ctx,
    requirements,
    index=None,
    python_version=3,
    exclude_packages=None,
    output=None,
    subgraph_check_api=None,
    no_transitive=True,
    no_pretty=False,
):
    """Manipulate dependency requirements using PyPI."""
    requirements = [requirement.strip() for requirement in requirements.split("\\n") if requirement]

    if not requirements:
        _LOG.error("No requirements specified, exiting")
        sys.exit(1)

    if not subgraph_check_api:
        _LOG.info(
            "No subgraph check API provided, no queries will be done for dependency subgraphs that should be avoided"
        )  # Ignore PycodestyleBear (E501)

    result = resolve_python(
        requirements,
        index_urls=index.split(",") if index else ("https://pypi.org/simple",),
        python_version=int(python_version),
        transitive=not no_transitive,
        exclude_packages=set(map(str.strip, (exclude_packages or "").split(","))),
        subgraph_check_api=subgraph_check_api,
    )

    print_command_result(
        click_ctx,
        result,
        analyzer=analyzer_name,
        analyzer_version=analyzer_version,
        output=output or "-",
        pretty=not no_pretty,
    )
[ "def", "pypi", "(", "click_ctx", ",", "requirements", ",", "index", "=", "None", ",", "python_version", "=", "3", ",", "exclude_packages", "=", "None", ",", "output", "=", "None", ",", "subgraph_check_api", "=", "None", ",", "no_transitive", "=", "True", ",", "no_pretty", "=", "False", ",", ")", ":", "requirements", "=", "[", "requirement", ".", "strip", "(", ")", "for", "requirement", "in", "requirements", ".", "split", "(", "\"\\\\n\"", ")", "if", "requirement", "]", "if", "not", "requirements", ":", "_LOG", ".", "error", "(", "\"No requirements specified, exiting\"", ")", "sys", ".", "exit", "(", "1", ")", "if", "not", "subgraph_check_api", ":", "_LOG", ".", "info", "(", "\"No subgraph check API provided, no queries will be done for dependency subgraphs that should be avoided\"", ")", "# Ignore PycodestyleBear (E501)", "result", "=", "resolve_python", "(", "requirements", ",", "index_urls", "=", "index", ".", "split", "(", "\",\"", ")", "if", "index", "else", "(", "\"https://pypi.org/simple\"", ",", ")", ",", "python_version", "=", "int", "(", "python_version", ")", ",", "transitive", "=", "not", "no_transitive", ",", "exclude_packages", "=", "set", "(", "map", "(", "str", ".", "strip", ",", "(", "exclude_packages", "or", "\"\"", ")", ".", "split", "(", "\",\"", ")", ")", ")", ",", "subgraph_check_api", "=", "subgraph_check_api", ",", ")", "print_command_result", "(", "click_ctx", ",", "result", ",", "analyzer", "=", "analyzer_name", ",", "analyzer_version", "=", "analyzer_version", ",", "output", "=", "output", "or", "\"-\"", ",", "pretty", "=", "not", "no_pretty", ",", ")" ]
Manipulate dependency requirements using PyPI.
[ "Manipulate", "with", "dependency", "requirements", "using", "PyPI", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/cli.py#L110-L149
13,725
thoth-station/solver
thoth/solver/python/python.py
_create_entry
def _create_entry(entry: dict, source: Source = None) -> dict:
    """Filter and normalize the output of pipdeptree entry."""
    entry["package_name"] = entry["package"].pop("package_name")
    entry["package_version"] = entry["package"].pop("installed_version")

    if source:
        entry["index_url"] = source.url
        entry["sha256"] = []
        for item in source.get_package_hashes(entry["package_name"], entry["package_version"]):
            entry["sha256"].append(item["sha256"])

    entry.pop("package")

    for dependency in entry["dependencies"]:
        dependency.pop("key", None)
        dependency.pop("installed_version", None)

    return entry
python
def _create_entry(entry: dict, source: Source = None) -> dict:
    """Filter and normalize the output of pipdeptree entry."""
    entry["package_name"] = entry["package"].pop("package_name")
    entry["package_version"] = entry["package"].pop("installed_version")

    if source:
        entry["index_url"] = source.url
        entry["sha256"] = []
        for item in source.get_package_hashes(entry["package_name"], entry["package_version"]):
            entry["sha256"].append(item["sha256"])

    entry.pop("package")

    for dependency in entry["dependencies"]:
        dependency.pop("key", None)
        dependency.pop("installed_version", None)

    return entry
[ "def", "_create_entry", "(", "entry", ":", "dict", ",", "source", ":", "Source", "=", "None", ")", "->", "dict", ":", "entry", "[", "\"package_name\"", "]", "=", "entry", "[", "\"package\"", "]", ".", "pop", "(", "\"package_name\"", ")", "entry", "[", "\"package_version\"", "]", "=", "entry", "[", "\"package\"", "]", ".", "pop", "(", "\"installed_version\"", ")", "if", "source", ":", "entry", "[", "\"index_url\"", "]", "=", "source", ".", "url", "entry", "[", "\"sha256\"", "]", "=", "[", "]", "for", "item", "in", "source", ".", "get_package_hashes", "(", "entry", "[", "\"package_name\"", "]", ",", "entry", "[", "\"package_version\"", "]", ")", ":", "entry", "[", "\"sha256\"", "]", ".", "append", "(", "item", "[", "\"sha256\"", "]", ")", "entry", ".", "pop", "(", "\"package\"", ")", "for", "dependency", "in", "entry", "[", "\"dependencies\"", "]", ":", "dependency", ".", "pop", "(", "\"key\"", ",", "None", ")", "dependency", ".", "pop", "(", "\"installed_version\"", ",", "None", ")", "return", "entry" ]
Filter and normalize the output of pipdeptree entry.
[ "Filter", "and", "normalize", "the", "output", "of", "pipdeptree", "entry", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python.py#L40-L56
13,726
thoth-station/solver
thoth/solver/python/python.py
_get_environment_details
def _get_environment_details(python_bin: str) -> list:
    """Get information about packages in environment where packages get installed."""
    cmd = "{} -m pipdeptree --json".format(python_bin)
    output = run_command(cmd, is_json=True).stdout
    return [_create_entry(entry) for entry in output]
python
def _get_environment_details(python_bin: str) -> list:
    """Get information about packages in environment where packages get installed."""
    cmd = "{} -m pipdeptree --json".format(python_bin)
    output = run_command(cmd, is_json=True).stdout
    return [_create_entry(entry) for entry in output]
[ "def", "_get_environment_details", "(", "python_bin", ":", "str", ")", "->", "list", ":", "cmd", "=", "\"{} -m pipdeptree --json\"", ".", "format", "(", "python_bin", ")", "output", "=", "run_command", "(", "cmd", ",", "is_json", "=", "True", ")", ".", "stdout", "return", "[", "_create_entry", "(", "entry", ")", "for", "entry", "in", "output", "]" ]
Get information about packages in environment where packages get installed.
[ "Get", "information", "about", "packages", "in", "environment", "where", "packages", "get", "installed", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python.py#L59-L63
13,727
thoth-station/solver
thoth/solver/python/python.py
_should_resolve_subgraph
def _should_resolve_subgraph(subgraph_check_api: str, package_name: str, package_version: str, index_url: str) -> bool:
    """Ask the given subgraph check API if the given package in the given version should be included in the resolution.

    This subgraph resolution avoidance serves two purposes - we don't need to resolve dependency subgraphs that were
    already analyzed, and we also avoid analyzing "core" packages (like setuptools) where not needed, as they can
    break the installation environment.
    """
    _LOGGER.info(
        "Checking if the given dependency subgraph for package %r in version %r from index %r should be resolved",
        package_name,
        package_version,
        index_url,
    )
    response = requests.get(
        subgraph_check_api,
        params={"package_name": package_name, "package_version": package_version, "index_url": index_url},
    )

    if response.status_code == 200:
        return True
    elif response.status_code == 208:
        # This is probably not the correct HTTP status code to be used here, but which one should be used?
        return False

    response.raise_for_status()
    raise ValueError(
        "Unreachable code - subgraph check API responded with unknown HTTP status "
        "code %s for package %r in version %r from index %r",
        package_name,
        package_version,
        index_url,
    )
python
def _should_resolve_subgraph(subgraph_check_api: str, package_name: str, package_version: str, index_url: str) -> bool:
    """Ask the given subgraph check API if the given package in the given version should be included in the resolution.

    This subgraph resolution avoidance serves two purposes - we don't need to resolve dependency subgraphs that were
    already analyzed, and we also avoid analyzing "core" packages (like setuptools) where not needed, as they can
    break the installation environment.
    """
    _LOGGER.info(
        "Checking if the given dependency subgraph for package %r in version %r from index %r should be resolved",
        package_name,
        package_version,
        index_url,
    )
    response = requests.get(
        subgraph_check_api,
        params={"package_name": package_name, "package_version": package_version, "index_url": index_url},
    )

    if response.status_code == 200:
        return True
    elif response.status_code == 208:
        # This is probably not the correct HTTP status code to be used here, but which one should be used?
        return False

    response.raise_for_status()
    raise ValueError(
        "Unreachable code - subgraph check API responded with unknown HTTP status "
        "code %s for package %r in version %r from index %r",
        package_name,
        package_version,
        index_url,
    )
[ "def", "_should_resolve_subgraph", "(", "subgraph_check_api", ":", "str", ",", "package_name", ":", "str", ",", "package_version", ":", "str", ",", "index_url", ":", "str", ")", "->", "bool", ":", "_LOGGER", ".", "info", "(", "\"Checking if the given dependency subgraph for package %r in version %r from index %r should be resolved\"", ",", "package_name", ",", "package_version", ",", "index_url", ",", ")", "response", "=", "requests", ".", "get", "(", "subgraph_check_api", ",", "params", "=", "{", "\"package_name\"", ":", "package_name", ",", "\"package_version\"", ":", "package_version", ",", "\"index_url\"", ":", "index_url", "}", ",", ")", "if", "response", ".", "status_code", "==", "200", ":", "return", "True", "elif", "response", ".", "status_code", "==", "208", ":", "# This is probably not the correct HTTP status code to be used here, but which one should be used?", "return", "False", "response", ".", "raise_for_status", "(", ")", "raise", "ValueError", "(", "\"Unreachable code - subgraph check API responded with unknown HTTP status \"", "\"code %s for package %r in version %r from index %r\"", ",", "package_name", ",", "package_version", ",", "index_url", ",", ")" ]
Ask the given subgraph check API if the given package in the given version should be included in the resolution.

This subgraph resolution avoidance serves two purposes - we don't need to resolve dependency subgraphs that were already analyzed, and we also avoid analyzing "core" packages (like setuptools) where not needed, as they can break the installation environment.
[ "Ask", "the", "given", "subgraph", "check", "API", "if", "the", "given", "package", "in", "the", "given", "version", "should", "be", "included", "in", "the", "resolution", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python.py#L66-L99
13,728
thoth-station/solver
thoth/solver/python/python.py
_install_requirement
def _install_requirement(
    python_bin: str, package: str, version: str = None, index_url: str = None, clean: bool = True
) -> None:
    """Install requirements specified using suggested pip binary."""
    previous_version = _pipdeptree(python_bin, package)

    try:
        cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}".format(python_bin, quote(package))
        if version:
            cmd += "=={}".format(quote(version))
        if index_url:
            cmd += ' --index-url "{}" '.format(quote(index_url))
            # Supply trusted host by default so we do not get errors - it is safe to
            # do it here as package indexes are managed by Thoth.
            trusted_host = urlparse(index_url).netloc
            cmd += " --trusted-host {}".format(trusted_host)

        _LOGGER.debug("Installing requirement %r in version %r", package, version)
        run_command(cmd)
        yield
    finally:
        if clean:
            _LOGGER.debug("Removing installed package %r", package)
            cmd = "{} -m pip uninstall --yes {}".format(python_bin, quote(package))
            result = run_command(cmd, raise_on_error=False)

            if result.return_code != 0:
                _LOGGER.warning(
                    "Failed to restore previous environment by removing package %r (installed version %r), "
                    "the error is not fatal but can affect future actions: %s",
                    package,
                    version,
                    result.stderr,
                )

            _LOGGER.debug(
                "Restoring previous environment setup after installation of %r (%s)", package, previous_version
            )
            if previous_version:
                cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}=={}".format(
                    python_bin, quote(package), quote(previous_version["package"]["installed_version"])
                )
                result = run_command(cmd, raise_on_error=False)
                if result.return_code != 0:
                    _LOGGER.warning(
                        "Failed to restore previous environment for package %r (installed version %r), "
                        "the error is not fatal but can affect future actions (previous version: %r): %s",
                        package,
                        version,
                        previous_version,
                        result.stderr,
                    )
python
def _install_requirement(
    python_bin: str, package: str, version: str = None, index_url: str = None, clean: bool = True
) -> None:
    """Install requirements specified using suggested pip binary."""
    previous_version = _pipdeptree(python_bin, package)

    try:
        cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}".format(python_bin, quote(package))
        if version:
            cmd += "=={}".format(quote(version))
        if index_url:
            cmd += ' --index-url "{}" '.format(quote(index_url))
            # Supply trusted host by default so we do not get errors - it is safe to
            # do it here as package indexes are managed by Thoth.
            trusted_host = urlparse(index_url).netloc
            cmd += " --trusted-host {}".format(trusted_host)

        _LOGGER.debug("Installing requirement %r in version %r", package, version)
        run_command(cmd)
        yield
    finally:
        if clean:
            _LOGGER.debug("Removing installed package %r", package)
            cmd = "{} -m pip uninstall --yes {}".format(python_bin, quote(package))
            result = run_command(cmd, raise_on_error=False)

            if result.return_code != 0:
                _LOGGER.warning(
                    "Failed to restore previous environment by removing package %r (installed version %r), "
                    "the error is not fatal but can affect future actions: %s",
                    package,
                    version,
                    result.stderr,
                )

            _LOGGER.debug(
                "Restoring previous environment setup after installation of %r (%s)", package, previous_version
            )
            if previous_version:
                cmd = "{} -m pip install --force-reinstall --no-cache-dir --no-deps {}=={}".format(
                    python_bin, quote(package), quote(previous_version["package"]["installed_version"])
                )
                result = run_command(cmd, raise_on_error=False)
                if result.return_code != 0:
                    _LOGGER.warning(
                        "Failed to restore previous environment for package %r (installed version %r), "
                        "the error is not fatal but can affect future actions (previous version: %r): %s",
                        package,
                        version,
                        previous_version,
                        result.stderr,
                    )
[ "def", "_install_requirement", "(", "python_bin", ":", "str", ",", "package", ":", "str", ",", "version", ":", "str", "=", "None", ",", "index_url", ":", "str", "=", "None", ",", "clean", ":", "bool", "=", "True", ")", "->", "None", ":", "previous_version", "=", "_pipdeptree", "(", "python_bin", ",", "package", ")", "try", ":", "cmd", "=", "\"{} -m pip install --force-reinstall --no-cache-dir --no-deps {}\"", ".", "format", "(", "python_bin", ",", "quote", "(", "package", ")", ")", "if", "version", ":", "cmd", "+=", "\"=={}\"", ".", "format", "(", "quote", "(", "version", ")", ")", "if", "index_url", ":", "cmd", "+=", "' --index-url \"{}\" '", ".", "format", "(", "quote", "(", "index_url", ")", ")", "# Supply trusted host by default so we do not get errors - it safe to", "# do it here as package indexes are managed by Thoth.", "trusted_host", "=", "urlparse", "(", "index_url", ")", ".", "netloc", "cmd", "+=", "\" --trusted-host {}\"", ".", "format", "(", "trusted_host", ")", "_LOGGER", ".", "debug", "(", "\"Installing requirement %r in version %r\"", ",", "package", ",", "version", ")", "run_command", "(", "cmd", ")", "yield", "finally", ":", "if", "clean", ":", "_LOGGER", ".", "debug", "(", "\"Removing installed package %r\"", ",", "package", ")", "cmd", "=", "\"{} -m pip uninstall --yes {}\"", ".", "format", "(", "python_bin", ",", "quote", "(", "package", ")", ")", "result", "=", "run_command", "(", "cmd", ",", "raise_on_error", "=", "False", ")", "if", "result", ".", "return_code", "!=", "0", ":", "_LOGGER", ".", "warning", "(", "\"Failed to restore previous environment by removing package %r (installed version %r), \"", "\"the error is not fatal but can affect future actions: %s\"", ",", "package", ",", "version", ",", "result", ".", "stderr", ",", ")", "_LOGGER", ".", "debug", "(", "\"Restoring previous environment setup after installation of %r (%s)\"", ",", "package", ",", "previous_version", ")", "if", "previous_version", ":", "cmd", "=", "\"{} -m pip install --force-reinstall --no-cache-dir --no-deps {}=={}\"", ".", "format", "(", "python_bin", ",", "quote", "(", "package", ")", ",", "quote", "(", "previous_version", "[", "\"package\"", "]", "[", "\"installed_version\"", "]", ")", ")", "result", "=", "run_command", "(", "cmd", ",", "raise_on_error", "=", "False", ")", "if", "result", ".", "return_code", "!=", "0", ":", "_LOGGER", ".", "warning", "(", "\"Failed to restore previous environment for package %r (installed version %r), \"", "\", the error is not fatal but can affect future actions (previous version: %r): %s\"", ",", "package", ",", "version", ",", "previous_version", ",", "result", ".", "stderr", ",", ")" ]
Install a requirement using the suggested pip binary.
[ "Install", "requirements", "specified", "using", "suggested", "pip", "binary", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python.py#L103-L155
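A minimal usage sketch for the record above, assuming the generator is exposed through contextlib.contextmanager (its yield/try/finally shape implies that) and that run_command and _pipdeptree come from this package; the interpreter path, package, version, and index URL are hypothetical:

from contextlib import contextmanager

# hypothetical wrapper mirroring how the generator above would be exposed
install_requirement = contextmanager(_install_requirement)

with install_requirement("venv/bin/python3", "requests", version="2.20.0",
                         index_url="https://pypi.org/simple"):
    # inside the block the pinned package is installed for the target interpreter;
    # on exit the finally branch uninstalls it and reinstalls the previous version
    pass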
13,729
thoth-station/solver
thoth/solver/python/python.py
_pipdeptree
def _pipdeptree(python_bin, package_name: str = None, warn: bool = False) -> typing.Optional[dict]:
    """Get pip dependency tree by executing pipdeptree tool."""
    cmd = "{} -m pipdeptree --json".format(python_bin)

    _LOGGER.debug("Obtaining pip dependency tree using: %r", cmd)
    output = run_command(cmd, is_json=True).stdout

    if not package_name:
        return output

    for entry in output:
        # In some versions pipdeptree does not work with the --packages flag, so do the logic on our own.
        # TODO: we should probably diff this output against the original environment
        if entry["package"]["key"].lower() == package_name.lower():
            return entry

    # The given package was not found.
    if warn:
        _LOGGER.warning("Package %r was not found in pipdeptree output %r", package_name, output)
    return None
python
def _pipdeptree(python_bin, package_name: str = None, warn: bool = False) -> typing.Optional[dict]:
    """Get pip dependency tree by executing pipdeptree tool."""
    cmd = "{} -m pipdeptree --json".format(python_bin)

    _LOGGER.debug("Obtaining pip dependency tree using: %r", cmd)
    output = run_command(cmd, is_json=True).stdout

    if not package_name:
        return output

    for entry in output:
        # In some versions pipdeptree does not work with the --packages flag, so do the logic on our own.
        # TODO: we should probably diff this output against the original environment
        if entry["package"]["key"].lower() == package_name.lower():
            return entry

    # The given package was not found.
    if warn:
        _LOGGER.warning("Package %r was not found in pipdeptree output %r", package_name, output)
    return None
[ "def", "_pipdeptree", "(", "python_bin", ",", "package_name", ":", "str", "=", "None", ",", "warn", ":", "bool", "=", "False", ")", "->", "typing", ".", "Optional", "[", "dict", "]", ":", "cmd", "=", "\"{} -m pipdeptree --json\"", ".", "format", "(", "python_bin", ")", "_LOGGER", ".", "debug", "(", "\"Obtaining pip dependency tree using: %r\"", ",", "cmd", ")", "output", "=", "run_command", "(", "cmd", ",", "is_json", "=", "True", ")", ".", "stdout", "if", "not", "package_name", ":", "return", "output", "for", "entry", "in", "output", ":", "# In some versions pipdeptree does not work with --packages flag, do the logic on out own.", "# TODO: we should probably do difference of reference this output and original environment", "if", "entry", "[", "\"package\"", "]", "[", "\"key\"", "]", ".", "lower", "(", ")", "==", "package_name", ".", "lower", "(", ")", ":", "return", "entry", "# The given package was not found.", "if", "warn", ":", "_LOGGER", ".", "warning", "(", "\"Package %r was not found in pipdeptree output %r\"", ",", "package_name", ",", "output", ")", "return", "None" ]
Get pip dependency tree by executing pipdeptree tool.
[ "Get", "pip", "dependency", "tree", "by", "executing", "pipdeptree", "tool", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python.py#L158-L177
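A short sketch of how the helper above is typically called; the interpreter path is a placeholder, and the entry shape in the comment follows pipdeptree's documented --json output:

whole_tree = _pipdeptree("venv/bin/python3")          # list of entries, one per installed package
entry = _pipdeptree("venv/bin/python3", "requests")   # single matching entry, or None
if entry:
    # each entry looks roughly like:
    # {"package": {"key": "requests", "installed_version": "2.20.0", ...},
    #  "dependencies": [{"key": "urllib3", ...}, ...]}
    print(entry["package"]["installed_version"])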
13,730
thoth-station/solver
thoth/solver/python/python.py
_get_dependency_specification
def _get_dependency_specification(dep_spec: typing.List[tuple]) -> str: """Get string representation of dependency specification as provided by PythonDependencyParser.""" return ",".join(dep_range[0] + dep_range[1] for dep_range in dep_spec)
python
def _get_dependency_specification(dep_spec: typing.List[tuple]) -> str: """Get string representation of dependency specification as provided by PythonDependencyParser.""" return ",".join(dep_range[0] + dep_range[1] for dep_range in dep_spec)
[ "def", "_get_dependency_specification", "(", "dep_spec", ":", "typing", ".", "List", "[", "tuple", "]", ")", "->", "str", ":", "return", "\",\"", ".", "join", "(", "dep_range", "[", "0", "]", "+", "dep_range", "[", "1", "]", "for", "dep_range", "in", "dep_spec", ")" ]
Get string representation of dependency specification as provided by PythonDependencyParser.
[ "Get", "string", "representation", "of", "dependency", "specification", "as", "provided", "by", "PythonDependencyParser", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python.py#L180-L182
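A worked example of the join above, using the (operator, version) pairs produced by PythonDependencyParser:

spec = _get_dependency_specification([(">=", "1.5"), ("<", "1.8")])
assert spec == ">=1.5,<1.8"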
13,731
thoth-station/solver
thoth/solver/python/python.py
resolve
def resolve(
    requirements: typing.List[str],
    index_urls: list = None,
    python_version: int = 3,
    exclude_packages: set = None,
    transitive: bool = True,
    subgraph_check_api: str = None,
) -> dict:
    """Resolve given requirements for the given Python version."""
    assert python_version in (2, 3), "Unknown Python version"

    if subgraph_check_api and not transitive:
        _LOGGER.error("The check against subgraph API cannot be done if no transitive dependencies are resolved")
        sys.exit(2)

    python_bin = "python3" if python_version == 3 else "python2"
    # Create the virtual environment with the interpreter matching the requested
    # Python version (the interpreter was previously hardcoded to python3).
    run_command("virtualenv -p {} venv".format(python_bin))
    python_bin = "venv/bin/" + python_bin
    run_command("{} -m pip install pipdeptree".format(python_bin))

    environment_details = _get_environment_details(python_bin)

    result = {"tree": [], "errors": [], "unparsed": [], "unresolved": [], "environment": environment_details}

    all_solvers = []
    for index_url in index_urls:
        source = Source(index_url)
        all_solvers.append(PythonSolver(fetcher_kwargs={"source": source}))

    for solver in all_solvers:
        solver_result = _do_resolve_index(
            python_bin=python_bin,
            solver=solver,
            all_solvers=all_solvers,
            requirements=requirements,
            exclude_packages=exclude_packages,
            transitive=transitive,
            subgraph_check_api=subgraph_check_api,
        )
        result["tree"].extend(solver_result["tree"])
        result["errors"].extend(solver_result["errors"])
        result["unparsed"].extend(solver_result["unparsed"])
        result["unresolved"].extend(solver_result["unresolved"])

    return result
python
def resolve(
    requirements: typing.List[str],
    index_urls: list = None,
    python_version: int = 3,
    exclude_packages: set = None,
    transitive: bool = True,
    subgraph_check_api: str = None,
) -> dict:
    """Resolve given requirements for the given Python version."""
    assert python_version in (2, 3), "Unknown Python version"

    if subgraph_check_api and not transitive:
        _LOGGER.error("The check against subgraph API cannot be done if no transitive dependencies are resolved")
        sys.exit(2)

    python_bin = "python3" if python_version == 3 else "python2"
    # Create the virtual environment with the interpreter matching the requested
    # Python version (the interpreter was previously hardcoded to python3).
    run_command("virtualenv -p {} venv".format(python_bin))
    python_bin = "venv/bin/" + python_bin
    run_command("{} -m pip install pipdeptree".format(python_bin))

    environment_details = _get_environment_details(python_bin)

    result = {"tree": [], "errors": [], "unparsed": [], "unresolved": [], "environment": environment_details}

    all_solvers = []
    for index_url in index_urls:
        source = Source(index_url)
        all_solvers.append(PythonSolver(fetcher_kwargs={"source": source}))

    for solver in all_solvers:
        solver_result = _do_resolve_index(
            python_bin=python_bin,
            solver=solver,
            all_solvers=all_solvers,
            requirements=requirements,
            exclude_packages=exclude_packages,
            transitive=transitive,
            subgraph_check_api=subgraph_check_api,
        )
        result["tree"].extend(solver_result["tree"])
        result["errors"].extend(solver_result["errors"])
        result["unparsed"].extend(solver_result["unparsed"])
        result["unresolved"].extend(solver_result["unresolved"])

    return result
[ "def", "resolve", "(", "requirements", ":", "typing", ".", "List", "[", "str", "]", ",", "index_urls", ":", "list", "=", "None", ",", "python_version", ":", "int", "=", "3", ",", "exclude_packages", ":", "set", "=", "None", ",", "transitive", ":", "bool", "=", "True", ",", "subgraph_check_api", ":", "str", "=", "None", ",", ")", "->", "dict", ":", "assert", "python_version", "in", "(", "2", ",", "3", ")", ",", "\"Unknown Python version\"", "if", "subgraph_check_api", "and", "not", "transitive", ":", "_LOGGER", ".", "error", "(", "\"The check against subgraph API cannot be done if no transitive dependencies are resolved\"", ")", "sys", ".", "exit", "(", "2", ")", "python_bin", "=", "\"python3\"", "if", "python_version", "==", "3", "else", "\"python2\"", "run_command", "(", "\"virtualenv -p python3 venv\"", ")", "python_bin", "=", "\"venv/bin/\"", "+", "python_bin", "run_command", "(", "\"{} -m pip install pipdeptree\"", ".", "format", "(", "python_bin", ")", ")", "environment_details", "=", "_get_environment_details", "(", "python_bin", ")", "result", "=", "{", "\"tree\"", ":", "[", "]", ",", "\"errors\"", ":", "[", "]", ",", "\"unparsed\"", ":", "[", "]", ",", "\"unresolved\"", ":", "[", "]", ",", "\"environment\"", ":", "environment_details", "}", "all_solvers", "=", "[", "]", "for", "index_url", "in", "index_urls", ":", "source", "=", "Source", "(", "index_url", ")", "all_solvers", ".", "append", "(", "PythonSolver", "(", "fetcher_kwargs", "=", "{", "\"source\"", ":", "source", "}", ")", ")", "for", "solver", "in", "all_solvers", ":", "solver_result", "=", "_do_resolve_index", "(", "python_bin", "=", "python_bin", ",", "solver", "=", "solver", ",", "all_solvers", "=", "all_solvers", ",", "requirements", "=", "requirements", ",", "exclude_packages", "=", "exclude_packages", ",", "transitive", "=", "transitive", ",", "subgraph_check_api", "=", "subgraph_check_api", ",", ")", "result", "[", "\"tree\"", "]", ".", "extend", "(", "solver_result", "[", "\"tree\"", "]", ")", "result", "[", "\"errors\"", "]", ".", "extend", "(", "solver_result", "[", "\"errors\"", "]", ")", "result", "[", "\"unparsed\"", "]", ".", "extend", "(", "solver_result", "[", "\"unparsed\"", "]", ")", "result", "[", "\"unresolved\"", "]", ".", "extend", "(", "solver_result", "[", "\"unresolved\"", "]", ")", "return", "result" ]
Resolve given requirements for the given Python version.
[ "Resolve", "given", "requirements", "for", "the", "given", "Python", "version", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python.py#L356-L401
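A hedged usage sketch for resolve; the requirement and index URL are placeholders, and the keys read at the end match the result dict assembled above:

result = resolve(
    ["requests>=2.0"],
    index_urls=["https://pypi.org/simple"],
    python_version=3,
    transitive=False,
)
print(len(result["tree"]), result["errors"], result["unresolved"])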
13,732
thoth-station/solver
thoth/solver/python/python_solver.py
PythonReleasesFetcher.fetch_releases
def fetch_releases(self, package_name): """Fetch package and index_url for a package_name.""" package_name = self.source.normalize_package_name(package_name) releases = self.source.get_package_versions(package_name) releases_with_index_url = [(item, self.index_url) for item in releases] return package_name, releases_with_index_url
python
def fetch_releases(self, package_name): """Fetch package and index_url for a package_name.""" package_name = self.source.normalize_package_name(package_name) releases = self.source.get_package_versions(package_name) releases_with_index_url = [(item, self.index_url) for item in releases] return package_name, releases_with_index_url
[ "def", "fetch_releases", "(", "self", ",", "package_name", ")", ":", "package_name", "=", "self", ".", "source", ".", "normalize_package_name", "(", "package_name", ")", "releases", "=", "self", ".", "source", ".", "get_package_versions", "(", "package_name", ")", "releases_with_index_url", "=", "[", "(", "item", ",", "self", ".", "index_url", ")", "for", "item", "in", "releases", "]", "return", "package_name", ",", "releases_with_index_url" ]
Fetch package and index_url for a package_name.
[ "Fetch", "package", "and", "index_url", "for", "a", "package_name", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python_solver.py#L49-L54
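A brief usage sketch; the construction of PythonReleasesFetcher is not shown in this record, so the fetcher variable below is assumed to be an instance wrapping a Source:

name, releases = fetcher.fetch_releases("Django")  # fetcher: an assumed PythonReleasesFetcher instance
# name is the normalized package name, and each release is paired with the index it came from:
# ("django", [("2.2.1", "https://pypi.org/simple"), ...])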
13,733
thoth-station/solver
thoth/solver/python/python_solver.py
PythonDependencyParser.parse_python
def parse_python(spec):  # Ignore PyDocStyleBear
    """Parse PyPI specification of a single dependency.

    :param spec: str, for example "Django>=1.5,<1.8"
    :return: [Django [[('>=', '1.5'), ('<', '1.8')]]]
    """

    def _extract_op_version(spec):
        # https://www.python.org/dev/peps/pep-0440/#compatible-release
        if spec.operator == "~=":
            version = spec.version.split(".")
            if len(version) in {2, 3, 4}:
                if len(version) in {3, 4}:
                    del version[-1]  # will increase the last but one in next line
                version[-1] = str(int(version[-1]) + 1)
            else:
                raise ValueError("%r must not be used with %r" % (spec.operator, spec.version))

            return [(">=", spec.version), ("<", ".".join(version))]
        # Trailing .* is permitted per
        # https://www.python.org/dev/peps/pep-0440/#version-matching
        elif spec.operator == "==" and spec.version.endswith(".*"):
            try:
                result = check_output(["/usr/bin/semver-ranger", spec.version], universal_newlines=True).strip()
                gte, lt = result.split()
                return [(">=", gte.lstrip(">=")), ("<", lt.lstrip("<"))]
            except ValueError:
                _LOGGER.warning("couldn't resolve ==%s", spec.version)
                return spec.operator, spec.version
        # https://www.python.org/dev/peps/pep-0440/#arbitrary-equality
        # Use of this operator is heavily discouraged, so just convert it to 'Version matching'
        elif spec.operator == "===":
            return "==", spec.version
        else:
            return spec.operator, spec.version

    def _get_pip_spec(requirements):
        """There is no `specs` field in pip 8+, take the info from the `specifier` field."""
        if hasattr(requirements, "specs"):
            return requirements.specs
        elif hasattr(requirements, "specifier"):
            specs = [_extract_op_version(spec) for spec in requirements.specifier]
            if len(specs) == 0:
                # TODO: I'm not sure about this one;
                # we should probably return None instead and let pip deal with this
                specs = [(">=", "0.0.0")]
            return specs

    _LOGGER.info("Parsing dependency %r", spec)
    # create a temporary file and store the spec there since
    # `parse_requirements` requires a file
    with NamedTemporaryFile(mode="w+", suffix="pysolve") as f:
        f.write(spec)
        f.flush()
        parsed = parse_requirements(f.name, session=f.name)
        dependency = [Dependency(x.name, _get_pip_spec(x.req)) for x in parsed].pop()

    return dependency
python
def parse_python(spec):  # Ignore PyDocStyleBear
    """Parse PyPI specification of a single dependency.

    :param spec: str, for example "Django>=1.5,<1.8"
    :return: [Django [[('>=', '1.5'), ('<', '1.8')]]]
    """

    def _extract_op_version(spec):
        # https://www.python.org/dev/peps/pep-0440/#compatible-release
        if spec.operator == "~=":
            version = spec.version.split(".")
            if len(version) in {2, 3, 4}:
                if len(version) in {3, 4}:
                    del version[-1]  # will increase the last but one in next line
                version[-1] = str(int(version[-1]) + 1)
            else:
                raise ValueError("%r must not be used with %r" % (spec.operator, spec.version))

            return [(">=", spec.version), ("<", ".".join(version))]
        # Trailing .* is permitted per
        # https://www.python.org/dev/peps/pep-0440/#version-matching
        elif spec.operator == "==" and spec.version.endswith(".*"):
            try:
                result = check_output(["/usr/bin/semver-ranger", spec.version], universal_newlines=True).strip()
                gte, lt = result.split()
                return [(">=", gte.lstrip(">=")), ("<", lt.lstrip("<"))]
            except ValueError:
                _LOGGER.warning("couldn't resolve ==%s", spec.version)
                return spec.operator, spec.version
        # https://www.python.org/dev/peps/pep-0440/#arbitrary-equality
        # Use of this operator is heavily discouraged, so just convert it to 'Version matching'
        elif spec.operator == "===":
            return "==", spec.version
        else:
            return spec.operator, spec.version

    def _get_pip_spec(requirements):
        """There is no `specs` field in pip 8+, take the info from the `specifier` field."""
        if hasattr(requirements, "specs"):
            return requirements.specs
        elif hasattr(requirements, "specifier"):
            specs = [_extract_op_version(spec) for spec in requirements.specifier]
            if len(specs) == 0:
                # TODO: I'm not sure about this one;
                # we should probably return None instead and let pip deal with this
                specs = [(">=", "0.0.0")]
            return specs

    _LOGGER.info("Parsing dependency %r", spec)
    # create a temporary file and store the spec there since
    # `parse_requirements` requires a file
    with NamedTemporaryFile(mode="w+", suffix="pysolve") as f:
        f.write(spec)
        f.flush()
        parsed = parse_requirements(f.name, session=f.name)
        dependency = [Dependency(x.name, _get_pip_spec(x.req)) for x in parsed].pop()

    return dependency
[ "def", "parse_python", "(", "spec", ")", ":", "# Ignore PyDocStyleBear", "def", "_extract_op_version", "(", "spec", ")", ":", "# https://www.python.org/dev/peps/pep-0440/#compatible-release", "if", "spec", ".", "operator", "==", "\"~=\"", ":", "version", "=", "spec", ".", "version", ".", "split", "(", "\".\"", ")", "if", "len", "(", "version", ")", "in", "{", "2", ",", "3", ",", "4", "}", ":", "if", "len", "(", "version", ")", "in", "{", "3", ",", "4", "}", ":", "del", "version", "[", "-", "1", "]", "# will increase the last but one in next line", "version", "[", "-", "1", "]", "=", "str", "(", "int", "(", "version", "[", "-", "1", "]", ")", "+", "1", ")", "else", ":", "raise", "ValueError", "(", "\"%r must not be used with %r\"", "%", "(", "spec", ".", "operator", ",", "spec", ".", "version", ")", ")", "return", "[", "(", "\">=\"", ",", "spec", ".", "version", ")", ",", "(", "\"<\"", ",", "\".\"", ".", "join", "(", "version", ")", ")", "]", "# Trailing .* is permitted per", "# https://www.python.org/dev/peps/pep-0440/#version-matching", "elif", "spec", ".", "operator", "==", "\"==\"", "and", "spec", ".", "version", ".", "endswith", "(", "\".*\"", ")", ":", "try", ":", "result", "=", "check_output", "(", "[", "\"/usr/bin/semver-ranger\"", ",", "spec", ".", "version", "]", ",", "universal_newlines", "=", "True", ")", ".", "strip", "(", ")", "gte", ",", "lt", "=", "result", ".", "split", "(", ")", "return", "[", "(", "\">=\"", ",", "gte", ".", "lstrip", "(", "\">=\"", ")", ")", ",", "(", "\"<\"", ",", "lt", ".", "lstrip", "(", "\"<\"", ")", ")", "]", "except", "ValueError", ":", "_LOGGER", ".", "warning", "(", "\"couldn't resolve ==%s\"", ",", "spec", ".", "version", ")", "return", "spec", ".", "operator", ",", "spec", ".", "version", "# https://www.python.org/dev/peps/pep-0440/#arbitrary-equality", "# Use of this operator is heavily discouraged, so just convert it to 'Version matching'", "elif", "spec", ".", "operator", "==", "\"===\"", ":", "return", "\"==\"", ",", "spec", ".", "version", "else", ":", "return", "spec", ".", "operator", ",", "spec", ".", "version", "def", "_get_pip_spec", "(", "requirements", ")", ":", "\"\"\"There is no `specs` field In Pip 8+, take info from `specifier` field.\"\"\"", "if", "hasattr", "(", "requirements", ",", "\"specs\"", ")", ":", "return", "requirements", ".", "specs", "elif", "hasattr", "(", "requirements", ",", "\"specifier\"", ")", ":", "specs", "=", "[", "_extract_op_version", "(", "spec", ")", "for", "spec", "in", "requirements", ".", "specifier", "]", "if", "len", "(", "specs", ")", "==", "0", ":", "# TODO: I'm not sure with this one", "# we should probably return None instead and let pip deal with this", "specs", "=", "[", "(", "\">=\"", ",", "\"0.0.0\"", ")", "]", "return", "specs", "_LOGGER", ".", "info", "(", "\"Parsing dependency %r\"", ",", "spec", ")", "# create a temporary file and store the spec there since", "# `parse_requirements` requires a file", "with", "NamedTemporaryFile", "(", "mode", "=", "\"w+\"", ",", "suffix", "=", "\"pysolve\"", ")", "as", "f", ":", "f", ".", "write", "(", "spec", ")", "f", ".", "flush", "(", ")", "parsed", "=", "parse_requirements", "(", "f", ".", "name", ",", "session", "=", "f", ".", "name", ")", "dependency", "=", "[", "Dependency", "(", "x", ".", "name", ",", "_get_pip_spec", "(", "x", ".", "req", ")", ")", "for", "x", "in", "parsed", "]", ".", "pop", "(", ")", "return", "dependency" ]
Parse PyPI specification of a single dependency. :param spec: str, for example "Django>=1.5,<1.8" :return: [Django [[('>=', '1.5'), ('<', '1.8')]]]
[ "Parse", "PyPI", "specification", "of", "a", "single", "dependency", "." ]
de9bd6e744cb4d5f70320ba77d6875ccb8b876c4
https://github.com/thoth-station/solver/blob/de9bd6e744cb4d5f70320ba77d6875ccb8b876c4/thoth/solver/python/python_solver.py#L66-L122
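A worked example matching the docstring above; Dependency is the structure this module builds from the parsed requirement, so the attribute names in the comment are assumptions:

dep = PythonDependencyParser.parse_python("Django>=1.5,<1.8")
# dep.name == "Django"; the parsed spec (assumed attribute) holds [('>=', '1.5'), ('<', '1.8')]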
13,734
floyernick/fleep-py
fleep/__init__.py
get
def get(obj):
    """
    Determines file format and picks suitable file types, extensions and MIME types

    Takes:
        obj (bytes) -> byte sequence (128 bytes are enough)

    Returns:
        (<class 'fleep.Info'>) -> Class instance
    """
    if not isinstance(obj, bytes):
        raise TypeError("object type must be bytes")

    info = {
        "type": dict(),
        "extension": dict(),
        "mime": dict()
    }

    # Each byte is rendered as two uppercase hex digits plus a separating space,
    # so one byte of input occupies three characters of the stream.
    stream = " ".join(['{:02X}'.format(byte) for byte in obj])

    for element in data:
        for signature in element["signature"]:
            # element["offset"] is a byte offset; multiply by 3 (2 hex digits + 1 space)
            # to get the character offset into the hex stream.
            offset = element["offset"] * 2 + element["offset"]
            if signature == stream[offset:len(signature) + offset]:
                for key in ["type", "extension", "mime"]:
                    info[key][element[key]] = len(signature)

    # Longer signature matches are more specific, so rank candidates by match length.
    for key in ["type", "extension", "mime"]:
        info[key] = [element for element in sorted(info[key], key=info[key].get, reverse=True)]

    return Info(info["type"], info["extension"], info["mime"])
python
def get(obj):
    """
    Determines file format and picks suitable file types, extensions and MIME types

    Takes:
        obj (bytes) -> byte sequence (128 bytes are enough)

    Returns:
        (<class 'fleep.Info'>) -> Class instance
    """
    if not isinstance(obj, bytes):
        raise TypeError("object type must be bytes")

    info = {
        "type": dict(),
        "extension": dict(),
        "mime": dict()
    }

    # Each byte is rendered as two uppercase hex digits plus a separating space,
    # so one byte of input occupies three characters of the stream.
    stream = " ".join(['{:02X}'.format(byte) for byte in obj])

    for element in data:
        for signature in element["signature"]:
            # element["offset"] is a byte offset; multiply by 3 (2 hex digits + 1 space)
            # to get the character offset into the hex stream.
            offset = element["offset"] * 2 + element["offset"]
            if signature == stream[offset:len(signature) + offset]:
                for key in ["type", "extension", "mime"]:
                    info[key][element[key]] = len(signature)

    # Longer signature matches are more specific, so rank candidates by match length.
    for key in ["type", "extension", "mime"]:
        info[key] = [element for element in sorted(info[key], key=info[key].get, reverse=True)]

    return Info(info["type"], info["extension"], info["mime"])
[ "def", "get", "(", "obj", ")", ":", "if", "not", "isinstance", "(", "obj", ",", "bytes", ")", ":", "raise", "TypeError", "(", "\"object type must be bytes\"", ")", "info", "=", "{", "\"type\"", ":", "dict", "(", ")", ",", "\"extension\"", ":", "dict", "(", ")", ",", "\"mime\"", ":", "dict", "(", ")", "}", "stream", "=", "\" \"", ".", "join", "(", "[", "'{:02X}'", ".", "format", "(", "byte", ")", "for", "byte", "in", "obj", "]", ")", "for", "element", "in", "data", ":", "for", "signature", "in", "element", "[", "\"signature\"", "]", ":", "offset", "=", "element", "[", "\"offset\"", "]", "*", "2", "+", "element", "[", "\"offset\"", "]", "if", "signature", "==", "stream", "[", "offset", ":", "len", "(", "signature", ")", "+", "offset", "]", ":", "for", "key", "in", "[", "\"type\"", ",", "\"extension\"", ",", "\"mime\"", "]", ":", "info", "[", "key", "]", "[", "element", "[", "key", "]", "]", "=", "len", "(", "signature", ")", "for", "key", "in", "[", "\"type\"", ",", "\"extension\"", ",", "\"mime\"", "]", ":", "info", "[", "key", "]", "=", "[", "element", "for", "element", "in", "sorted", "(", "info", "[", "key", "]", ",", "key", "=", "info", "[", "key", "]", ".", "get", ",", "reverse", "=", "True", ")", "]", "return", "Info", "(", "info", "[", "\"type\"", "]", ",", "info", "[", "\"extension\"", "]", ",", "info", "[", "\"mime\"", "]", ")" ]
Determines file format and picks suitable file types, extensions and MIME types Takes: obj (bytes) -> byte sequence (128 bytes are enough) Returns: (<class 'fleep.Info'>) -> Class instance
[ "Determines", "file", "format", "and", "picks", "suitable", "file", "types", "extensions", "and", "MIME", "types" ]
994bc2c274482d80ab13d89d8f7343eb316d3e44
https://github.com/floyernick/fleep-py/blob/994bc2c274482d80ab13d89d8f7343eb316d3e44/fleep/__init__.py#L50-L82
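A minimal usage sketch based on the docstring: read the first 128 bytes of a file and inspect the ranked matches (the Info attribute names mirror the keys assembled above; the file name and printed values are illustrative):

import fleep

with open("example.png", "rb") as handle:  # hypothetical file
    info = fleep.get(handle.read(128))
print(info.type, info.extension, info.mime)  # e.g. ['raster-image'] ['png'] ['image/png']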
13,735
scikit-tda/persim
persim/plot.py
bottleneck_matching
def bottleneck_matching(I1, I2, matchidx, D, labels=["dgm1", "dgm2"], ax=None):
    """ Visualize bottleneck matching between two diagrams

    Parameters
    ===========

    I1: array
        A diagram
    I2: array
        A diagram
    matchidx: list of tuples of matched indices
        the matching returned by a bottleneck computation run with `matching=True`
    D: array
        cross-similarity matrix
    labels: list of strings
        names of diagrams for legend. Default = ["dgm1", "dgm2"],
    ax: matplotlib Axis object
        For plotting on a particular axis.
    """
    plot_diagrams([I1, I2], labels=labels, ax=ax)
    # Draw onto the requested axes (falling back to the current axes), so the ax
    # argument is honored for the matching lines as well.
    ax = ax or plt.gca()
    cp = np.cos(np.pi / 4)
    sp = np.sin(np.pi / 4)
    R = np.array([[cp, -sp], [sp, cp]])
    if I1.size == 0:
        I1 = np.array([[0, 0]])
    if I2.size == 0:
        I2 = np.array([[0, 0]])
    I1Rot = I1.dot(R)
    I2Rot = I2.dot(R)
    dists = [D[i, j] for (i, j) in matchidx]
    (i, j) = matchidx[np.argmax(dists)]
    if i >= I1.shape[0] and j >= I2.shape[0]:
        return
    if i >= I1.shape[0]:
        diagElem = np.array([I2Rot[j, 0], 0])
        diagElem = diagElem.dot(R.T)
        ax.plot([I2[j, 0], diagElem[0]], [I2[j, 1], diagElem[1]], "g")
    elif j >= I2.shape[0]:
        diagElem = np.array([I1Rot[i, 0], 0])
        diagElem = diagElem.dot(R.T)
        ax.plot([I1[i, 0], diagElem[0]], [I1[i, 1], diagElem[1]], "g")
    else:
        ax.plot([I1[i, 0], I2[j, 0]], [I1[i, 1], I2[j, 1]], "g")
python
def bottleneck_matching(I1, I2, matchidx, D, labels=["dgm1", "dgm2"], ax=None):
    """ Visualize bottleneck matching between two diagrams

    Parameters
    ===========

    I1: array
        A diagram
    I2: array
        A diagram
    matchidx: list of tuples of matched indices
        the matching returned by a bottleneck computation run with `matching=True`
    D: array
        cross-similarity matrix
    labels: list of strings
        names of diagrams for legend. Default = ["dgm1", "dgm2"],
    ax: matplotlib Axis object
        For plotting on a particular axis.
    """
    plot_diagrams([I1, I2], labels=labels, ax=ax)
    # Draw onto the requested axes (falling back to the current axes), so the ax
    # argument is honored for the matching lines as well.
    ax = ax or plt.gca()
    cp = np.cos(np.pi / 4)
    sp = np.sin(np.pi / 4)
    R = np.array([[cp, -sp], [sp, cp]])
    if I1.size == 0:
        I1 = np.array([[0, 0]])
    if I2.size == 0:
        I2 = np.array([[0, 0]])
    I1Rot = I1.dot(R)
    I2Rot = I2.dot(R)
    dists = [D[i, j] for (i, j) in matchidx]
    (i, j) = matchidx[np.argmax(dists)]
    if i >= I1.shape[0] and j >= I2.shape[0]:
        return
    if i >= I1.shape[0]:
        diagElem = np.array([I2Rot[j, 0], 0])
        diagElem = diagElem.dot(R.T)
        ax.plot([I2[j, 0], diagElem[0]], [I2[j, 1], diagElem[1]], "g")
    elif j >= I2.shape[0]:
        diagElem = np.array([I1Rot[i, 0], 0])
        diagElem = diagElem.dot(R.T)
        ax.plot([I1[i, 0], diagElem[0]], [I1[i, 1], diagElem[1]], "g")
    else:
        ax.plot([I1[i, 0], I2[j, 0]], [I1[i, 1], I2[j, 1]], "g")
[ "def", "bottleneck_matching", "(", "I1", ",", "I2", ",", "matchidx", ",", "D", ",", "labels", "=", "[", "\"dgm1\"", ",", "\"dgm2\"", "]", ",", "ax", "=", "None", ")", ":", "plot_diagrams", "(", "[", "I1", ",", "I2", "]", ",", "labels", "=", "labels", ",", "ax", "=", "ax", ")", "cp", "=", "np", ".", "cos", "(", "np", ".", "pi", "/", "4", ")", "sp", "=", "np", ".", "sin", "(", "np", ".", "pi", "/", "4", ")", "R", "=", "np", ".", "array", "(", "[", "[", "cp", ",", "-", "sp", "]", ",", "[", "sp", ",", "cp", "]", "]", ")", "if", "I1", ".", "size", "==", "0", ":", "I1", "=", "np", ".", "array", "(", "[", "[", "0", ",", "0", "]", "]", ")", "if", "I2", ".", "size", "==", "0", ":", "I2", "=", "np", ".", "array", "(", "[", "[", "0", ",", "0", "]", "]", ")", "I1Rot", "=", "I1", ".", "dot", "(", "R", ")", "I2Rot", "=", "I2", ".", "dot", "(", "R", ")", "dists", "=", "[", "D", "[", "i", ",", "j", "]", "for", "(", "i", ",", "j", ")", "in", "matchidx", "]", "(", "i", ",", "j", ")", "=", "matchidx", "[", "np", ".", "argmax", "(", "dists", ")", "]", "if", "i", ">=", "I1", ".", "shape", "[", "0", "]", "and", "j", ">=", "I2", ".", "shape", "[", "0", "]", ":", "return", "if", "i", ">=", "I1", ".", "shape", "[", "0", "]", ":", "diagElem", "=", "np", ".", "array", "(", "[", "I2Rot", "[", "j", ",", "0", "]", ",", "0", "]", ")", "diagElem", "=", "diagElem", ".", "dot", "(", "R", ".", "T", ")", "plt", ".", "plot", "(", "[", "I2", "[", "j", ",", "0", "]", ",", "diagElem", "[", "0", "]", "]", ",", "[", "I2", "[", "j", ",", "1", "]", ",", "diagElem", "[", "1", "]", "]", ",", "\"g\"", ")", "elif", "j", ">=", "I2", ".", "shape", "[", "0", "]", ":", "diagElem", "=", "np", ".", "array", "(", "[", "I1Rot", "[", "i", ",", "0", "]", ",", "0", "]", ")", "diagElem", "=", "diagElem", ".", "dot", "(", "R", ".", "T", ")", "plt", ".", "plot", "(", "[", "I1", "[", "i", ",", "0", "]", ",", "diagElem", "[", "0", "]", "]", ",", "[", "I1", "[", "i", ",", "1", "]", ",", "diagElem", "[", "1", "]", "]", ",", "\"g\"", ")", "else", ":", "plt", ".", "plot", "(", "[", "I1", "[", "i", ",", "0", "]", ",", "I2", "[", "j", ",", "0", "]", "]", ",", "[", "I1", "[", "i", ",", "1", "]", ",", "I2", "[", "j", ",", "1", "]", "]", ",", "\"g\"", ")" ]
Visualize bottleneck matching between two diagrams

Parameters
===========

I1: array
    A diagram
I2: array
    A diagram
matchidx: list of tuples of matched indices
    the matching returned by a bottleneck computation run with `matching=True`
D: array
    cross-similarity matrix
labels: list of strings
    names of diagrams for legend. Default = ["dgm1", "dgm2"],
ax: matplotlib Axis object
    For plotting on a particular axis.
[ "Visualize", "bottleneck", "matching", "between", "two", "diagrams" ]
f234f543058bdedb9729bf8c4a90da41e57954e0
https://github.com/scikit-tda/persim/blob/f234f543058bdedb9729bf8c4a90da41e57954e0/persim/plot.py#L9-L53
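A usage sketch; per the docstring, matchidx and D come from a bottleneck computation run with matching=True, so the exact return shape of persim.bottleneck below is an assumption:

import numpy as np
from persim import bottleneck

dgm1 = np.array([[0.1, 0.5], [0.2, 0.9]])
dgm2 = np.array([[0.1, 0.6]])
d, (matchidx, D) = bottleneck(dgm1, dgm2, matching=True)  # assumed return shape
bottleneck_matching(dgm1, dgm2, matchidx, D, labels=["clean", "noisy"])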
13,736
scikit-tda/persim
persim/images.py
PersImage.transform
def transform(self, diagrams):
    """ Convert diagram or list of diagrams to a persistence image.

    Parameters
    -----------

    diagrams : list of or singleton diagram, list of pairs. [(birth, death)]
        Persistence diagrams to be converted to persistence images. It is assumed they are in
        (birth, death) format. Can input a list of diagrams or a single diagram.
    """
    # if diagram is empty, return empty image
    if len(diagrams) == 0:
        return np.zeros((self.nx, self.ny))
    # if first entry of first entry is not iterable, then diagrams is singular and we need to make it a list of diagrams
    try:
        # the Iterable ABC lives in collections.abc; the top-level collections alias was removed in Python 3.10
        import collections.abc
        singular = not isinstance(diagrams[0][0], collections.abc.Iterable)
    except IndexError:
        singular = False

    if singular:
        diagrams = [diagrams]

    # np.copy's second positional parameter is `order`, not a dtype, so cast explicitly
    dgs = [np.array(diagram, dtype=np.float64) for diagram in diagrams]
    landscapes = [PersImage.to_landscape(dg) for dg in dgs]

    if not self.specs:
        self.specs = {
            "maxBD": np.max([np.max(np.vstack((landscape, np.zeros((1, 2))))) for landscape in landscapes] + [0]),
            "minBD": np.min([np.min(np.vstack((landscape, np.zeros((1, 2))))) for landscape in landscapes] + [0]),
        }
    imgs = [self._transform(dgm) for dgm in landscapes]

    # Make sure we return one item.
    if singular:
        imgs = imgs[0]

    return imgs
python
def transform(self, diagrams):
    """ Convert diagram or list of diagrams to a persistence image.

    Parameters
    -----------

    diagrams : list of or singleton diagram, list of pairs. [(birth, death)]
        Persistence diagrams to be converted to persistence images. It is assumed they are in
        (birth, death) format. Can input a list of diagrams or a single diagram.
    """
    # if diagram is empty, return empty image
    if len(diagrams) == 0:
        return np.zeros((self.nx, self.ny))
    # if first entry of first entry is not iterable, then diagrams is singular and we need to make it a list of diagrams
    try:
        # the Iterable ABC lives in collections.abc; the top-level collections alias was removed in Python 3.10
        import collections.abc
        singular = not isinstance(diagrams[0][0], collections.abc.Iterable)
    except IndexError:
        singular = False

    if singular:
        diagrams = [diagrams]

    # np.copy's second positional parameter is `order`, not a dtype, so cast explicitly
    dgs = [np.array(diagram, dtype=np.float64) for diagram in diagrams]
    landscapes = [PersImage.to_landscape(dg) for dg in dgs]

    if not self.specs:
        self.specs = {
            "maxBD": np.max([np.max(np.vstack((landscape, np.zeros((1, 2))))) for landscape in landscapes] + [0]),
            "minBD": np.min([np.min(np.vstack((landscape, np.zeros((1, 2))))) for landscape in landscapes] + [0]),
        }
    imgs = [self._transform(dgm) for dgm in landscapes]

    # Make sure we return one item.
    if singular:
        imgs = imgs[0]

    return imgs
[ "def", "transform", "(", "self", ",", "diagrams", ")", ":", "# if diagram is empty, return empty image", "if", "len", "(", "diagrams", ")", "==", "0", ":", "return", "np", ".", "zeros", "(", "(", "self", ".", "nx", ",", "self", ".", "ny", ")", ")", "# if first entry of first entry is not iterable, then diagrams is singular and we need to make it a list of diagrams", "try", ":", "singular", "=", "not", "isinstance", "(", "diagrams", "[", "0", "]", "[", "0", "]", ",", "collections", ".", "Iterable", ")", "except", "IndexError", ":", "singular", "=", "False", "if", "singular", ":", "diagrams", "=", "[", "diagrams", "]", "dgs", "=", "[", "np", ".", "copy", "(", "diagram", ",", "np", ".", "float64", ")", "for", "diagram", "in", "diagrams", "]", "landscapes", "=", "[", "PersImage", ".", "to_landscape", "(", "dg", ")", "for", "dg", "in", "dgs", "]", "if", "not", "self", ".", "specs", ":", "self", ".", "specs", "=", "{", "\"maxBD\"", ":", "np", ".", "max", "(", "[", "np", ".", "max", "(", "np", ".", "vstack", "(", "(", "landscape", ",", "np", ".", "zeros", "(", "(", "1", ",", "2", ")", ")", ")", ")", ")", "for", "landscape", "in", "landscapes", "]", "+", "[", "0", "]", ")", ",", "\"minBD\"", ":", "np", ".", "min", "(", "[", "np", ".", "min", "(", "np", ".", "vstack", "(", "(", "landscape", ",", "np", ".", "zeros", "(", "(", "1", ",", "2", ")", ")", ")", ")", ")", "for", "landscape", "in", "landscapes", "]", "+", "[", "0", "]", ")", ",", "}", "imgs", "=", "[", "self", ".", "_transform", "(", "dgm", ")", "for", "dgm", "in", "landscapes", "]", "# Make sure we return one item.", "if", "singular", ":", "imgs", "=", "imgs", "[", "0", "]", "return", "imgs" ]
Convert diagram or list of diagrams to a persistence image. Parameters ----------- diagrams : list of or singleton diagram, list of pairs. [(birth, death)] Persistence diagrams to be converted to persistence images. It is assumed they are in (birth, death) format. Can input a list of diagrams or a single diagram.
[ "Convert", "diagram", "or", "list", "of", "diagrams", "to", "a", "persistence", "image", "." ]
f234f543058bdedb9729bf8c4a90da41e57954e0
https://github.com/scikit-tda/persim/blob/f234f543058bdedb9729bf8c4a90da41e57954e0/persim/images.py#L72-L110
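A small sketch of transform on a single diagram versus a list of diagrams; the constructor arguments are hypothetical:

import numpy as np
from persim import PersImage

pim = PersImage(pixels=(20, 20))            # hypothetical resolution
dgm = np.array([[0.1, 0.4], [0.3, 0.8]])
img = pim.transform(dgm)                    # single diagram -> single image
imgs = pim.transform([dgm, dgm])            # list of diagrams -> list of images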
13,737
scikit-tda/persim
persim/images.py
PersImage.weighting
def weighting(self, landscape=None):
    """ Define a weighting function. For stability results to hold, the function must be 0 at y=0.
    """
    # TODO: Implement a logistic function
    # TODO: use self.weighting_type to choose function

    if landscape is not None:
        if len(landscape) > 0:
            maxy = np.max(landscape[:, 1])
        else:
            maxy = 1

    def linear(interval):
        # linear function of y such that f(0) = 0 and f(max(y)) = 1
        d = interval[1]
        return (1 / maxy) * d if landscape is not None else d

    def pw_linear(interval):
        """ This is the function defined as w_b(t) in the original PI paper

        Take b to be maxy/self.ny to effectively zero out the bottom pixel row
        """
        t = interval[1]
        b = maxy / self.ny

        if t <= 0:
            return 0
        if 0 < t < b:
            return t / b
        if b <= t:
            return 1

    # NOTE: pw_linear is defined for the weighting_type TODO above but is not
    # selectable yet; the linear weighting is always returned for now.
    return linear
python
def weighting(self, landscape=None):
    """ Define a weighting function. For stability results to hold, the function must be 0 at y=0.
    """
    # TODO: Implement a logistic function
    # TODO: use self.weighting_type to choose function

    if landscape is not None:
        if len(landscape) > 0:
            maxy = np.max(landscape[:, 1])
        else:
            maxy = 1

    def linear(interval):
        # linear function of y such that f(0) = 0 and f(max(y)) = 1
        d = interval[1]
        return (1 / maxy) * d if landscape is not None else d

    def pw_linear(interval):
        """ This is the function defined as w_b(t) in the original PI paper

        Take b to be maxy/self.ny to effectively zero out the bottom pixel row
        """
        t = interval[1]
        b = maxy / self.ny

        if t <= 0:
            return 0
        if 0 < t < b:
            return t / b
        if b <= t:
            return 1

    # NOTE: pw_linear is defined for the weighting_type TODO above but is not
    # selectable yet; the linear weighting is always returned for now.
    return linear
[ "def", "weighting", "(", "self", ",", "landscape", "=", "None", ")", ":", "# TODO: Implement a logistic function", "# TODO: use self.weighting_type to choose function", "if", "landscape", "is", "not", "None", ":", "if", "len", "(", "landscape", ")", ">", "0", ":", "maxy", "=", "np", ".", "max", "(", "landscape", "[", ":", ",", "1", "]", ")", "else", ":", "maxy", "=", "1", "def", "linear", "(", "interval", ")", ":", "# linear function of y such that f(0) = 0 and f(max(y)) = 1", "d", "=", "interval", "[", "1", "]", "return", "(", "1", "/", "maxy", ")", "*", "d", "if", "landscape", "is", "not", "None", "else", "d", "def", "pw_linear", "(", "interval", ")", ":", "\"\"\" This is the function defined as w_b(t) in the original PI paper\n\n Take b to be maxy/self.ny to effectively zero out the bottom pixel row\n \"\"\"", "t", "=", "interval", "[", "1", "]", "b", "=", "maxy", "/", "self", ".", "ny", "if", "t", "<=", "0", ":", "return", "0", "if", "0", "<", "t", "<", "b", ":", "return", "t", "/", "b", "if", "b", "<=", "t", ":", "return", "1", "return", "linear" ]
Define a weighting function. For stability results to hold, the function must be 0 at y=0.
[ "Define", "a", "weighting", "function", "for", "stability", "results", "to", "hold", "the", "function", "must", "be", "0", "at", "y", "=", "0", "." ]
f234f543058bdedb9729bf8c4a90da41e57954e0
https://github.com/scikit-tda/persim/blob/f234f543058bdedb9729bf8c4a90da41e57954e0/persim/images.py#L143-L178
13,738
scikit-tda/persim
persim/images.py
PersImage.show
def show(self, imgs, ax=None):
    """ Visualize the persistence image

    """

    ax = ax or plt.gca()

    if type(imgs) is not list:
        imgs = [imgs]

    # Note: successive images are drawn onto the same axes, so only the last
    # one remains visible when several are passed.
    for img in imgs:
        ax.imshow(img, cmap=plt.get_cmap("plasma"))
        ax.axis("off")
python
def show(self, imgs, ax=None):
    """ Visualize the persistence image

    """

    ax = ax or plt.gca()

    if type(imgs) is not list:
        imgs = [imgs]

    # Note: successive images are drawn onto the same axes, so only the last
    # one remains visible when several are passed.
    for img in imgs:
        ax.imshow(img, cmap=plt.get_cmap("plasma"))
        ax.axis("off")
[ "def", "show", "(", "self", ",", "imgs", ",", "ax", "=", "None", ")", ":", "ax", "=", "ax", "or", "plt", ".", "gca", "(", ")", "if", "type", "(", "imgs", ")", "is", "not", "list", ":", "imgs", "=", "[", "imgs", "]", "for", "i", ",", "img", "in", "enumerate", "(", "imgs", ")", ":", "ax", ".", "imshow", "(", "img", ",", "cmap", "=", "plt", ".", "get_cmap", "(", "\"plasma\"", ")", ")", "ax", ".", "axis", "(", "\"off\"", ")" ]
Visualize the persistence image
[ "Visualize", "the", "persistence", "image" ]
f234f543058bdedb9729bf8c4a90da41e57954e0
https://github.com/scikit-tda/persim/blob/f234f543058bdedb9729bf8c4a90da41e57954e0/persim/images.py#L200-L212
13,739
pivotal-energy-solutions/django-datatable-view
datatableview/utils.py
resolve_orm_path
def resolve_orm_path(model, orm_path): """ Follows the queryset-style query path of ``orm_path`` starting from ``model`` class. If the path ends up referring to a bad field name, ``django.db.models.fields.FieldDoesNotExist`` will be raised. """ bits = orm_path.split('__') endpoint_model = reduce(get_model_at_related_field, [model] + bits[:-1]) if bits[-1] == 'pk': field = endpoint_model._meta.pk else: field = endpoint_model._meta.get_field(bits[-1]) return field
python
def resolve_orm_path(model, orm_path): """ Follows the queryset-style query path of ``orm_path`` starting from ``model`` class. If the path ends up referring to a bad field name, ``django.db.models.fields.FieldDoesNotExist`` will be raised. """ bits = orm_path.split('__') endpoint_model = reduce(get_model_at_related_field, [model] + bits[:-1]) if bits[-1] == 'pk': field = endpoint_model._meta.pk else: field = endpoint_model._meta.get_field(bits[-1]) return field
[ "def", "resolve_orm_path", "(", "model", ",", "orm_path", ")", ":", "bits", "=", "orm_path", ".", "split", "(", "'__'", ")", "endpoint_model", "=", "reduce", "(", "get_model_at_related_field", ",", "[", "model", "]", "+", "bits", "[", ":", "-", "1", "]", ")", "if", "bits", "[", "-", "1", "]", "==", "'pk'", ":", "field", "=", "endpoint_model", ".", "_meta", ".", "pk", "else", ":", "field", "=", "endpoint_model", ".", "_meta", ".", "get_field", "(", "bits", "[", "-", "1", "]", ")", "return", "field" ]
Follows the queryset-style query path of ``orm_path`` starting from ``model`` class. If the path ends up referring to a bad field name, ``django.db.models.fields.FieldDoesNotExist`` will be raised.
[ "Follows", "the", "queryset", "-", "style", "query", "path", "of", "orm_path", "starting", "from", "model", "class", ".", "If", "the", "path", "ends", "up", "referring", "to", "a", "bad", "field", "name", "django", ".", "db", ".", "models", ".", "fields", ".", "FieldDoesNotExist", "will", "be", "raised", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/utils.py#L56-L70
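An illustrative example with hypothetical models showing how the double-underscore path is walked (Book has a ForeignKey `author` to Author, which has a `name` field):

field = resolve_orm_path(Book, "author__name")  # returns Author._meta.get_field("name")
pk_field = resolve_orm_path(Book, "pk")         # shortcut to Book._meta.pk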
13,740
pivotal-energy-solutions/django-datatable-view
datatableview/utils.py
get_model_at_related_field
def get_model_at_related_field(model, attr): """ Looks up ``attr`` as a field of ``model`` and returns the related model class. If ``attr`` is not a relationship field, ``ValueError`` is raised. """ field = model._meta.get_field(attr) if hasattr(field, 'related_model'): return field.related_model raise ValueError("{model}.{attr} ({klass}) is not a relationship field.".format(**{ 'model': model.__name__, 'attr': attr, 'klass': field.__class__.__name__, }))
python
def get_model_at_related_field(model, attr): """ Looks up ``attr`` as a field of ``model`` and returns the related model class. If ``attr`` is not a relationship field, ``ValueError`` is raised. """ field = model._meta.get_field(attr) if hasattr(field, 'related_model'): return field.related_model raise ValueError("{model}.{attr} ({klass}) is not a relationship field.".format(**{ 'model': model.__name__, 'attr': attr, 'klass': field.__class__.__name__, }))
[ "def", "get_model_at_related_field", "(", "model", ",", "attr", ")", ":", "field", "=", "model", ".", "_meta", ".", "get_field", "(", "attr", ")", "if", "hasattr", "(", "field", ",", "'related_model'", ")", ":", "return", "field", ".", "related_model", "raise", "ValueError", "(", "\"{model}.{attr} ({klass}) is not a relationship field.\"", ".", "format", "(", "*", "*", "{", "'model'", ":", "model", ".", "__name__", ",", "'attr'", ":", "attr", ",", "'klass'", ":", "field", ".", "__class__", ".", "__name__", ",", "}", ")", ")" ]
Looks up ``attr`` as a field of ``model`` and returns the related model class. If ``attr`` is not a relationship field, ``ValueError`` is raised.
[ "Looks", "up", "attr", "as", "a", "field", "of", "model", "and", "returns", "the", "related", "model", "class", ".", "If", "attr", "is", "not", "a", "relationship", "field", "ValueError", "is", "raised", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/utils.py#L72-L88
13,741
pivotal-energy-solutions/django-datatable-view
datatableview/utils.py
contains_plural_field
def contains_plural_field(model, fields): """ Returns a boolean indicating if ``fields`` contains a relationship to multiple items. """ source_model = model for orm_path in fields: model = source_model bits = orm_path.lstrip('+-').split('__') for bit in bits[:-1]: field = model._meta.get_field(bit) if field.many_to_many or field.one_to_many: return True model = get_model_at_related_field(model, bit) return False
python
def contains_plural_field(model, fields): """ Returns a boolean indicating if ``fields`` contains a relationship to multiple items. """ source_model = model for orm_path in fields: model = source_model bits = orm_path.lstrip('+-').split('__') for bit in bits[:-1]: field = model._meta.get_field(bit) if field.many_to_many or field.one_to_many: return True model = get_model_at_related_field(model, bit) return False
[ "def", "contains_plural_field", "(", "model", ",", "fields", ")", ":", "source_model", "=", "model", "for", "orm_path", "in", "fields", ":", "model", "=", "source_model", "bits", "=", "orm_path", ".", "lstrip", "(", "'+-'", ")", ".", "split", "(", "'__'", ")", "for", "bit", "in", "bits", "[", ":", "-", "1", "]", ":", "field", "=", "model", ".", "_meta", ".", "get_field", "(", "bit", ")", "if", "field", ".", "many_to_many", "or", "field", ".", "one_to_many", ":", "return", "True", "model", "=", "get_model_at_related_field", "(", "model", ",", "bit", ")", "return", "False" ]
Returns a boolean indicating if ``fields`` contains a relationship to multiple items.
[ "Returns", "a", "boolean", "indicating", "if", "fields", "contains", "a", "relationship", "to", "multiple", "items", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/utils.py#L97-L108
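A small illustration with the same hypothetical models (Author therefore has a reverse one-to-many back to Book); note that only the hops before the final segment are checked and that a leading sort-direction prefix is stripped:

contains_plural_field(Book, ["author__name"])    # False: a ForeignKey hop is single-valued
contains_plural_field(Author, ["-book__title"])  # True: the reverse FK hop is one-to-many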
13,742
pivotal-energy-solutions/django-datatable-view
datatableview/views/base.py
DatatableJSONResponseMixin.get_json_response_object
def get_json_response_object(self, datatable): """ Returns the JSON-compatible dictionary that will be serialized for an AJAX response. The value names are in the form "s~" for strings, "i~" for integers, and "a~" for arrays, if you're unfamiliar with the old C-style jargon used in dataTables.js. "aa~" means "array of arrays". In some instances, the author uses "ao~" for "array of objects", an object being a javascript dictionary. """ # Ensure the object list is calculated. # Calling get_records() will do this implicitly, but we want simultaneous access to the # 'total_initial_record_count', and 'unpaged_record_count' values. datatable.populate_records() draw = getattr(self.request, self.request.method).get('draw', None) if draw is not None: draw = escape_uri_path(draw) response_data = { 'draw': draw, 'recordsFiltered': datatable.unpaged_record_count, 'recordsTotal': datatable.total_initial_record_count, 'data': [dict(record, **{ 'DT_RowId': record.pop('pk'), 'DT_RowData': record.pop('_extra_data'), }) for record in datatable.get_records()], } return response_data
python
def get_json_response_object(self, datatable): """ Returns the JSON-compatible dictionary that will be serialized for an AJAX response. The value names are in the form "s~" for strings, "i~" for integers, and "a~" for arrays, if you're unfamiliar with the old C-style jargon used in dataTables.js. "aa~" means "array of arrays". In some instances, the author uses "ao~" for "array of objects", an object being a javascript dictionary. """ # Ensure the object list is calculated. # Calling get_records() will do this implicitly, but we want simultaneous access to the # 'total_initial_record_count', and 'unpaged_record_count' values. datatable.populate_records() draw = getattr(self.request, self.request.method).get('draw', None) if draw is not None: draw = escape_uri_path(draw) response_data = { 'draw': draw, 'recordsFiltered': datatable.unpaged_record_count, 'recordsTotal': datatable.total_initial_record_count, 'data': [dict(record, **{ 'DT_RowId': record.pop('pk'), 'DT_RowData': record.pop('_extra_data'), }) for record in datatable.get_records()], } return response_data
[ "def", "get_json_response_object", "(", "self", ",", "datatable", ")", ":", "# Ensure the object list is calculated.", "# Calling get_records() will do this implicitly, but we want simultaneous access to the", "# 'total_initial_record_count', and 'unpaged_record_count' values.", "datatable", ".", "populate_records", "(", ")", "draw", "=", "getattr", "(", "self", ".", "request", ",", "self", ".", "request", ".", "method", ")", ".", "get", "(", "'draw'", ",", "None", ")", "if", "draw", "is", "not", "None", ":", "draw", "=", "escape_uri_path", "(", "draw", ")", "response_data", "=", "{", "'draw'", ":", "draw", ",", "'recordsFiltered'", ":", "datatable", ".", "unpaged_record_count", ",", "'recordsTotal'", ":", "datatable", ".", "total_initial_record_count", ",", "'data'", ":", "[", "dict", "(", "record", ",", "*", "*", "{", "'DT_RowId'", ":", "record", ".", "pop", "(", "'pk'", ")", ",", "'DT_RowData'", ":", "record", ".", "pop", "(", "'_extra_data'", ")", ",", "}", ")", "for", "record", "in", "datatable", ".", "get_records", "(", ")", "]", ",", "}", "return", "response_data" ]
Returns the JSON-compatible dictionary that will be serialized for an AJAX response. The value names are in the form "s~" for strings, "i~" for integers, and "a~" for arrays, if you're unfamiliar with the old C-style jargon used in dataTables.js. "aa~" means "array of arrays". In some instances, the author uses "ao~" for "array of objects", an object being a javascript dictionary.
[ "Returns", "the", "JSON", "-", "compatible", "dictionary", "that", "will", "be", "serialized", "for", "an", "AJAX", "response", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/base.py#L28-L55
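For reference, a sketch (with placeholder values) of the object this method returns before serialization; the extra keys on each row come from whatever columns the datatable renders:

{
    "draw": "1",
    "recordsFiltered": 42,
    "recordsTotal": 100,
    "data": [
        {"DT_RowId": 7, "DT_RowData": {}, "name": "First row's name column"}
    ]
}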
13,743
pivotal-energy-solutions/django-datatable-view
datatableview/views/base.py
DatatableJSONResponseMixin.serialize_to_json
def serialize_to_json(self, response_data): """ Returns the JSON string for the compiled data object. """ indent = None if settings.DEBUG: indent = 4 # Serialize to JSON with Django's encoder: Adds date/time, decimal, # and UUID support. return json.dumps(response_data, indent=indent, cls=DjangoJSONEncoder)
python
def serialize_to_json(self, response_data): """ Returns the JSON string for the compiled data object. """ indent = None if settings.DEBUG: indent = 4 # Serialize to JSON with Django's encoder: Adds date/time, decimal, # and UUID support. return json.dumps(response_data, indent=indent, cls=DjangoJSONEncoder)
[ "def", "serialize_to_json", "(", "self", ",", "response_data", ")", ":", "indent", "=", "None", "if", "settings", ".", "DEBUG", ":", "indent", "=", "4", "# Serialize to JSON with Django's encoder: Adds date/time, decimal,", "# and UUID support.", "return", "json", ".", "dumps", "(", "response_data", ",", "indent", "=", "indent", ",", "cls", "=", "DjangoJSONEncoder", ")" ]
Returns the JSON string for the compiled data object.
[ "Returns", "the", "JSON", "string", "for", "the", "compiled", "data", "object", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/base.py#L57-L66
13,744
pivotal-energy-solutions/django-datatable-view
datatableview/views/base.py
DatatableMixin.get_ajax
def get_ajax(self, request, *args, **kwargs): """ Called when accessed via AJAX on the request method specified by the Datatable. """ response_data = self.get_json_response_object(self._datatable) response = HttpResponse(self.serialize_to_json(response_data), content_type="application/json") return response
python
def get_ajax(self, request, *args, **kwargs): """ Called when accessed via AJAX on the request method specified by the Datatable. """ response_data = self.get_json_response_object(self._datatable) response = HttpResponse(self.serialize_to_json(response_data), content_type="application/json") return response
[ "def", "get_ajax", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "response_data", "=", "self", ".", "get_json_response_object", "(", "self", ".", "_datatable", ")", "response", "=", "HttpResponse", "(", "self", ".", "serialize_to_json", "(", "response_data", ")", ",", "content_type", "=", "\"application/json\"", ")", "return", "response" ]
Called when accessed via AJAX on the request method specified by the Datatable.
[ "Called", "when", "accessed", "via", "AJAX", "on", "the", "request", "method", "specified", "by", "the", "Datatable", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/base.py#L79-L86
13,745
pivotal-energy-solutions/django-datatable-view
datatableview/views/base.py
MultipleDatatableMixin.get_active_ajax_datatable
def get_active_ajax_datatable(self): """ Returns a single datatable according to the hint GET variable from an AJAX request. """ data = getattr(self.request, self.request.method) datatables_dict = self.get_datatables(only=data['datatable']) return list(datatables_dict.values())[0]
python
def get_active_ajax_datatable(self): """ Returns a single datatable according to the hint GET variable from an AJAX request. """ data = getattr(self.request, self.request.method) datatables_dict = self.get_datatables(only=data['datatable']) return list(datatables_dict.values())[0]
[ "def", "get_active_ajax_datatable", "(", "self", ")", ":", "data", "=", "getattr", "(", "self", ".", "request", ",", "self", ".", "request", ".", "method", ")", "datatables_dict", "=", "self", ".", "get_datatables", "(", "only", "=", "data", "[", "'datatable'", "]", ")", "return", "list", "(", "datatables_dict", ".", "values", "(", ")", ")", "[", "0", "]" ]
Returns a single datatable according to the hint GET variable from an AJAX request.
[ "Returns", "a", "single", "datatable", "according", "to", "the", "hint", "GET", "variable", "from", "an", "AJAX", "request", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/base.py#L199-L203
13,746
pivotal-energy-solutions/django-datatable-view
datatableview/views/base.py
MultipleDatatableMixin.get_datatables
def get_datatables(self, only=None): """ Returns a dict of the datatables served by this view. """ if not hasattr(self, '_datatables'): self._datatables = {} datatable_classes = self.get_datatable_classes() for name, datatable_class in datatable_classes.items(): if only and name != only: continue queryset_getter_name = 'get_%s_datatable_queryset' % (name,) queryset_getter = getattr(self, queryset_getter_name, None) if queryset_getter is None: raise ValueError("%r must declare a method %r." % (self.__class__.__name__, queryset_getter_name)) queryset = queryset_getter() if datatable_class is None: class AutoMeta: model = queryset.model opts = AutoMeta() datatable_class = Datatable else: opts = datatable_class.options_class(datatable_class._meta) kwargs = self.get_default_datatable_kwargs(object_list=queryset) kwargs_getter_name = 'get_%s_datatable_kwargs' % (name,) kwargs_getter = getattr(self, kwargs_getter_name, None) if kwargs_getter: kwargs = kwargs_getter(**kwargs) if 'url' in kwargs: kwargs['url'] = kwargs['url'] + "?datatable=%s" % (name,) for meta_opt in opts.__dict__: if meta_opt in kwargs: setattr(opts, meta_opt, kwargs.pop(meta_opt)) datatable_class = type('%s_Synthesized' % (datatable_class.__name__,), (datatable_class,), { '__module__': datatable_class.__module__, 'Meta': opts, }) self._datatables[name] = datatable_class(**kwargs) return self._datatables
python
def get_datatables(self, only=None): """ Returns a dict of the datatables served by this view. """ if not hasattr(self, '_datatables'): self._datatables = {} datatable_classes = self.get_datatable_classes() for name, datatable_class in datatable_classes.items(): if only and name != only: continue queryset_getter_name = 'get_%s_datatable_queryset' % (name,) queryset_getter = getattr(self, queryset_getter_name, None) if queryset_getter is None: raise ValueError("%r must declare a method %r." % (self.__class__.__name__, queryset_getter_name)) queryset = queryset_getter() if datatable_class is None: class AutoMeta: model = queryset.model opts = AutoMeta() datatable_class = Datatable else: opts = datatable_class.options_class(datatable_class._meta) kwargs = self.get_default_datatable_kwargs(object_list=queryset) kwargs_getter_name = 'get_%s_datatable_kwargs' % (name,) kwargs_getter = getattr(self, kwargs_getter_name, None) if kwargs_getter: kwargs = kwargs_getter(**kwargs) if 'url' in kwargs: kwargs['url'] = kwargs['url'] + "?datatable=%s" % (name,) for meta_opt in opts.__dict__: if meta_opt in kwargs: setattr(opts, meta_opt, kwargs.pop(meta_opt)) datatable_class = type('%s_Synthesized' % (datatable_class.__name__,), (datatable_class,), { '__module__': datatable_class.__module__, 'Meta': opts, }) self._datatables[name] = datatable_class(**kwargs) return self._datatables
[ "def", "get_datatables", "(", "self", ",", "only", "=", "None", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_datatables'", ")", ":", "self", ".", "_datatables", "=", "{", "}", "datatable_classes", "=", "self", ".", "get_datatable_classes", "(", ")", "for", "name", ",", "datatable_class", "in", "datatable_classes", ".", "items", "(", ")", ":", "if", "only", "and", "name", "!=", "only", ":", "continue", "queryset_getter_name", "=", "'get_%s_datatable_queryset'", "%", "(", "name", ",", ")", "queryset_getter", "=", "getattr", "(", "self", ",", "queryset_getter_name", ",", "None", ")", "if", "queryset_getter", "is", "None", ":", "raise", "ValueError", "(", "\"%r must declare a method %r.\"", "%", "(", "self", ".", "__class__", ".", "__name__", ",", "queryset_getter_name", ")", ")", "queryset", "=", "queryset_getter", "(", ")", "if", "datatable_class", "is", "None", ":", "class", "AutoMeta", ":", "model", "=", "queryset", ".", "model", "opts", "=", "AutoMeta", "(", ")", "datatable_class", "=", "Datatable", "else", ":", "opts", "=", "datatable_class", ".", "options_class", "(", "datatable_class", ".", "_meta", ")", "kwargs", "=", "self", ".", "get_default_datatable_kwargs", "(", "object_list", "=", "queryset", ")", "kwargs_getter_name", "=", "'get_%s_datatable_kwargs'", "%", "(", "name", ",", ")", "kwargs_getter", "=", "getattr", "(", "self", ",", "kwargs_getter_name", ",", "None", ")", "if", "kwargs_getter", ":", "kwargs", "=", "kwargs_getter", "(", "*", "*", "kwargs", ")", "if", "'url'", "in", "kwargs", ":", "kwargs", "[", "'url'", "]", "=", "kwargs", "[", "'url'", "]", "+", "\"?datatable=%s\"", "%", "(", "name", ",", ")", "for", "meta_opt", "in", "opts", ".", "__dict__", ":", "if", "meta_opt", "in", "kwargs", ":", "setattr", "(", "opts", ",", "meta_opt", ",", "kwargs", ".", "pop", "(", "meta_opt", ")", ")", "datatable_class", "=", "type", "(", "'%s_Synthesized'", "%", "(", "datatable_class", ".", "__name__", ",", ")", ",", "(", "datatable_class", ",", ")", ",", "{", "'__module__'", ":", "datatable_class", ".", "__module__", ",", "'Meta'", ":", "opts", ",", "}", ")", "self", ".", "_datatables", "[", "name", "]", "=", "datatable_class", "(", "*", "*", "kwargs", ")", "return", "self", ".", "_datatables" ]
Returns a dict of the datatables served by this view.
[ "Returns", "a", "dict", "of", "the", "datatables", "served", "by", "this", "view", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/base.py#L205-L246
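A minimal sketch of a view wired up for this method, based on the hook names it resolves (``get_<name>_datatable_queryset`` is required per name; ``get_<name>_datatable_kwargs`` is optional). The view, models, and the ``datatable_classes`` class attribute shown here are illustrative assumptions, not confirmed library API:

from django.views.generic import TemplateView
from datatableview.views import MultipleDatatableMixin  # import path assumed

class DashboardView(MultipleDatatableMixin, TemplateView):
    template_name = 'dashboard.html'  # hypothetical template
    datatable_classes = {             # assumed to back get_datatable_classes()
        'entries': None,    # None -> a plain Datatable subclass is synthesized
        'comments': None,
    }

    def get_entries_datatable_queryset(self):
        return Entry.objects.all()      # hypothetical model

    def get_comments_datatable_queryset(self):
        return Comment.objects.all()    # hypothetical model

Each table's AJAX endpoint is then disambiguated by the ``?datatable=<name>`` suffix this method appends to the ``url`` kwarg.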
13,747
pivotal-energy-solutions/django-datatable-view
datatableview/views/base.py
MultipleDatatableMixin.get_default_datatable_kwargs
def get_default_datatable_kwargs(self, **kwargs): """ Builds the default set of kwargs for initializing a Datatable class. Note that by default the MultipleDatatableMixin does not support any configuration via the view's class attributes, and instead relies completely on the Datatable class itself to declare its configuration details. """ kwargs['view'] = self # This is provided by default, but if the view is instantiated outside of the request cycle # (such as for the purposes of embedding that view's datatable elsewhere), the request may # not be required, so the user may not have a compelling reason to go through the trouble of # putting it on self. if hasattr(self, 'request'): kwargs['url'] = self.request.path kwargs['query_config'] = getattr(self.request, self.request.method) else: kwargs['query_config'] = {} return kwargs
python
def get_default_datatable_kwargs(self, **kwargs): """ Builds the default set of kwargs for initializing a Datatable class. Note that by default the MultipleDatatableMixin does not support any configuration via the view's class attributes, and instead relies completely on the Datatable class itself to declare its configuration details. """ kwargs['view'] = self # This is provided by default, but if the view is instantiated outside of the request cycle # (such as for the purposes of embedding that view's datatable elsewhere), the request may # not be required, so the user may not have a compelling reason to go through the trouble of # putting it on self. if hasattr(self, 'request'): kwargs['url'] = self.request.path kwargs['query_config'] = getattr(self.request, self.request.method) else: kwargs['query_config'] = {} return kwargs
[ "def", "get_default_datatable_kwargs", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'view'", "]", "=", "self", "# This is provided by default, but if the view is instantiated outside of the request cycle", "# (such as for the purposes of embedding that view's datatable elsewhere), the request may", "# not be required, so the user may not have a compelling reason to go through the trouble of", "# putting it on self.", "if", "hasattr", "(", "self", ",", "'request'", ")", ":", "kwargs", "[", "'url'", "]", "=", "self", ".", "request", ".", "path", "kwargs", "[", "'query_config'", "]", "=", "getattr", "(", "self", ".", "request", ",", "self", ".", "request", ".", "method", ")", "else", ":", "kwargs", "[", "'query_config'", "]", "=", "{", "}", "return", "kwargs" ]
Builds the default set of kwargs for initializing a Datatable class. Note that by default the MultipleDatatableMixin does not support any configuration via the view's class attributes, and instead relies completely on the Datatable class itself to declare its configuration details.
[ "Builds", "the", "default", "set", "of", "kwargs", "for", "initializing", "a", "Datatable", "class", ".", "Note", "that", "by", "default", "the", "MultipleDatatableMixin", "does", "not", "support", "any", "configuration", "via", "the", "view", "s", "class", "attributes", "and", "instead", "relies", "completely", "on", "the", "Datatable", "class", "itself", "to", "declare", "its", "configuration", "details", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/base.py#L254-L274
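A short sketch of how a per-name kwargs hook composes with these defaults; the ``entries`` name and the ``select_related`` call are illustrative:

def get_entries_datatable_kwargs(self, **kwargs):
    # kwargs already carries 'view' and 'object_list', plus 'url' and
    # 'query_config' when a request is present on the view.
    kwargs['object_list'] = kwargs['object_list'].select_related('blog')
    return kwargs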
13,748
pivotal-energy-solutions/django-datatable-view
datatableview/columns.py
get_column_for_modelfield
def get_column_for_modelfield(model_field): """ Return the built-in Column class for a model field class. """ # If the field points to another model, we want to get the pk field of that other model and use # that as the real field. It is possible that a ForeignKey points to a model with table # inheritance, however, so we need to traverse the internal OneToOneField as well, so this will # climb the 'pk' field chain until we have something real. while model_field.related_model: model_field = model_field.related_model._meta.pk for ColumnClass, modelfield_classes in COLUMN_CLASSES: if isinstance(model_field, tuple(modelfield_classes)): return ColumnClass
python
def get_column_for_modelfield(model_field): """ Return the built-in Column class for a model field class. """ # If the field points to another model, we want to get the pk field of that other model and use # that as the real field. It is possible that a ForeignKey points to a model with table # inheritance, however, so we need to traverse the internal OneToOneField as well, so this will # climb the 'pk' field chain until we have something real. while model_field.related_model: model_field = model_field.related_model._meta.pk for ColumnClass, modelfield_classes in COLUMN_CLASSES: if isinstance(model_field, tuple(modelfield_classes)): return ColumnClass
[ "def", "get_column_for_modelfield", "(", "model_field", ")", ":", "# If the field points to another model, we want to get the pk field of that other model and use", "# that as the real field. It is possible that a ForeignKey points to a model with table", "# inheritance, however, so we need to traverse the internal OneToOneField as well, so this will", "# climb the 'pk' field chain until we have something real.", "while", "model_field", ".", "related_model", ":", "model_field", "=", "model_field", ".", "related_model", ".", "_meta", ".", "pk", "for", "ColumnClass", ",", "modelfield_classes", "in", "COLUMN_CLASSES", ":", "if", "isinstance", "(", "model_field", ",", "tuple", "(", "modelfield_classes", ")", ")", ":", "return", "ColumnClass" ]
Return the built-in Column class for a model field class.
[ "Return", "the", "built", "-", "in", "Column", "class", "for", "a", "model", "field", "class", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/columns.py#L52-L63
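A hedged usage sketch; ``Entry`` is a hypothetical model with a ForeignKey named ``blog``. The while-loop resolves the FK to the related model's pk, so the class returned reflects the pk's field type:

from datatableview.columns import get_column_for_modelfield

modelfield = Entry._meta.get_field('blog')            # ForeignKey -> Blog
column_class = get_column_for_modelfield(modelfield)  # matched on Blog's pk type
column = column_class()  # a generic handler, as CompoundColumn.get_source_handler does below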
13,749
pivotal-energy-solutions/django-datatable-view
datatableview/columns.py
CompoundColumn.get_source_value
def get_source_value(self, obj, source, **kwargs): """ Treat ``field`` as a nested sub-Column instance, which explicitly stands in as the object to which term coercions and the query type lookup are delegated. """ result = [] for sub_source in self.expand_source(source): # Call super() to get default logic, but send it the 'sub_source' sub_result = super(CompoundColumn, self).get_source_value(obj, sub_source, **kwargs) result.extend(sub_result) return result
python
def get_source_value(self, obj, source, **kwargs): """ Treat ``field`` as a nested sub-Column instance, which explicitly stands in as the object to which term coercions and the query type lookup are delegated. """ result = [] for sub_source in self.expand_source(source): # Call super() to get default logic, but send it the 'sub_source' sub_result = super(CompoundColumn, self).get_source_value(obj, sub_source, **kwargs) result.extend(sub_result) return result
[ "def", "get_source_value", "(", "self", ",", "obj", ",", "source", ",", "*", "*", "kwargs", ")", ":", "result", "=", "[", "]", "for", "sub_source", "in", "self", ".", "expand_source", "(", "source", ")", ":", "# Call super() to get default logic, but send it the 'sub_source'", "sub_result", "=", "super", "(", "CompoundColumn", ",", "self", ")", ".", "get_source_value", "(", "obj", ",", "sub_source", ",", "*", "*", "kwargs", ")", "result", ".", "extend", "(", "sub_result", ")", "return", "result" ]
Treat ``source`` as a possible nested sub-Column instance, which explicitly stands in as the object to which term coercions and the query type lookup are delegated.
[ "Treat", "field", "as", "a", "nested", "sub", "-", "Column", "instance", "which", "explicitly", "stands", "in", "as", "the", "object", "to", "which", "term", "coercions", "and", "the", "query", "type", "lookup", "are", "delegated", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/columns.py#L544-L554
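A hedged sketch of a ``CompoundColumn`` mixing a plain ORM path with a nested ``Column`` instance; ``get_source_value()`` expands each source and routes every sub-source through the default per-source logic via the ``super()`` call. Field names are illustrative:

from datatableview import columns

name = columns.CompoundColumn(
    "Name",
    sources=[
        'nickname',
        columns.TextColumn(sources=['first_name', 'last_name']),
    ],
)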
13,750
pivotal-energy-solutions/django-datatable-view
datatableview/columns.py
CompoundColumn._get_flat_db_sources
def _get_flat_db_sources(self, model): """ Return a flattened representation of the individual ``sources`` lists. """ sources = [] for source in self.sources: for sub_source in self.expand_source(source): target_field = self.resolve_source(model, sub_source) if target_field: sources.append(sub_source) return sources
python
def _get_flat_db_sources(self, model): """ Return a flattened representation of the individual ``sources`` lists. """ sources = [] for source in self.sources: for sub_source in self.expand_source(source): target_field = self.resolve_source(model, sub_source) if target_field: sources.append(sub_source) return sources
[ "def", "_get_flat_db_sources", "(", "self", ",", "model", ")", ":", "sources", "=", "[", "]", "for", "source", "in", "self", ".", "sources", ":", "for", "sub_source", "in", "self", ".", "expand_source", "(", "source", ")", ":", "target_field", "=", "self", ".", "resolve_source", "(", "model", ",", "sub_source", ")", "if", "target_field", ":", "sources", ".", "append", "(", "sub_source", ")", "return", "sources" ]
Return a flattened representation of the individual ``sources`` lists.
[ "Return", "a", "flattened", "representation", "of", "the", "individual", "sources", "lists", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/columns.py#L562-L570
13,751
pivotal-energy-solutions/django-datatable-view
datatableview/columns.py
CompoundColumn.get_source_handler
def get_source_handler(self, model, source): """ Allow the nested Column source to be its own handler. """ if isinstance(source, Column): return source # Generate a generic handler for the source modelfield = resolve_orm_path(model, source) column_class = get_column_for_modelfield(modelfield) return column_class()
python
def get_source_handler(self, model, source): """ Allow the nested Column source to be its own handler. """ if isinstance(source, Column): return source # Generate a generic handler for the source modelfield = resolve_orm_path(model, source) column_class = get_column_for_modelfield(modelfield) return column_class()
[ "def", "get_source_handler", "(", "self", ",", "model", ",", "source", ")", ":", "if", "isinstance", "(", "source", ",", "Column", ")", ":", "return", "source", "# Generate a generic handler for the source", "modelfield", "=", "resolve_orm_path", "(", "model", ",", "source", ")", "column_class", "=", "get_column_for_modelfield", "(", "modelfield", ")", "return", "column_class", "(", ")" ]
Allow the nested Column source to be its own handler.
[ "Allow", "the", "nested", "Column", "source", "to", "be", "its", "own", "handler", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/columns.py#L572-L580
13,752
pivotal-energy-solutions/django-datatable-view
datatableview/views/xeditable.py
XEditableMixin.dispatch
def dispatch(self, request, *args, **kwargs): """ Introduces the ``ensure_csrf_cookie`` decorator and handles xeditable choices ajax. """ if request.GET.get(self.xeditable_fieldname_param): return self.get_ajax_xeditable_choices(request, *args, **kwargs) return super(XEditableMixin, self).dispatch(request, *args, **kwargs)
python
def dispatch(self, request, *args, **kwargs): """ Introduces the ``ensure_csrf_cookie`` decorator and handles xeditable choices ajax. """ if request.GET.get(self.xeditable_fieldname_param): return self.get_ajax_xeditable_choices(request, *args, **kwargs) return super(XEditableMixin, self).dispatch(request, *args, **kwargs)
[ "def", "dispatch", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "request", ".", "GET", ".", "get", "(", "self", ".", "xeditable_fieldname_param", ")", ":", "return", "self", ".", "get_ajax_xeditable_choices", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "super", "(", "XEditableMixin", ",", "self", ")", ".", "dispatch", "(", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Introduces the ``ensure_csrf_cookie`` decorator and handles xeditable choices ajax.
[ "Introduces", "the", "ensure_csrf_cookie", "decorator", "and", "handles", "xeditable", "choices", "ajax", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/xeditable.py#L26-L30
13,753
pivotal-energy-solutions/django-datatable-view
datatableview/views/xeditable.py
XEditableMixin.get_ajax_xeditable_choices
def get_ajax_xeditable_choices(self, request, *args, **kwargs): """ AJAX GET handler for xeditable queries asking for field choice lists. """ field_name = request.GET.get(self.xeditable_fieldname_param) if not field_name: return HttpResponseBadRequest("Field name must be given") queryset = self.get_queryset() if not self.model: self.model = queryset.model # Sanitize the requested field name by limiting valid names to the datatable_options columns from datatableview.views import legacy if isinstance(self, legacy.LegacyDatatableMixin): columns = self._get_datatable_options()['columns'] for name in columns: if isinstance(name, (list, tuple)): name = name[1] if name == field_name: break else: return HttpResponseBadRequest("Invalid field name") else: datatable = self.get_datatable() if not hasattr(datatable, 'config'): datatable.configure() if field_name not in datatable.config['columns']: return HttpResponseBadRequest("Invalid field name") field = self.model._meta.get_field(field_name) choices = self.get_field_choices(field, field_name) return HttpResponse(json.dumps(choices))
python
def get_ajax_xeditable_choices(self, request, *args, **kwargs): """ AJAX GET handler for xeditable queries asking for field choice lists. """ field_name = request.GET.get(self.xeditable_fieldname_param) if not field_name: return HttpResponseBadRequest("Field name must be given") queryset = self.get_queryset() if not self.model: self.model = queryset.model # Sanitize the requested field name by limiting valid names to the datatable_options columns from datatableview.views import legacy if isinstance(self, legacy.LegacyDatatableMixin): columns = self._get_datatable_options()['columns'] for name in columns: if isinstance(name, (list, tuple)): name = name[1] if name == field_name: break else: return HttpResponseBadRequest("Invalid field name") else: datatable = self.get_datatable() if not hasattr(datatable, 'config'): datatable.configure() if field_name not in datatable.config['columns']: return HttpResponseBadRequest("Invalid field name") field = self.model._meta.get_field(field_name) choices = self.get_field_choices(field, field_name) return HttpResponse(json.dumps(choices))
[ "def", "get_ajax_xeditable_choices", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "field_name", "=", "request", ".", "GET", ".", "get", "(", "self", ".", "xeditable_fieldname_param", ")", "if", "not", "field_name", ":", "return", "HttpResponseBadRequest", "(", "\"Field name must be given\"", ")", "queryset", "=", "self", ".", "get_queryset", "(", ")", "if", "not", "self", ".", "model", ":", "self", ".", "model", "=", "queryset", ".", "model", "# Sanitize the requested field name by limiting valid names to the datatable_options columns", "from", "datatableview", ".", "views", "import", "legacy", "if", "isinstance", "(", "self", ",", "legacy", ".", "LegacyDatatableMixin", ")", ":", "columns", "=", "self", ".", "_get_datatable_options", "(", ")", "[", "'columns'", "]", "for", "name", "in", "columns", ":", "if", "isinstance", "(", "name", ",", "(", "list", ",", "tuple", ")", ")", ":", "name", "=", "name", "[", "1", "]", "if", "name", "==", "field_name", ":", "break", "else", ":", "return", "HttpResponseBadRequest", "(", "\"Invalid field name\"", ")", "else", ":", "datatable", "=", "self", ".", "get_datatable", "(", ")", "if", "not", "hasattr", "(", "datatable", ",", "'config'", ")", ":", "datatable", ".", "configure", "(", ")", "if", "field_name", "not", "in", "datatable", ".", "config", "[", "'columns'", "]", ":", "return", "HttpResponseBadRequest", "(", "\"Invalid field name\"", ")", "field", "=", "self", ".", "model", ".", "_meta", ".", "get_field", "(", "field_name", ")", "choices", "=", "self", ".", "get_field_choices", "(", "field", ",", "field_name", ")", "return", "HttpResponse", "(", "json", ".", "dumps", "(", "choices", ")", ")" ]
AJAX GET handler for xeditable queries asking for field choice lists.
[ "AJAX", "GET", "handler", "for", "xeditable", "queries", "asking", "for", "field", "choice", "lists", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/xeditable.py#L32-L62
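A hedged sketch of exercising this endpoint with Django's test client; the URL and field name are hypothetical, and the query parameter assumes the mixin's default ``xeditable_fieldname_param``:

from django.test import Client

client = Client()
response = client.get('/entries/', {'xeditable_field': 'status'})
choices = response.json()   # e.g. [{'value': 1, 'text': 'Draft'}, ...]
# Adding select2=1 to the query string swaps the keys to 'id'/'text'
# (see get_field_choices further down).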
13,754
pivotal-energy-solutions/django-datatable-view
datatableview/views/xeditable.py
XEditableMixin.post
def post(self, request, *args, **kwargs): """ Builds a dynamic form that targets only the field in question, and saves the modification. """ self.object_list = None form = self.get_xeditable_form(self.get_xeditable_form_class()) if form.is_valid(): obj = self.get_update_object(form) if obj is None: data = json.dumps({ 'status': 'error', 'message': "Object does not exist." }) return HttpResponse(data, content_type="application/json", status=404) return self.update_object(form, obj) else: data = json.dumps({ 'status': 'error', 'message': "Invalid request", 'form_errors': form.errors, }) return HttpResponse(data, content_type="application/json", status=400)
python
def post(self, request, *args, **kwargs): """ Builds a dynamic form that targets only the field in question, and saves the modification. """ self.object_list = None form = self.get_xeditable_form(self.get_xeditable_form_class()) if form.is_valid(): obj = self.get_update_object(form) if obj is None: data = json.dumps({ 'status': 'error', 'message': "Object does not exist." }) return HttpResponse(data, content_type="application/json", status=404) return self.update_object(form, obj) else: data = json.dumps({ 'status': 'error', 'message': "Invalid request", 'form_errors': form.errors, }) return HttpResponse(data, content_type="application/json", status=400)
[ "def", "post", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "object_list", "=", "None", "form", "=", "self", ".", "get_xeditable_form", "(", "self", ".", "get_xeditable_form_class", "(", ")", ")", "if", "form", ".", "is_valid", "(", ")", ":", "obj", "=", "self", ".", "get_update_object", "(", "form", ")", "if", "obj", "is", "None", ":", "data", "=", "json", ".", "dumps", "(", "{", "'status'", ":", "'error'", ",", "'message'", ":", "\"Object does not exist.\"", "}", ")", "return", "HttpResponse", "(", "data", ",", "content_type", "=", "\"application/json\"", ",", "status", "=", "404", ")", "return", "self", ".", "update_object", "(", "form", ",", "obj", ")", "else", ":", "data", "=", "json", ".", "dumps", "(", "{", "'status'", ":", "'error'", ",", "'message'", ":", "\"Invalid request\"", ",", "'form_errors'", ":", "form", ".", "errors", ",", "}", ")", "return", "HttpResponse", "(", "data", ",", "content_type", "=", "\"application/json\"", ",", "status", "=", "400", ")" ]
Builds a dynamic form that targets only the field in question, and saves the modification.
[ "Builds", "a", "dynamic", "form", "that", "targets", "only", "the", "field", "in", "question", "and", "saves", "the", "modification", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/xeditable.py#L64-L85
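x-editable submits ``pk``, ``name`` and ``value``; a hedged sketch of the round trip this handler implements (URL and values hypothetical):

from django.test import Client

client = Client()
response = client.post('/entries/', {'pk': 1, 'name': 'status', 'value': 2})
# success: 200 with {"status": "success"} (see update_object below)
# invalid form: 400 with {"status": "error", "message": "Invalid request",
#                         "form_errors": {...}}
# missing object: 404 with {"status": "error", "message": "Object does not exist."}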
13,755
pivotal-energy-solutions/django-datatable-view
datatableview/views/xeditable.py
XEditableMixin.get_xeditable_form_kwargs
def get_xeditable_form_kwargs(self): """ Returns a dict of keyword arguments to be sent to the xeditable form class. """ kwargs = { 'model': self.get_queryset().model, } if self.request.method in ('POST', 'PUT'): kwargs.update({ 'data': self.request.POST, }) return kwargs
python
def get_xeditable_form_kwargs(self): """ Returns a dict of keyword arguments to be sent to the xeditable form class. """ kwargs = { 'model': self.get_queryset().model, } if self.request.method in ('POST', 'PUT'): kwargs.update({ 'data': self.request.POST, }) return kwargs
[ "def", "get_xeditable_form_kwargs", "(", "self", ")", ":", "kwargs", "=", "{", "'model'", ":", "self", ".", "get_queryset", "(", ")", ".", "model", ",", "}", "if", "self", ".", "request", ".", "method", "in", "(", "'POST'", ",", "'PUT'", ")", ":", "kwargs", ".", "update", "(", "{", "'data'", ":", "self", ".", "request", ".", "POST", ",", "}", ")", "return", "kwargs" ]
Returns a dict of keyword arguments to be sent to the xeditable form class.
[ "Returns", "a", "dict", "of", "keyword", "arguments", "to", "be", "sent", "to", "the", "xeditable", "form", "class", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/xeditable.py#L91-L100
13,756
pivotal-energy-solutions/django-datatable-view
datatableview/views/xeditable.py
XEditableMixin.get_update_object
def get_update_object(self, form): """ Retrieves the target object based on the update form's ``pk`` and the table's queryset. """ pk = form.cleaned_data['pk'] queryset = self.get_queryset() try: obj = queryset.get(pk=pk) except queryset.model.DoesNotExist: obj = None return obj
python
def get_update_object(self, form): """ Retrieves the target object based on the update form's ``pk`` and the table's queryset. """ pk = form.cleaned_data['pk'] queryset = self.get_queryset() try: obj = queryset.get(pk=pk) except queryset.model.DoesNotExist: obj = None return obj
[ "def", "get_update_object", "(", "self", ",", "form", ")", ":", "pk", "=", "form", ".", "cleaned_data", "[", "'pk'", "]", "queryset", "=", "self", ".", "get_queryset", "(", ")", "try", ":", "obj", "=", "queryset", ".", "get", "(", "pk", "=", "pk", ")", "except", "queryset", ".", "model", ".", "DoesNotExist", ":", "obj", "=", "None", "return", "obj" ]
Retrieves the target object based on the update form's ``pk`` and the table's queryset.
[ "Retrieves", "the", "target", "object", "based", "on", "the", "update", "form", "s", "pk", "and", "the", "table", "s", "queryset", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/xeditable.py#L106-L117
13,757
pivotal-energy-solutions/django-datatable-view
datatableview/views/xeditable.py
XEditableMixin.update_object
def update_object(self, form, obj): """ Saves the new value to the target object. """ field_name = form.cleaned_data['name'] value = form.cleaned_data['value'] setattr(obj, field_name, value) save_kwargs = {} if CAN_UPDATE_FIELDS: save_kwargs['update_fields'] = [field_name] obj.save(**save_kwargs) data = json.dumps({ 'status': 'success', }) return HttpResponse(data, content_type="application/json")
python
def update_object(self, form, obj): """ Saves the new value to the target object. """ field_name = form.cleaned_data['name'] value = form.cleaned_data['value'] setattr(obj, field_name, value) save_kwargs = {} if CAN_UPDATE_FIELDS: save_kwargs['update_fields'] = [field_name] obj.save(**save_kwargs) data = json.dumps({ 'status': 'success', }) return HttpResponse(data, content_type="application/json")
[ "def", "update_object", "(", "self", ",", "form", ",", "obj", ")", ":", "field_name", "=", "form", ".", "cleaned_data", "[", "'name'", "]", "value", "=", "form", ".", "cleaned_data", "[", "'value'", "]", "setattr", "(", "obj", ",", "field_name", ",", "value", ")", "save_kwargs", "=", "{", "}", "if", "CAN_UPDATE_FIELDS", ":", "save_kwargs", "[", "'update_fields'", "]", "=", "[", "field_name", "]", "obj", ".", "save", "(", "*", "*", "save_kwargs", ")", "data", "=", "json", ".", "dumps", "(", "{", "'status'", ":", "'success'", ",", "}", ")", "return", "HttpResponse", "(", "data", ",", "content_type", "=", "\"application/json\"", ")" ]
Saves the new value to the target object.
[ "Saves", "the", "new", "value", "to", "the", "target", "object", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/xeditable.py#L119-L132
13,758
pivotal-energy-solutions/django-datatable-view
datatableview/views/xeditable.py
XEditableMixin.get_field_choices
def get_field_choices(self, field, field_name): """ Returns the valid choices for ``field``. The ``field_name`` argument is given for convenience. """ if self.request.GET.get('select2'): names = ['id', 'text'] else: names = ['value', 'text'] choices_getter = getattr(self, 'get_field_%s_choices', None) if choices_getter is None: if isinstance(field, ForeignKey): choices_getter = self._get_foreignkey_choices else: choices_getter = self._get_default_choices return [dict(zip(names, choice)) for choice in choices_getter(field, field_name)]
python
def get_field_choices(self, field, field_name): """ Returns the valid choices for ``field``. The ``field_name`` argument is given for convenience. """ if self.request.GET.get('select2'): names = ['id', 'text'] else: names = ['value', 'text'] choices_getter = getattr(self, 'get_field_%s_choices', None) if choices_getter is None: if isinstance(field, ForeignKey): choices_getter = self._get_foreignkey_choices else: choices_getter = self._get_default_choices return [dict(zip(names, choice)) for choice in choices_getter(field, field_name)]
[ "def", "get_field_choices", "(", "self", ",", "field", ",", "field_name", ")", ":", "if", "self", ".", "request", ".", "GET", ".", "get", "(", "'select2'", ")", ":", "names", "=", "[", "'id'", ",", "'text'", "]", "else", ":", "names", "=", "[", "'value'", ",", "'text'", "]", "choices_getter", "=", "getattr", "(", "self", ",", "'get_field_%s_choices'", ",", "None", ")", "if", "choices_getter", "is", "None", ":", "if", "isinstance", "(", "field", ",", "ForeignKey", ")", ":", "choices_getter", "=", "self", ".", "_get_foreignkey_choices", "else", ":", "choices_getter", "=", "self", ".", "_get_default_choices", "return", "[", "dict", "(", "zip", "(", "names", ",", "choice", ")", ")", "for", "choice", "in", "choices_getter", "(", "field", ",", "field_name", ")", "]" ]
Returns the valid choices for ``field``. The ``field_name`` argument is given for convenience.
[ "Returns", "the", "valid", "choices", "for", "field", ".", "The", "field_name", "argument", "is", "given", "for", "convenience", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/xeditable.py#L134-L149
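Note that the hook lookup above can never succeed as written: the attribute name ``'get_field_%s_choices'`` is handed to ``getattr`` without interpolation, so Python searches for a literally-named method and always falls back to the ForeignKey/default getters. A hedged sketch of the presumable intent:

# Presumable fix (not what the shipped line does): interpolate the field
# name so a method such as get_field_status_choices can be discovered.
choices_getter = getattr(self, 'get_field_%s_choices' % field_name, None)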
13,759
pivotal-energy-solutions/django-datatable-view
datatableview/datatables.py
ValuesDatatable.preload_record_data
def preload_record_data(self, obj): """ Modifies the ``obj`` values dict to alias the selected values to the column name that asked for its selection. For example, a datatable that declares a column ``'blog'`` which has a related lookup source ``'blog__name'`` will ensure that the selected value exists in ``obj`` at both keys ``blog__name`` and ``blog`` (the former because that was how it was selected, the latter because that was the column name used to select it). :Example: ``{'pk': 1, 'blog__name': "My Blog"}`` ``{'pk': 1: 'blog__name': "My Blog", 'blog': "My Blog"}`` When a column declares multiple :py:attr:`~datatableview.columns.Column.sources`, the column name's entry in ``obj`` will be a list of each of those values. :Example: ``{'pk': 1, 'blog__name': "My Blog", 'blog__id': 5}`` ``{'pk': 1: 'blog__name': "My Blog", 'blog__id': 5, 'blog': ["My Blog", 5]}`` In every situation, the original selected values will always be retained in ``obj``. """ data = {} for orm_path, column_name in self.value_queries.items(): value = obj[orm_path] if column_name not in data: data[column_name] = value else: if not isinstance(data[column_name], (tuple, list)): data[column_name] = [data[column_name]] data[column_name].append(value) obj.update(data) return super(ValuesDatatable, self).preload_record_data(obj)
python
def preload_record_data(self, obj): """ Modifies the ``obj`` values dict to alias the selected values to the column name that asked for its selection. For example, a datatable that declares a column ``'blog'`` which has a related lookup source ``'blog__name'`` will ensure that the selected value exists in ``obj`` at both keys ``blog__name`` and ``blog`` (the former because that was how it was selected, the latter because that was the column name used to select it). :Example: ``{'pk': 1, 'blog__name': "My Blog"}`` ``{'pk': 1: 'blog__name': "My Blog", 'blog': "My Blog"}`` When a column declares multiple :py:attr:`~datatableview.columns.Column.sources`, the column name's entry in ``obj`` will be a list of each of those values. :Example: ``{'pk': 1, 'blog__name': "My Blog", 'blog__id': 5}`` ``{'pk': 1: 'blog__name': "My Blog", 'blog__id': 5, 'blog': ["My Blog", 5]}`` In every situation, the original selected values will always be retained in ``obj``. """ data = {} for orm_path, column_name in self.value_queries.items(): value = obj[orm_path] if column_name not in data: data[column_name] = value else: if not isinstance(data[column_name], (tuple, list)): data[column_name] = [data[column_name]] data[column_name].append(value) obj.update(data) return super(ValuesDatatable, self).preload_record_data(obj)
[ "def", "preload_record_data", "(", "self", ",", "obj", ")", ":", "data", "=", "{", "}", "for", "orm_path", ",", "column_name", "in", "self", ".", "value_queries", ".", "items", "(", ")", ":", "value", "=", "obj", "[", "orm_path", "]", "if", "column_name", "not", "in", "data", ":", "data", "[", "column_name", "]", "=", "value", "else", ":", "if", "not", "isinstance", "(", "data", "[", "column_name", "]", ",", "(", "tuple", ",", "list", ")", ")", ":", "data", "[", "column_name", "]", "=", "[", "data", "[", "column_name", "]", "]", "data", "[", "column_name", "]", ".", "append", "(", "value", ")", "obj", ".", "update", "(", "data", ")", "return", "super", "(", "ValuesDatatable", ",", "self", ")", ".", "preload_record_data", "(", "obj", ")" ]
Modifies the ``obj`` values dict to alias the selected values to the column name that asked for its selection. For example, a datatable that declares a column ``'blog'`` which has a related lookup source ``'blog__name'`` will ensure that the selected value exists in ``obj`` at both keys ``blog__name`` and ``blog`` (the former because that was how it was selected, the latter because that was the column name used to select it). :Example: ``{'pk': 1, 'blog__name': "My Blog"}`` becomes ``{'pk': 1, 'blog__name': "My Blog", 'blog': "My Blog"}`` When a column declares multiple :py:attr:`~datatableview.columns.Column.sources`, the column name's entry in ``obj`` will be a list of each of those values. :Example: ``{'pk': 1, 'blog__name': "My Blog", 'blog__id': 5}`` becomes ``{'pk': 1, 'blog__name': "My Blog", 'blog__id': 5, 'blog': ["My Blog", 5]}`` In every situation, the original selected values will always be retained in ``obj``.
[ "Modifies", "the", "obj", "values", "dict", "to", "alias", "the", "selected", "values", "to", "the", "column", "name", "that", "asked", "for", "its", "selection", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/datatables.py#L962-L998
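A standalone trace of the aliasing loop, reusing the docstring's example values; the ``value_queries`` mapping is normally derived by the datatable from its columns' sources:

value_queries = {'pk': 'pk', 'blog__name': 'blog', 'blog__id': 'blog'}
obj = {'pk': 1, 'blog__name': "My Blog", 'blog__id': 5}

data = {}
for orm_path, column_name in value_queries.items():
    value = obj[orm_path]
    if column_name not in data:
        data[column_name] = value
    elif not isinstance(data[column_name], (tuple, list)):
        data[column_name] = [data[column_name], value]   # wrap, then collect
    else:
        data[column_name].append(value)
obj.update(data)
# obj -> {'pk': 1, 'blog__name': 'My Blog', 'blog__id': 5, 'blog': ['My Blog', 5]}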
13,760
pivotal-energy-solutions/django-datatable-view
datatableview/datatables.py
LegacyDatatable.resolve_virtual_columns
def resolve_virtual_columns(self, *names): """ Assume that all ``names`` are legacy-style tuple declarations, and generate modern columns instances to match the behavior of the old syntax. """ from .views.legacy import get_field_definition virtual_columns = {} for name in names: field = get_field_definition(name) column = TextColumn(sources=field.fields, label=field.pretty_name, processor=field.callback) column.name = field.pretty_name if field.pretty_name else field.fields[0] virtual_columns[name] = column # Make sure it's in the same order as originally defined new_columns = OrderedDict() for name in self._meta.columns: # Can't use self.config yet, hasn't been generated if self.columns.get(name): column = self.columns[name] else: column = virtual_columns[name] new_columns[column.name] = column self.columns = new_columns
python
def resolve_virtual_columns(self, *names): """ Assume that all ``names`` are legacy-style tuple declarations, and generate modern columns instances to match the behavior of the old syntax. """ from .views.legacy import get_field_definition virtual_columns = {} for name in names: field = get_field_definition(name) column = TextColumn(sources=field.fields, label=field.pretty_name, processor=field.callback) column.name = field.pretty_name if field.pretty_name else field.fields[0] virtual_columns[name] = column # Make sure it's in the same order as originally defined new_columns = OrderedDict() for name in self._meta.columns: # Can't use self.config yet, hasn't been generated if self.columns.get(name): column = self.columns[name] else: column = virtual_columns[name] new_columns[column.name] = column self.columns = new_columns
[ "def", "resolve_virtual_columns", "(", "self", ",", "*", "names", ")", ":", "from", ".", "views", ".", "legacy", "import", "get_field_definition", "virtual_columns", "=", "{", "}", "for", "name", "in", "names", ":", "field", "=", "get_field_definition", "(", "name", ")", "column", "=", "TextColumn", "(", "sources", "=", "field", ".", "fields", ",", "label", "=", "field", ".", "pretty_name", ",", "processor", "=", "field", ".", "callback", ")", "column", ".", "name", "=", "field", ".", "pretty_name", "if", "field", ".", "pretty_name", "else", "field", ".", "fields", "[", "0", "]", "virtual_columns", "[", "name", "]", "=", "column", "# Make sure it's in the same order as originally defined", "new_columns", "=", "OrderedDict", "(", ")", "for", "name", "in", "self", ".", "_meta", ".", "columns", ":", "# Can't use self.config yet, hasn't been generated", "if", "self", ".", "columns", ".", "get", "(", "name", ")", ":", "column", "=", "self", ".", "columns", "[", "name", "]", "else", ":", "column", "=", "virtual_columns", "[", "name", "]", "new_columns", "[", "column", ".", "name", "]", "=", "column", "self", ".", "columns", "=", "new_columns" ]
Assume that all ``names`` are legacy-style tuple declarations, and generate modern column instances to match the behavior of the old syntax.
[ "Assume", "that", "all", "names", "are", "legacy", "-", "style", "tuple", "declarations", "and", "generate", "modern", "columns", "instances", "to", "match", "the", "behavior", "of", "the", "old", "syntax", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/datatables.py#L1010-L1032
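A hedged sketch of the legacy declarations this method converts; each tuple becomes a ``TextColumn`` whose sources are the tuple's fields and whose processor is the callback (names illustrative):

datatable_options = {
    'columns': [
        'headline',                                           # plain model field
        ("Published", 'pub_date'),                            # pretty name + field
        ("Author", ('first_name', 'last_name'), 'get_name'),  # + callback
    ],
}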
13,761
pivotal-energy-solutions/django-datatable-view
datatableview/forms.py
XEditableUpdateForm.set_value_field
def set_value_field(self, model, field_name): """ Adds a ``value`` field to this form that uses the appropriate formfield for the named target field. This will help to ensure that the value is correctly validated. """ fields = fields_for_model(model, fields=[field_name]) self.fields['value'] = fields[field_name]
python
def set_value_field(self, model, field_name): """ Adds a ``value`` field to this form that uses the appropriate formfield for the named target field. This will help to ensure that the value is correctly validated. """ fields = fields_for_model(model, fields=[field_name]) self.fields['value'] = fields[field_name]
[ "def", "set_value_field", "(", "self", ",", "model", ",", "field_name", ")", ":", "fields", "=", "fields_for_model", "(", "model", ",", "fields", "=", "[", "field_name", "]", ")", "self", ".", "fields", "[", "'value'", "]", "=", "fields", "[", "field_name", "]" ]
Adds a ``value`` field to this form that uses the appropriate formfield for the named target field. This will help to ensure that the value is correctly validated.
[ "Adds", "a", "value", "field", "to", "this", "form", "that", "uses", "the", "appropriate", "formfield", "for", "the", "named", "target", "field", ".", "This", "will", "help", "to", "ensure", "that", "the", "value", "is", "correctly", "validated", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/forms.py#L30-L36
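``fields_for_model`` is Django's stock ModelForm helper; this mirrors what ``set_value_field()`` does for one named field (``Entry`` is hypothetical):

from django.forms.models import fields_for_model

value_field = fields_for_model(Entry, fields=['headline'])['headline']
# The same formfield Entry.headline would receive in a ModelForm, so the
# submitted value passes through full field validation.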
13,762
pivotal-energy-solutions/django-datatable-view
datatableview/forms.py
XEditableUpdateForm.clean_name
def clean_name(self): """ Validates that the ``name`` field corresponds to a field on the model. """ field_name = self.cleaned_data['name'] # get_all_field_names is deprecated in Django 1.8, this also fixes proxied models if hasattr(self.model._meta, 'get_fields'): field_names = [field.name for field in self.model._meta.get_fields()] else: field_names = self.model._meta.get_all_field_names() if field_name not in field_names: raise ValidationError("%r is not a valid field." % field_name) return field_name
python
def clean_name(self): """ Validates that the ``name`` field corresponds to a field on the model. """ field_name = self.cleaned_data['name'] # get_all_field_names is deprecated in Django 1.8, this also fixes proxied models if hasattr(self.model._meta, 'get_fields'): field_names = [field.name for field in self.model._meta.get_fields()] else: field_names = self.model._meta.get_all_field_names() if field_name not in field_names: raise ValidationError("%r is not a valid field." % field_name) return field_name
[ "def", "clean_name", "(", "self", ")", ":", "field_name", "=", "self", ".", "cleaned_data", "[", "'name'", "]", "# get_all_field_names is deprecated in Django 1.8, this also fixes proxied models", "if", "hasattr", "(", "self", ".", "model", ".", "_meta", ",", "'get_fields'", ")", ":", "field_names", "=", "[", "field", ".", "name", "for", "field", "in", "self", ".", "model", ".", "_meta", ".", "get_fields", "(", ")", "]", "else", ":", "field_names", "=", "self", ".", "model", ".", "_meta", ".", "get_all_field_names", "(", ")", "if", "field_name", "not", "in", "field_names", ":", "raise", "ValidationError", "(", "\"%r is not a valid field.\"", "%", "field_name", ")", "return", "field_name" ]
Validates that the ``name`` field corresponds to a field on the model.
[ "Validates", "that", "the", "name", "field", "corresponds", "to", "a", "field", "on", "the", "model", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/forms.py#L38-L48
13,763
pivotal-energy-solutions/django-datatable-view
datatableview/views/legacy.py
get_field_definition
def get_field_definition(field_definition): """ Normalizes a field definition into its component parts, even if some are missing. """ if not isinstance(field_definition, (tuple, list)): field_definition = [field_definition] else: field_definition = list(field_definition) if len(field_definition) == 1: field = [None, field_definition, None] elif len(field_definition) == 2: field = field_definition + [None] elif len(field_definition) == 3: field = field_definition else: raise ValueError("Invalid field definition format.") if not isinstance(field[1], (tuple, list)): field[1] = (field[1],) field[1] = tuple(name for name in field[1] if name is not None) return FieldDefinitionTuple(*field)
python
def get_field_definition(field_definition): """ Normalizes a field definition into its component parts, even if some are missing. """ if not isinstance(field_definition, (tuple, list)): field_definition = [field_definition] else: field_definition = list(field_definition) if len(field_definition) == 1: field = [None, field_definition, None] elif len(field_definition) == 2: field = field_definition + [None] elif len(field_definition) == 3: field = field_definition else: raise ValueError("Invalid field definition format.") if not isinstance(field[1], (tuple, list)): field[1] = (field[1],) field[1] = tuple(name for name in field[1] if name is not None) return FieldDefinitionTuple(*field)
[ "def", "get_field_definition", "(", "field_definition", ")", ":", "if", "not", "isinstance", "(", "field_definition", ",", "(", "tuple", ",", "list", ")", ")", ":", "field_definition", "=", "[", "field_definition", "]", "else", ":", "field_definition", "=", "list", "(", "field_definition", ")", "if", "len", "(", "field_definition", ")", "==", "1", ":", "field", "=", "[", "None", ",", "field_definition", ",", "None", "]", "elif", "len", "(", "field_definition", ")", "==", "2", ":", "field", "=", "field_definition", "+", "[", "None", "]", "elif", "len", "(", "field_definition", ")", "==", "3", ":", "field", "=", "field_definition", "else", ":", "raise", "ValueError", "(", "\"Invalid field definition format.\"", ")", "if", "not", "isinstance", "(", "field", "[", "1", "]", ",", "(", "tuple", ",", "list", ")", ")", ":", "field", "[", "1", "]", "=", "(", "field", "[", "1", "]", ",", ")", "field", "[", "1", "]", "=", "tuple", "(", "name", "for", "name", "in", "field", "[", "1", "]", "if", "name", "is", "not", "None", ")", "return", "FieldDefinitionTuple", "(", "*", "field", ")" ]
Normalizes a field definition into its component parts, even if some are missing.
[ "Normalizes", "a", "field", "definition", "into", "its", "component", "parts", "even", "if", "some", "are", "missing", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/views/legacy.py#L31-L51
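Worked normalizations, with the namedtuple attribute names inferred from their use in ``resolve_virtual_columns()`` above:

from datatableview.views.legacy import get_field_definition

get_field_definition('headline')
# -> FieldDefinitionTuple(pretty_name=None, fields=('headline',), callback=None)
get_field_definition(("Author", ('first_name', 'last_name'), 'get_name'))
# -> FieldDefinitionTuple(pretty_name='Author',
#                         fields=('first_name', 'last_name'), callback='get_name')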
13,764
pivotal-energy-solutions/django-datatable-view
datatableview/cache.py
get_cached_data
def get_cached_data(datatable, **kwargs): """ Returns the cached object list under the appropriate key, or None if not set. """ cache_key = '%s%s' % (CACHE_PREFIX, datatable.get_cache_key(**kwargs)) data = cache.get(cache_key) log.debug("Reading data from cache at %r: %r", cache_key, data) return data
python
def get_cached_data(datatable, **kwargs): """ Returns the cached object list under the appropriate key, or None if not set. """ cache_key = '%s%s' % (CACHE_PREFIX, datatable.get_cache_key(**kwargs)) data = cache.get(cache_key) log.debug("Reading data from cache at %r: %r", cache_key, data) return data
[ "def", "get_cached_data", "(", "datatable", ",", "*", "*", "kwargs", ")", ":", "cache_key", "=", "'%s%s'", "%", "(", "CACHE_PREFIX", ",", "datatable", ".", "get_cache_key", "(", "*", "*", "kwargs", ")", ")", "data", "=", "cache", ".", "get", "(", "cache_key", ")", "log", ".", "debug", "(", "\"Reading data from cache at %r: %r\"", ",", "cache_key", ",", "data", ")", "return", "data" ]
Returns the cached object list under the appropriate key, or None if not set.
[ "Returns", "the", "cached", "object", "list", "under", "the", "appropriate", "key", "or", "None", "if", "not", "set", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/cache.py#L101-L106
13,765
pivotal-energy-solutions/django-datatable-view
datatableview/cache.py
cache_data
def cache_data(datatable, data, **kwargs): """ Stores the object list in the cache under the appropriate key. """ cache_key = '%s%s' % (CACHE_PREFIX, datatable.get_cache_key(**kwargs)) log.debug("Setting data to cache at %r: %r", cache_key, data) cache.set(cache_key, data)
python
def cache_data(datatable, data, **kwargs): """ Stores the object list in the cache under the appropriate key. """ cache_key = '%s%s' % (CACHE_PREFIX, datatable.get_cache_key(**kwargs)) log.debug("Setting data to cache at %r: %r", cache_key, data) cache.set(cache_key, data)
[ "def", "cache_data", "(", "datatable", ",", "data", ",", "*", "*", "kwargs", ")", ":", "cache_key", "=", "'%s%s'", "%", "(", "CACHE_PREFIX", ",", "datatable", ".", "get_cache_key", "(", "*", "*", "kwargs", ")", ")", "log", ".", "debug", "(", "\"Setting data to cache at %r: %r\"", ",", "cache_key", ",", "data", ")", "cache", ".", "set", "(", "cache_key", ",", "data", ")" ]
Stores the object list in the cache under the appropriate key.
[ "Stores", "the", "object", "list", "in", "the", "cache", "under", "the", "appropriate", "key", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/cache.py#L109-L113
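A sketch of the pair in use; the extra kwargs are forwarded to ``get_cache_key()``, and the ``user`` kwarg plus ``expensive_object_list()`` are illustrative:

from datatableview.cache import cache_data, get_cached_data

data = get_cached_data(datatable, user=request.user)
if data is None:
    data = expensive_object_list()   # hypothetical expensive build step
    cache_data(datatable, data, user=request.user)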
13,766
pivotal-energy-solutions/django-datatable-view
datatableview/helpers.py
keyed_helper
def keyed_helper(helper): """ Decorator for helper functions that operate on direct values instead of model instances. A keyed helper is one that can be used normally in the view's own custom callbacks, but also supports direct access in the column declaration, such as in the example: datatable_options = { 'columns': [ ('Field Name', 'fieldname', make_boolean_checkmark(key=attrgetter('fieldname'))), ], } With the help of a ``sort``-style ``key`` argument, the helper can receive all the information it requires in advance, so that the view doesn't have to go through the trouble of declaring a custom callback method that simply returns the value of the ``make_boolean_checkmark()`` helper. If the attribute being fetched is identical to the one pointed to in the column declaration, even the ``key`` argument can be omitted: ('Field Name', 'fieldname', make_boolean_checkmark)), """ @wraps(helper) def wrapper(instance=None, key=None, attr=None, *args, **kwargs): if set((instance, key, attr)) == {None}: # helper was called in place with neither important arg raise ValueError("If called directly, helper function '%s' requires either a model" " instance, or a 'key' or 'attr' keyword argument." % helper.__name__) if instance is not None: return helper(instance, *args, **kwargs) if key is None and attr is None: attr = 'self' if attr: if attr == 'self': key = lambda obj: obj else: key = operator.attrgetter(attr) # Helper is used directly in the columns declaration. A new callable is # returned to take the place of a callback. @wraps(helper) def helper_wrapper(instance, *args, **kwargs): return helper(key(instance), *args, **kwargs) return helper_wrapper wrapper._is_wrapped = True return wrapper
python
def keyed_helper(helper): """ Decorator for helper functions that operate on direct values instead of model instances. A keyed helper is one that can be used normally in the view's own custom callbacks, but also supports direct access in the column declaration, such as in the example: datatable_options = { 'columns': [ ('Field Name', 'fieldname', make_boolean_checkmark(key=attrgetter('fieldname'))), ], } With the help of a ``sort``-style ``key`` argument, the helper can receive all the information it requires in advance, so that the view doesn't have to go through the trouble of declaring a custom callback method that simply returns the value of the ``make_boolean_checkmark()`` helper. If the attribute being fetched is identical to the one pointed to in the column declaration, even the ``key`` argument can be omitted: ('Field Name', 'fieldname', make_boolean_checkmark)), """ @wraps(helper) def wrapper(instance=None, key=None, attr=None, *args, **kwargs): if set((instance, key, attr)) == {None}: # helper was called in place with neither important arg raise ValueError("If called directly, helper function '%s' requires either a model" " instance, or a 'key' or 'attr' keyword argument." % helper.__name__) if instance is not None: return helper(instance, *args, **kwargs) if key is None and attr is None: attr = 'self' if attr: if attr == 'self': key = lambda obj: obj else: key = operator.attrgetter(attr) # Helper is used directly in the columns declaration. A new callable is # returned to take the place of a callback. @wraps(helper) def helper_wrapper(instance, *args, **kwargs): return helper(key(instance), *args, **kwargs) return helper_wrapper wrapper._is_wrapped = True return wrapper
[ "def", "keyed_helper", "(", "helper", ")", ":", "@", "wraps", "(", "helper", ")", "def", "wrapper", "(", "instance", "=", "None", ",", "key", "=", "None", ",", "attr", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "set", "(", "(", "instance", ",", "key", ",", "attr", ")", ")", "==", "{", "None", "}", ":", "# helper was called in place with neither important arg", "raise", "ValueError", "(", "\"If called directly, helper function '%s' requires either a model\"", "\" instance, or a 'key' or 'attr' keyword argument.\"", "%", "helper", ".", "__name__", ")", "if", "instance", "is", "not", "None", ":", "return", "helper", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "key", "is", "None", "and", "attr", "is", "None", ":", "attr", "=", "'self'", "if", "attr", ":", "if", "attr", "==", "'self'", ":", "key", "=", "lambda", "obj", ":", "obj", "else", ":", "key", "=", "operator", ".", "attrgetter", "(", "attr", ")", "# Helper is used directly in the columns declaration. A new callable is", "# returned to take the place of a callback.", "@", "wraps", "(", "helper", ")", "def", "helper_wrapper", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "helper", "(", "key", "(", "instance", ")", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "helper_wrapper", "wrapper", ".", "_is_wrapped", "=", "True", "return", "wrapper" ]
Decorator for helper functions that operate on direct values instead of model instances. A keyed helper is one that can be used normally in the view's own custom callbacks, but also supports direct access in the column declaration, such as in the example: datatable_options = { 'columns': [ ('Field Name', 'fieldname', make_boolean_checkmark(key=attrgetter('fieldname'))), ], } With the help of a ``sort``-style ``key`` argument, the helper can receive all the information it requires in advance, so that the view doesn't have to go through the trouble of declaring a custom callback method that simply returns the value of the ``make_boolean_checkmark()`` helper. If the attribute being fetched is identical to the one pointed to in the column declaration, even the ``key`` argument can be omitted: ('Field Name', 'fieldname', make_boolean_checkmark),
[ "Decorator", "for", "helper", "functions", "that", "operate", "on", "direct", "values", "instead", "of", "model", "instances", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/helpers.py#L32-L84
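The docstring's pattern written out; ``make_boolean_checkmark`` is another helper in this module, and ``is_sticky`` is an illustrative field:

from operator import attrgetter
from datatableview.helpers import make_boolean_checkmark

# Pre-keyed: each row's is_sticky value, not the instance, reaches the helper.
checkmark = make_boolean_checkmark(key=attrgetter('is_sticky'))

# Called with neither an instance nor a key/attr, the wrapper raises ValueError.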
13,767
pivotal-energy-solutions/django-datatable-view
datatableview/helpers.py
itemgetter
def itemgetter(k, ellipsis=False, key=None): """ Looks up ``k`` as an index of the column's value. If ``k`` is a ``slice`` type object, then ``ellipsis`` can be given as a string to use to indicate truncation. Alternatively, ``ellipsis`` can be set to ``True`` to use a default ``'...'``. If a ``key`` is given, it may be a function which maps the target value to something else before the item lookup takes place. Examples:: # Choose an item from a list source. winner = columns.TextColumn("Winner", sources=['get_rankings'], processor=itemgetter(0)) # Take instance.description[:30] and append "..." to the end if truncation occurs. description = columns.TextColumn("Description", sources=['description'], processor=itemgetter(slice(None, 30), ellipsis=True)) """ def helper(instance, *args, **kwargs): default_value = kwargs.get('default_value') if default_value is None: default_value = instance value = default_value[k] if ellipsis and isinstance(k, slice) and isinstance(value, six.string_types) and \ len(default_value) > len(value): if ellipsis is True: value += "..." else: value += ellipsis return value if key: helper = keyed_helper(helper)(key=key) return helper
python
def itemgetter(k, ellipsis=False, key=None): """ Looks up ``k`` as an index of the column's value. If ``k`` is a ``slice`` type object, then ``ellipsis`` can be given as a string to use to indicate truncation. Alternatively, ``ellipsis`` can be set to ``True`` to use a default ``'...'``. If a ``key`` is given, it may be a function which maps the target value to something else before the item lookup takes place. Examples:: # Choose an item from a list source. winner = columns.TextColumn("Winner", sources=['get_rankings'], processor=itemgetter(0)) # Take instance.description[:30] and append "..." to the end if truncation occurs. description = columns.TextColumn("Description", sources=['description'], processor=itemgetter(slice(None, 30), ellipsis=True)) """ def helper(instance, *args, **kwargs): default_value = kwargs.get('default_value') if default_value is None: default_value = instance value = default_value[k] if ellipsis and isinstance(k, slice) and isinstance(value, six.string_types) and \ len(default_value) > len(value): if ellipsis is True: value += "..." else: value += ellipsis return value if key: helper = keyed_helper(helper)(key=key) return helper
[ "def", "itemgetter", "(", "k", ",", "ellipsis", "=", "False", ",", "key", "=", "None", ")", ":", "def", "helper", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "default_value", "=", "kwargs", ".", "get", "(", "'default_value'", ")", "if", "default_value", "is", "None", ":", "default_value", "=", "instance", "value", "=", "default_value", "[", "k", "]", "if", "ellipsis", "and", "isinstance", "(", "k", ",", "slice", ")", "and", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", "and", "len", "(", "default_value", ")", ">", "len", "(", "value", ")", ":", "if", "ellipsis", "is", "True", ":", "value", "+=", "\"...\"", "else", ":", "value", "+=", "ellipsis", "return", "value", "if", "key", ":", "helper", "=", "keyed_helper", "(", "helper", ")", "(", "key", "=", "key", ")", "return", "helper" ]
Looks up ``k`` as an index of the column's value. If ``k`` is a ``slice`` type object, then ``ellipsis`` can be given as a string to use to indicate truncation. Alternatively, ``ellipsis`` can be set to ``True`` to use a default ``'...'``. If a ``key`` is given, it may be a function which maps the target value to something else before the item lookup takes place. Examples:: # Choose an item from a list source. winner = columns.TextColumn("Winner", sources=['get_rankings'], processor=itemgetter(0)) # Take instance.description[:30] and append "..." to the end if truncation occurs. description = columns.TextColumn("Description", sources=['description'], processor=itemgetter(slice(None, 30), ellipsis=True))
[ "Looks", "up", "k", "as", "an", "index", "of", "the", "column", "s", "value", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/helpers.py#L150-L187
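The ellipsis behaviour is runnable standalone:

from datatableview.helpers import itemgetter

shorten = itemgetter(slice(None, 5), ellipsis=True)
shorten("Hello world")   # -> 'Hello...' (sliced to 5 chars, truncation marked)
shorten("Hi")            # -> 'Hi' (nothing was cut off, so no ellipsis)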
13,768
pivotal-energy-solutions/django-datatable-view
datatableview/helpers.py
attrgetter
def attrgetter(attr, key=None): """ Looks up ``attr`` on the target value. If the result is a callable, it will be called in place without arguments. If a ``key`` is given, it may be a function which maps the target value to something else before the attribute lookup takes place. Examples:: # Explicitly selecting the sources and then using a processor to allow the model # method to organize the data itself, you can still provide all the necessary # ORM hints to the column. # This is definitely superior to having sources=['get_address']. address = columns.TextColumn("Address", sources=['street', 'city', 'state', 'zip'], processor=attrgetter('get_address')) """ def helper(instance, *args, **kwargs): value = instance for bit in attr.split('.'): value = getattr(value, bit) if callable(value): value = value() return value if key: helper = keyed_helper(helper)(key=key) return helper
python
def attrgetter(attr, key=None): """ Looks up ``attr`` on the target value. If the result is a callable, it will be called in place without arguments. If a ``key`` is given, it may be a function which maps the target value to something else before the attribute lookup takes place. Examples:: # Explicitly selecting the sources and then using a processor to allow the model # method to organize the data itself, you can still provide all the necessary # ORM hints to the column. # This is definitely superior to having sources=['get_address']. address = columns.TextColumn("Address", sources=['street', 'city', 'state', 'zip'], processor=attrgetter('get_address')) """ def helper(instance, *args, **kwargs): value = instance for bit in attr.split('.'): value = getattr(value, bit) if callable(value): value = value() return value if key: helper = keyed_helper(helper)(key=key) return helper
[ "def", "attrgetter", "(", "attr", ",", "key", "=", "None", ")", ":", "def", "helper", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "value", "=", "instance", "for", "bit", "in", "attr", ".", "split", "(", "'.'", ")", ":", "value", "=", "getattr", "(", "value", ",", "bit", ")", "if", "callable", "(", "value", ")", ":", "value", "=", "value", "(", ")", "return", "value", "if", "key", ":", "helper", "=", "keyed_helper", "(", "helper", ")", "(", "key", "=", "key", ")", "return", "helper" ]
Looks up ``attr`` on the target value. If the result is a callable, it will be called in place without arguments. If a ``key`` is given, it may be a function which maps the target value to something else before the attribute lookup takes place. Examples:: # Explicitly selecting the sources and then using a processor to allow the model # method to organize the data itself, you can still provide all the necessary # ORM hints to the column. # This is definitely superior to having sources=['get_address']. address = columns.TextColumn("Address", sources=['street', 'city', 'state', 'zip'], processor=attrgetter('get_address'))
[ "Looks", "up", "attr", "on", "the", "target", "value", ".", "If", "the", "result", "is", "a", "callable", "it", "will", "be", "called", "in", "place", "without", "arguments", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/helpers.py#L190-L218
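The helper walks each dotted segment with getattr and calls the final value if it is callable, which is what lets a model method such as get_address act as a column source. A self-contained sketch of that traversal (the classes below are hypothetical):

class City:
    name = "Springfield"

class Profile:
    def __init__(self):
        self.city = City()
    def greeting(self):
        return "hello"

class User:
    def __init__(self):
        self.profile = Profile()

def resolve(instance, attr):
    # Same walk as attrgetter's inner helper: follow every dotted
    # segment, then invoke the final value if it turned out callable.
    value = instance
    for bit in attr.split("."):
        value = getattr(value, bit)
    if callable(value):
        value = value()
    return value

print(resolve(User(), "profile.city.name"))  # 'Springfield'
print(resolve(User(), "profile.greeting"))   # 'hello' (the method was called)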
13,769
pivotal-energy-solutions/django-datatable-view
datatableview/helpers.py
make_processor
def make_processor(func, arg=None): """ A pre-called processor that wraps the execution of the target callable ``func``. This is useful for when ``func`` is a third party mapping function that can take your column's value and return an expected result, but doesn't understand all of the extra kwargs that get sent to processor callbacks. Because this helper proxies access to ``func``, it can hold back the extra kwargs for a successful call. ``func`` will be called once per object record, a single positional argument being the column data retrieved via the column's :py:attr:`~datatableview.columns.Column.sources` An optional ``arg`` may be given, which will be forwarded as a second positional argument to ``func``. This was originally intended to simplify using Django template filter functions as ``func``. If you need to send more arguments, consider wrapping your ``func`` in a ``functools.partial``, and use that as ``func`` instead. """ def helper(instance, *args, **kwargs): value = kwargs.get('default_value') if value is None: value = instance if arg is not None: extra_arg = [arg] else: extra_arg = [] return func(value, *extra_arg) return helper
python
def make_processor(func, arg=None): """ A pre-called processor that wraps the execution of the target callable ``func``. This is useful for when ``func`` is a third party mapping function that can take your column's value and return an expected result, but doesn't understand all of the extra kwargs that get sent to processor callbacks. Because this helper proxies access to ``func``, it can hold back the extra kwargs for a successful call. ``func`` will be called once per object record, a single positional argument being the column data retrieved via the column's :py:attr:`~datatableview.columns.Column.sources` An optional ``arg`` may be given, which will be forwarded as a second positional argument to ``func``. This was originally intended to simplify using Django template filter functions as ``func``. If you need to send more arguments, consider wrapping your ``func`` in a ``functools.partial``, and use that as ``func`` instead. """ def helper(instance, *args, **kwargs): value = kwargs.get('default_value') if value is None: value = instance if arg is not None: extra_arg = [arg] else: extra_arg = [] return func(value, *extra_arg) return helper
[ "def", "make_processor", "(", "func", ",", "arg", "=", "None", ")", ":", "def", "helper", "(", "instance", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "value", "=", "kwargs", ".", "get", "(", "'default_value'", ")", "if", "value", "is", "None", ":", "value", "=", "instance", "if", "arg", "is", "not", "None", ":", "extra_arg", "=", "[", "arg", "]", "else", ":", "extra_arg", "=", "[", "]", "return", "func", "(", "value", ",", "*", "extra_arg", ")", "return", "helper" ]
A pre-called processor that wraps the execution of the target callable ``func``. This is useful for when ``func`` is a third party mapping function that can take your column's value and return an expected result, but doesn't understand all of the extra kwargs that get sent to processor callbacks. Because this helper proxies access to ``func``, it can hold back the extra kwargs for a successful call. ``func`` will be called once per object record, a single positional argument being the column data retrieved via the column's :py:attr:`~datatableview.columns.Column.sources` An optional ``arg`` may be given, which will be forwarded as a second positional argument to ``func``. This was originally intended to simplify using Django template filter functions as ``func``. If you need to send more arguments, consider wrapping your ``func`` in a ``functools.partial``, and use that as ``func`` instead.
[ "A", "pre", "-", "called", "processor", "that", "wraps", "the", "execution", "of", "the", "target", "callable", "func", "." ]
00b77a9b5051c34e258c51b06c020e92edf15034
https://github.com/pivotal-energy-solutions/django-datatable-view/blob/00b77a9b5051c34e258c51b06c020e92edf15034/datatableview/helpers.py#L402-L428
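A self-contained sketch of the pattern end to end, wrapping a one-argument mapper that knows nothing about processor kwargs (shorten stands in for a third-party filter such as a Django template filter; all names here are hypothetical):

def shorten(value, width):
    # Third-party-style mapper: positional arguments only, no **kwargs.
    return value if len(value) <= width else value[:width] + "..."

def make_processor(func, arg=None):
    # Same shape as the helper above: hold back the extra processor
    # kwargs and forward only the column value plus the optional arg.
    def helper(instance, *args, **kwargs):
        value = kwargs.get('default_value')
        if value is None:
            value = instance
        extra_arg = [arg] if arg is not None else []
        return func(value, *extra_arg)
    return helper

processor = make_processor(shorten, 10)
# Extra kwargs, as the datatable machinery would send, are swallowed:
print(processor("a rather long description", view=object()))  # 'a rather l...'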
13,770
Imgur/imgurpython
examples/upload.py
upload_kitten
def upload_kitten(client): ''' Upload a picture of a kitten. We don't ship one, so get creative! ''' # Here's the metadata for the upload. All of these are optional, including # this config dict itself. config = { 'album': album, 'name': 'Catastrophe!', 'title': 'Catastrophe!', 'description': 'Cute kitten being cute on {0}'.format(datetime.now()) } print("Uploading image... ") image = client.upload_from_path(image_path, config=config, anon=False) print("Done") print() return image
python
def upload_kitten(client): ''' Upload a picture of a kitten. We don't ship one, so get creative! ''' # Here's the metadata for the upload. All of these are optional, including # this config dict itself. config = { 'album': album, 'name': 'Catastrophe!', 'title': 'Catastrophe!', 'description': 'Cute kitten being cute on {0}'.format(datetime.now()) } print("Uploading image... ") image = client.upload_from_path(image_path, config=config, anon=False) print("Done") print() return image
[ "def", "upload_kitten", "(", "client", ")", ":", "# Here's the metadata for the upload. All of these are optional, including", "# this config dict itself.", "config", "=", "{", "'album'", ":", "album", ",", "'name'", ":", "'Catastrophe!'", ",", "'title'", ":", "'Catastrophe!'", ",", "'description'", ":", "'Cute kitten being cute on {0}'", ".", "format", "(", "datetime", ".", "now", "(", ")", ")", "}", "print", "(", "\"Uploading image... \"", ")", "image", "=", "client", ".", "upload_from_path", "(", "image_path", ",", "config", "=", "config", ",", "anon", "=", "False", ")", "print", "(", "\"Done\"", ")", "print", "(", ")", "return", "image" ]
Upload a picture of a kitten. We don't ship one, so get creative!
[ "Upload", "a", "picture", "of", "a", "kitten", ".", "We", "don", "t", "ship", "one", "so", "get", "creative!" ]
48abc45a143ee9d2485c22a63b7cd55701d8163c
https://github.com/Imgur/imgurpython/blob/48abc45a143ee9d2485c22a63b7cd55701d8163c/examples/upload.py#L19-L38
13,771
KoffeinFlummi/Chronyk
chronyk/chronyk.py
_isdst
def _isdst(dt): """Check if date is in dst. """ if type(dt) == datetime.date: dt = datetime.datetime.combine(dt, datetime.datetime.min.time()) dtc = dt.replace(year=datetime.datetime.now().year) if time.localtime(dtc.timestamp()).tm_isdst == 1: return True return False
python
def _isdst(dt): """Check if date is in dst. """ if type(dt) == datetime.date: dt = datetime.datetime.combine(dt, datetime.datetime.min.time()) dtc = dt.replace(year=datetime.datetime.now().year) if time.localtime(dtc.timestamp()).tm_isdst == 1: return True return False
[ "def", "_isdst", "(", "dt", ")", ":", "if", "type", "(", "dt", ")", "==", "datetime", ".", "date", ":", "dt", "=", "datetime", ".", "datetime", ".", "combine", "(", "dt", ",", "datetime", ".", "datetime", ".", "min", ".", "time", "(", ")", ")", "dtc", "=", "dt", ".", "replace", "(", "year", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "year", ")", "if", "time", ".", "localtime", "(", "dtc", ".", "timestamp", "(", ")", ")", ".", "tm_isdst", "==", "1", ":", "return", "True", "return", "False" ]
Check if date is in dst.
[ "Check", "if", "date", "is", "in", "dst", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L13-L21
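Assuming the private helper is importable from the module path shown in this record, usage looks like the sketch below; the printed booleans depend on the machine's local timezone rules, so they are illustrative only:

import datetime
from chronyk.chronyk import _isdst  # private helper; path per this record

# A bare date is promoted to midnight before the DST lookup.
print(_isdst(datetime.date(2015, 7, 1)))      # True in most northern-hemisphere DST zones
print(_isdst(datetime.datetime(2015, 1, 1)))  # typically False in those zones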
13,772
KoffeinFlummi/Chronyk
chronyk/chronyk.py
_mktime
def _mktime(time_struct): """Custom mktime because Windows can't be arsed to properly do pre-Epoch dates, probably because it's busy counting all its chromosomes. """ try: return time.mktime(time_struct) except OverflowError: dt = datetime.datetime(*time_struct[:6]) ep = datetime.datetime(1970, 1, 1) diff = dt - ep ts = diff.days * 24 * 3600 + diff.seconds + time.timezone if time_struct.tm_isdst == 1: ts -= 3600 # Guess if DST is in effect for -1 if time_struct.tm_isdst == -1 and _isdst(dt): ts -= 3600 return ts
python
def _mktime(time_struct): """Custom mktime because Windows can't be arsed to properly do pre-Epoch dates, probably because it's busy counting all its chromosomes. """ try: return time.mktime(time_struct) except OverflowError: dt = datetime.datetime(*time_struct[:6]) ep = datetime.datetime(1970, 1, 1) diff = dt - ep ts = diff.days * 24 * 3600 + diff.seconds + time.timezone if time_struct.tm_isdst == 1: ts -= 3600 # Guess if DST is in effect for -1 if time_struct.tm_isdst == -1 and _isdst(dt): ts -= 3600 return ts
[ "def", "_mktime", "(", "time_struct", ")", ":", "try", ":", "return", "time", ".", "mktime", "(", "time_struct", ")", "except", "OverflowError", ":", "dt", "=", "datetime", ".", "datetime", "(", "*", "time_struct", "[", ":", "6", "]", ")", "ep", "=", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ")", "diff", "=", "dt", "-", "ep", "ts", "=", "diff", ".", "days", "*", "24", "*", "3600", "+", "diff", ".", "seconds", "+", "time", ".", "timezone", "if", "time_struct", ".", "tm_isdst", "==", "1", ":", "ts", "-=", "3600", "# Guess if DST is in effect for -1", "if", "time_struct", ".", "tm_isdst", "==", "-", "1", "and", "_isdst", "(", "dt", ")", ":", "ts", "-=", "3600", "return", "ts" ]
Custom mktime because Windows can't be arsed to properly do pre-Epoch dates, probably because it's busy counting all its chromosomes.
[ "Custom", "mktime", "because", "Windows", "can", "t", "be", "arsed", "to", "properly", "do", "pre", "-", "Epoch", "dates", "probably", "because", "it", "s", "busy", "counting", "all", "its", "chromosomes", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L24-L40
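The OverflowError branch only matters on platforms whose mktime rejects pre-epoch inputs (notably Windows); elsewhere the call falls straight through to time.mktime. The same fallback shape repeats in the _strftime, _gmtime, _dtfromtimestamp and _dfromtimestamp helpers below. A hedged usage sketch:

import datetime
from chronyk.chronyk import _mktime  # private helper; path per this record

pre_epoch = datetime.datetime(1950, 6, 1, 12, 0, 0).timetuple()  # tm_isdst == -1
print(_mktime(pre_epoch))  # negative seconds before 1970-01-01; exact value is tz-dependent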
13,773
KoffeinFlummi/Chronyk
chronyk/chronyk.py
_strftime
def _strftime(pattern, time_struct=time.localtime()): """Custom strftime because Windows is shit again. """ try: return time.strftime(pattern, time_struct) except OSError: dt = datetime.datetime.fromtimestamp(_mktime(time_struct)) # This is incredibly hacky and will probably break with leap # year overlaps and shit. Any complaints should go here: # https://support.microsoft.com/ original = dt.year current = datetime.datetime.now().year dt = dt.replace(year=current) ts = dt.timestamp() if _isdst(dt): ts -= 3600 string = time.strftime(pattern, time.localtime(ts)) string = string.replace(str(current), str(original)) return string
python
def _strftime(pattern, time_struct=time.localtime()): """Custom strftime because Windows is shit again. """ try: return time.strftime(pattern, time_struct) except OSError: dt = datetime.datetime.fromtimestamp(_mktime(time_struct)) # This is incredibly hacky and will probably break with leap # year overlaps and shit. Any complaints should go here: # https://support.microsoft.com/ original = dt.year current = datetime.datetime.now().year dt = dt.replace(year=current) ts = dt.timestamp() if _isdst(dt): ts -= 3600 string = time.strftime(pattern, time.localtime(ts)) string = string.replace(str(current), str(original)) return string
[ "def", "_strftime", "(", "pattern", ",", "time_struct", "=", "time", ".", "localtime", "(", ")", ")", ":", "try", ":", "return", "time", ".", "strftime", "(", "pattern", ",", "time_struct", ")", "except", "OSError", ":", "dt", "=", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "_mktime", "(", "time_struct", ")", ")", "# This is incredibly hacky and will probably break with leap", "# year overlaps and shit. Any complaints should go here:", "# https://support.microsoft.com/", "original", "=", "dt", ".", "year", "current", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "year", "dt", "=", "dt", ".", "replace", "(", "year", "=", "current", ")", "ts", "=", "dt", ".", "timestamp", "(", ")", "if", "_isdst", "(", "dt", ")", ":", "ts", "-=", "3600", "string", "=", "time", ".", "strftime", "(", "pattern", ",", "time", ".", "localtime", "(", "ts", ")", ")", "string", "=", "string", ".", "replace", "(", "str", "(", "current", ")", ",", "str", "(", "original", ")", ")", "return", "string" ]
Custom strftime because Windows is shit again.
[ "Custom", "strftime", "because", "Windows", "is", "shit", "again", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L43-L61
13,774
KoffeinFlummi/Chronyk
chronyk/chronyk.py
_gmtime
def _gmtime(timestamp): """Custom gmtime because yada yada. """ try: return time.gmtime(timestamp) except OSError: dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp) dst = int(_isdst(dt)) return time.struct_time(dt.timetuple()[:8] + tuple([dst]))
python
def _gmtime(timestamp): """Custom gmtime because yada yada. """ try: return time.gmtime(timestamp) except OSError: dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp) dst = int(_isdst(dt)) return time.struct_time(dt.timetuple()[:8] + tuple([dst]))
[ "def", "_gmtime", "(", "timestamp", ")", ":", "try", ":", "return", "time", ".", "gmtime", "(", "timestamp", ")", "except", "OSError", ":", "dt", "=", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "timestamp", ")", "dst", "=", "int", "(", "_isdst", "(", "dt", ")", ")", "return", "time", ".", "struct_time", "(", "dt", ".", "timetuple", "(", ")", "[", ":", "8", "]", "+", "tuple", "(", "[", "dst", "]", ")", ")" ]
Custom gmtime because yada yada.
[ "Custom", "gmtime", "because", "yada", "yada", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L64-L72
13,775
KoffeinFlummi/Chronyk
chronyk/chronyk.py
_dtfromtimestamp
def _dtfromtimestamp(timestamp): """Custom datetime timestamp constructor. because Windows. again. """ try: return datetime.datetime.fromtimestamp(timestamp) except OSError: timestamp -= time.timezone dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp) if _isdst(dt): timestamp += 3600 dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp) return dt
python
def _dtfromtimestamp(timestamp): """Custom datetime timestamp constructor. because Windows. again. """ try: return datetime.datetime.fromtimestamp(timestamp) except OSError: timestamp -= time.timezone dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp) if _isdst(dt): timestamp += 3600 dt = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp) return dt
[ "def", "_dtfromtimestamp", "(", "timestamp", ")", ":", "try", ":", "return", "datetime", ".", "datetime", ".", "fromtimestamp", "(", "timestamp", ")", "except", "OSError", ":", "timestamp", "-=", "time", ".", "timezone", "dt", "=", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "timestamp", ")", "if", "_isdst", "(", "dt", ")", ":", "timestamp", "+=", "3600", "dt", "=", "datetime", ".", "datetime", "(", "1970", ",", "1", ",", "1", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "timestamp", ")", "return", "dt" ]
Custom datetime timestamp constructor. because Windows. again.
[ "Custom", "datetime", "timestamp", "constructor", ".", "because", "Windows", ".", "again", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L75-L86
13,776
KoffeinFlummi/Chronyk
chronyk/chronyk.py
_dfromtimestamp
def _dfromtimestamp(timestamp): """Custom date timestamp constructor. ditto """ try: return datetime.date.fromtimestamp(timestamp) except OSError: timestamp -= time.timezone d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp) if _isdst(d): timestamp += 3600 d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp) return d
python
def _dfromtimestamp(timestamp): """Custom date timestamp constructor. ditto """ try: return datetime.date.fromtimestamp(timestamp) except OSError: timestamp -= time.timezone d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp) if _isdst(d): timestamp += 3600 d = datetime.date(1970, 1, 1) + datetime.timedelta(seconds=timestamp) return d
[ "def", "_dfromtimestamp", "(", "timestamp", ")", ":", "try", ":", "return", "datetime", ".", "date", ".", "fromtimestamp", "(", "timestamp", ")", "except", "OSError", ":", "timestamp", "-=", "time", ".", "timezone", "d", "=", "datetime", ".", "date", "(", "1970", ",", "1", ",", "1", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "timestamp", ")", "if", "_isdst", "(", "d", ")", ":", "timestamp", "+=", "3600", "d", "=", "datetime", ".", "date", "(", "1970", ",", "1", ",", "1", ")", "+", "datetime", ".", "timedelta", "(", "seconds", "=", "timestamp", ")", "return", "d" ]
Custom date timestamp constructor. ditto
[ "Custom", "date", "timestamp", "constructor", ".", "ditto" ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L89-L100
13,777
KoffeinFlummi/Chronyk
chronyk/chronyk.py
guesstype
def guesstype(timestr): """Tries to guess whether a string represents a time or a time delta and returns the appropriate object. :param timestr (required) The string to be analyzed """ timestr_full = " {} ".format(timestr) if timestr_full.find(" in ") != -1 or timestr_full.find(" ago ") != -1: return Chronyk(timestr) comps = ["second", "minute", "hour", "day", "week", "month", "year"] for comp in comps: if timestr_full.find(comp) != -1: return ChronykDelta(timestr) return Chronyk(timestr)
python
def guesstype(timestr): """Tries to guess whether a string represents a time or a time delta and returns the appropriate object. :param timestr (required) The string to be analyzed """ timestr_full = " {} ".format(timestr) if timestr_full.find(" in ") != -1 or timestr_full.find(" ago ") != -1: return Chronyk(timestr) comps = ["second", "minute", "hour", "day", "week", "month", "year"] for comp in comps: if timestr_full.find(comp) != -1: return ChronykDelta(timestr) return Chronyk(timestr)
[ "def", "guesstype", "(", "timestr", ")", ":", "timestr_full", "=", "\" {} \"", ".", "format", "(", "timestr", ")", "if", "timestr_full", ".", "find", "(", "\" in \"", ")", "!=", "-", "1", "or", "timestr_full", ".", "find", "(", "\" ago \"", ")", "!=", "-", "1", ":", "return", "Chronyk", "(", "timestr", ")", "comps", "=", "[", "\"second\"", ",", "\"minute\"", ",", "\"hour\"", ",", "\"day\"", ",", "\"week\"", ",", "\"month\"", ",", "\"year\"", "]", "for", "comp", "in", "comps", ":", "if", "timestr_full", ".", "find", "(", "comp", ")", "!=", "-", "1", ":", "return", "ChronykDelta", "(", "timestr", ")", "return", "Chronyk", "(", "timestr", ")" ]
Tries to guess whether a string represents a time or a time delta and returns the appropriate object. :param timestr (required) The string to be analyzed
[ "Tries", "to", "guess", "whether", "a", "string", "represents", "a", "time", "or", "a", "time", "delta", "and", "returns", "the", "appropriate", "object", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L112-L128
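The dispatch is purely lexical: ' in ' or ' ago ' forces an absolute time, a bare unit word forces a delta, and anything else falls back to an absolute time. For example (import path per this record):

from chronyk.chronyk import guesstype

print(type(guesstype("2 hours ago")))  # Chronyk: ' ago ' wins over the unit word
print(type(guesstype("2 hours")))      # ChronykDelta: unit word, no in/ago
print(type(guesstype("2014-05-01")))   # Chronyk: the fallback case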
13,778
KoffeinFlummi/Chronyk
chronyk/chronyk.py
_round
def _round(num): """A custom rounding function that's a bit more 'strict'. """ deci = num - math.floor(num) if deci > 0.8: return int(math.floor(num) + 1) else: return int(math.floor(num))
python
def _round(num): """A custom rounding function that's a bit more 'strict'. """ deci = num - math.floor(num) if deci > 0.8: return int(math.floor(num) + 1) else: return int(math.floor(num))
[ "def", "_round", "(", "num", ")", ":", "deci", "=", "num", "-", "math", ".", "floor", "(", "num", ")", "if", "deci", ">", "0.8", ":", "return", "int", "(", "math", ".", "floor", "(", "num", ")", "+", "1", ")", "else", ":", "return", "int", "(", "math", ".", "floor", "(", "num", ")", ")" ]
A custom rounding function that's a bit more 'strict'.
[ "A", "custom", "rounding", "function", "that", "s", "a", "bit", "more", "strict", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L131-L138
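With the 0.8 threshold the fractional part must clear 0.8 before the value rounds up, so this is stingier than the built-in round(). A few spot checks using a standalone replica of the helper:

import math

def _round(num):
    # Standalone replica of the helper above.
    deci = num - math.floor(num)
    if deci > 0.8:
        return int(math.floor(num) + 1)
    else:
        return int(math.floor(num))

print(_round(2.79))  # 2 (built-in round() would give 3)
print(_round(2.81))  # 3 (the fractional part clears the 0.8 bar)
print(_round(-1.3))  # -2 (floor-based, so negatives move toward -inf)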
13,779
KoffeinFlummi/Chronyk
chronyk/chronyk.py
Chronyk.datetime
def datetime(self, timezone=None): """Returns a datetime object. This object retains all information, including timezones. :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ. """ if timezone is None: timezone = self.timezone return _dtfromtimestamp(self.__timestamp__ - timezone)
python
def datetime(self, timezone=None): """Returns a datetime object. This object retains all information, including timezones. :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ. """ if timezone is None: timezone = self.timezone return _dtfromtimestamp(self.__timestamp__ - timezone)
[ "def", "datetime", "(", "self", ",", "timezone", "=", "None", ")", ":", "if", "timezone", "is", "None", ":", "timezone", "=", "self", ".", "timezone", "return", "_dtfromtimestamp", "(", "self", ".", "__timestamp__", "-", "timezone", ")" ]
Returns a datetime object. This object retains all information, including timezones. :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ.
[ "Returns", "a", "datetime", "object", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L499-L512
13,780
KoffeinFlummi/Chronyk
chronyk/chronyk.py
Chronyk.ctime
def ctime(self, timezone=None): """Returns a ctime string. :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ. """ if timezone is None: timezone = self.timezone return time.ctime(self.__timestamp__ - timezone)
python
def ctime(self, timezone=None): """Returns a ctime string. :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ. """ if timezone is None: timezone = self.timezone return time.ctime(self.__timestamp__ - timezone)
[ "def", "ctime", "(", "self", ",", "timezone", "=", "None", ")", ":", "if", "timezone", "is", "None", ":", "timezone", "=", "self", ".", "timezone", "return", "time", ".", "ctime", "(", "self", ".", "__timestamp__", "-", "timezone", ")" ]
Returns a ctime string. :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ.
[ "Returns", "a", "ctime", "string", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L541-L552
13,781
KoffeinFlummi/Chronyk
chronyk/chronyk.py
Chronyk.timestring
def timestring(self, pattern="%Y-%m-%d %H:%M:%S", timezone=None): """Returns a time string. :param pattern = "%Y-%m-%d %H:%M:%S" The format used. By default, an ISO-type format is used. The syntax here is identical to the one used by time.strftime() and time.strptime(). :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ. """ if timezone is None: timezone = self.timezone timestamp = self.__timestamp__ - timezone timestamp -= LOCALTZ return _strftime(pattern, _gmtime(timestamp))
python
def timestring(self, pattern="%Y-%m-%d %H:%M:%S", timezone=None): """Returns a time string. :param pattern = "%Y-%m-%d %H:%M:%S" The format used. By default, an ISO-type format is used. The syntax here is identical to the one used by time.strftime() and time.strptime(). :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ. """ if timezone is None: timezone = self.timezone timestamp = self.__timestamp__ - timezone timestamp -= LOCALTZ return _strftime(pattern, _gmtime(timestamp))
[ "def", "timestring", "(", "self", ",", "pattern", "=", "\"%Y-%m-%d %H:%M:%S\"", ",", "timezone", "=", "None", ")", ":", "if", "timezone", "is", "None", ":", "timezone", "=", "self", ".", "timezone", "timestamp", "=", "self", ".", "__timestamp__", "-", "timezone", "timestamp", "-=", "LOCALTZ", "return", "_strftime", "(", "pattern", ",", "_gmtime", "(", "timestamp", ")", ")" ]
Returns a time string. :param pattern = "%Y-%m-%d %H:%M:%S" The format used. By default, an ISO-type format is used. The syntax here is identical to the one used by time.strftime() and time.strptime(). :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ.
[ "Returns", "a", "time", "string", "." ]
5a9f3518d2e831884dea7e8c077d6e7350df2fbe
https://github.com/KoffeinFlummi/Chronyk/blob/5a9f3518d2e831884dea7e8c077d6e7350df2fbe/chronyk/chronyk.py#L554-L572
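datetime, ctime and timestring all render the same stored timestamp; they differ only in timezone handling and output shape. A hedged usage sketch; the string-parsing constructor is assumed from the library's documentation rather than shown in these records, so treat the exact strings as illustrative:

from chronyk.chronyk import Chronyk

t = Chronyk("2014-06-01 13:30:00")  # string-parsing constructor assumed
print(t.timestring())               # '2014-06-01 13:30:00'
print(t.timestring("%d.%m.%Y"))     # '01.06.2014'
print(t.ctime())                    # e.g. 'Sun Jun  1 13:30:00 2014'
print(t.datetime(timezone=0))       # the same instant as a UTC datetime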
13,782
sjkingo/python-freshdesk
freshdesk/v2/api.py
TicketAPI.get_ticket
def get_ticket(self, ticket_id): """Fetches the ticket for the given ticket ID""" url = 'tickets/%d' % ticket_id ticket = self._api._get(url) return Ticket(**ticket)
python
def get_ticket(self, ticket_id): """Fetches the ticket for the given ticket ID""" url = 'tickets/%d' % ticket_id ticket = self._api._get(url) return Ticket(**ticket)
[ "def", "get_ticket", "(", "self", ",", "ticket_id", ")", ":", "url", "=", "'tickets/%d'", "%", "ticket_id", "ticket", "=", "self", ".", "_api", ".", "_get", "(", "url", ")", "return", "Ticket", "(", "*", "*", "ticket", ")" ]
Fetches the ticket for the given ticket ID
[ "Fetches", "the", "ticket", "for", "the", "given", "ticket", "ID" ]
39edca5d86e73de5619b1d082d9d8b5c0ae626c8
https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L11-L15
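A hedged sketch of how this method is reached in practice. The two-argument API constructor and the a.tickets accessor follow the project's README; the domain and key below are placeholders:

from freshdesk.v2.api import API

a = API('yourcompany.freshdesk.com', 'your-api-key')  # placeholder credentials
ticket = a.tickets.get_ticket(1)                      # GET tickets/1
print(ticket.subject)                                 # response fields become attributes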
13,783
sjkingo/python-freshdesk
freshdesk/v2/api.py
TicketAPI.create_outbound_email
def create_outbound_email(self, subject, description, email, email_config_id, **kwargs): """Creates an outbound email""" url = 'tickets/outbound_email' priority = kwargs.get('priority', 1) data = { 'subject': subject, 'description': description, 'priority': priority, 'email': email, 'email_config_id': email_config_id, } data.update(kwargs) ticket = self._api._post(url, data=json.dumps(data)) return Ticket(**ticket)
python
def create_outbound_email(self, subject, description, email, email_config_id, **kwargs): """Creates an outbound email""" url = 'tickets/outbound_email' priority = kwargs.get('priority', 1) data = { 'subject': subject, 'description': description, 'priority': priority, 'email': email, 'email_config_id': email_config_id, } data.update(kwargs) ticket = self._api._post(url, data=json.dumps(data)) return Ticket(**ticket)
[ "def", "create_outbound_email", "(", "self", ",", "subject", ",", "description", ",", "email", ",", "email_config_id", ",", "*", "*", "kwargs", ")", ":", "url", "=", "'tickets/outbound_email'", "priority", "=", "kwargs", ".", "get", "(", "'priority'", ",", "1", ")", "data", "=", "{", "'subject'", ":", "subject", ",", "'description'", ":", "description", ",", "'priority'", ":", "priority", ",", "'email'", ":", "email", ",", "'email_config_id'", ":", "email_config_id", ",", "}", "data", ".", "update", "(", "kwargs", ")", "ticket", "=", "self", ".", "_api", ".", "_post", "(", "url", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ")", "return", "Ticket", "(", "*", "*", "ticket", ")" ]
Creates an outbound email
[ "Creates", "an", "outbound", "email" ]
39edca5d86e73de5619b1d082d9d8b5c0ae626c8
https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L53-L66
13,784
sjkingo/python-freshdesk
freshdesk/v2/api.py
TicketAPI.update_ticket
def update_ticket(self, ticket_id, **kwargs): """Updates a ticket from a given ticket ID""" url = 'tickets/%d' % ticket_id ticket = self._api._put(url, data=json.dumps(kwargs)) return Ticket(**ticket)
python
def update_ticket(self, ticket_id, **kwargs): """Updates a ticket from a given ticket ID""" url = 'tickets/%d' % ticket_id ticket = self._api._put(url, data=json.dumps(kwargs)) return Ticket(**ticket)
[ "def", "update_ticket", "(", "self", ",", "ticket_id", ",", "*", "*", "kwargs", ")", ":", "url", "=", "'tickets/%d'", "%", "ticket_id", "ticket", "=", "self", ".", "_api", ".", "_put", "(", "url", ",", "data", "=", "json", ".", "dumps", "(", "kwargs", ")", ")", "return", "Ticket", "(", "*", "*", "ticket", ")" ]
Updates a ticket from a given ticket ID
[ "Updates", "a", "ticket", "from", "a", "given", "ticket", "ID" ]
39edca5d86e73de5619b1d082d9d8b5c0ae626c8
https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L68-L72
13,785
sjkingo/python-freshdesk
freshdesk/v2/api.py
AgentAPI.get_agent
def get_agent(self, agent_id): """Fetches the agent for the given agent ID""" url = 'agents/%s' % agent_id return Agent(**self._api._get(url))
python
def get_agent(self, agent_id): """Fetches the agent for the given agent ID""" url = 'agents/%s' % agent_id return Agent(**self._api._get(url))
[ "def", "get_agent", "(", "self", ",", "agent_id", ")", ":", "url", "=", "'agents/%s'", "%", "agent_id", "return", "Agent", "(", "*", "*", "self", ".", "_api", ".", "_get", "(", "url", ")", ")" ]
Fetches the agent for the given agent ID
[ "Fetches", "the", "agent", "for", "the", "given", "agent", "ID" ]
39edca5d86e73de5619b1d082d9d8b5c0ae626c8
https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L360-L363
13,786
sjkingo/python-freshdesk
freshdesk/v2/api.py
AgentAPI.update_agent
def update_agent(self, agent_id, **kwargs): """Updates an agent""" url = 'agents/%s' % agent_id agent = self._api._put(url, data=json.dumps(kwargs)) return Agent(**agent)
python
def update_agent(self, agent_id, **kwargs): """Updates an agent""" url = 'agents/%s' % agent_id agent = self._api._put(url, data=json.dumps(kwargs)) return Agent(**agent)
[ "def", "update_agent", "(", "self", ",", "agent_id", ",", "*", "*", "kwargs", ")", ":", "url", "=", "'agents/%s'", "%", "agent_id", "agent", "=", "self", ".", "_api", ".", "_put", "(", "url", ",", "data", "=", "json", ".", "dumps", "(", "kwargs", ")", ")", "return", "Agent", "(", "*", "*", "agent", ")" ]
Updates an agent
[ "Updates", "an", "agent" ]
39edca5d86e73de5619b1d082d9d8b5c0ae626c8
https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v2/api.py#L365-L369
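The agent endpoints mirror the ticket ones: keyword arguments are JSON-encoded and PUT to the resource. A brief hedged sketch, assuming the client exposes AgentAPI as a.agents just as it exposes TicketAPI as a.tickets (credentials are placeholders):

from freshdesk.v2.api import API

a = API('yourcompany.freshdesk.com', 'your-api-key')  # placeholder credentials
agent = a.agents.get_agent(17)                        # GET agents/17
agent = a.agents.update_agent(17, occasional=True)    # PUT agents/17 with a JSON body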
13,787
sjkingo/python-freshdesk
freshdesk/v1/api.py
API._action
def _action(self, res): """Returns JSON response or raise exception if errors are present""" try: j = res.json() except: res.raise_for_status() j = {} if 'Retry-After' in res.headers: raise HTTPError('403 Forbidden: API rate-limit has been reached until {}.' 'See http://freshdesk.com/api#ratelimit'.format(res.headers['Retry-After'])) if 'require_login' in j: raise HTTPError('403 Forbidden: API key is incorrect for this domain') if 'error' in j: raise HTTPError('{}: {}'.format(j.get('description'), j.get('errors'))) # Catch any other errors try: res.raise_for_status() except Exception as e: raise HTTPError("{}: {}".format(e, j)) return j
python
def _action(self, res): """Returns JSON response or raise exception if errors are present""" try: j = res.json() except: res.raise_for_status() j = {} if 'Retry-After' in res.headers: raise HTTPError('403 Forbidden: API rate-limit has been reached until {}.' 'See http://freshdesk.com/api#ratelimit'.format(res.headers['Retry-After'])) if 'require_login' in j: raise HTTPError('403 Forbidden: API key is incorrect for this domain') if 'error' in j: raise HTTPError('{}: {}'.format(j.get('description'), j.get('errors'))) # Catch any other errors try: res.raise_for_status() except Exception as e: raise HTTPError("{}: {}".format(e, j)) return j
[ "def", "_action", "(", "self", ",", "res", ")", ":", "try", ":", "j", "=", "res", ".", "json", "(", ")", "except", ":", "res", ".", "raise_for_status", "(", ")", "j", "=", "{", "}", "if", "'Retry-After'", "in", "res", ".", "headers", ":", "raise", "HTTPError", "(", "'403 Forbidden: API rate-limit has been reached until {}.'", "'See http://freshdesk.com/api#ratelimit'", ".", "format", "(", "res", ".", "headers", "[", "'Retry-After'", "]", ")", ")", "if", "'require_login'", "in", "j", ":", "raise", "HTTPError", "(", "'403 Forbidden: API key is incorrect for this domain'", ")", "if", "'error'", "in", "j", ":", "raise", "HTTPError", "(", "'{}: {}'", ".", "format", "(", "j", ".", "get", "(", "'description'", ")", ",", "j", ".", "get", "(", "'errors'", ")", ")", ")", "# Catch any other errors", "try", ":", "res", ".", "raise_for_status", "(", ")", "except", "Exception", "as", "e", ":", "raise", "HTTPError", "(", "\"{}: {}\"", ".", "format", "(", "e", ",", "j", ")", ")", "return", "j" ]
Returns JSON response or raise exception if errors are present
[ "Returns", "JSON", "response", "or", "raise", "exception", "if", "errors", "are", "present" ]
39edca5d86e73de5619b1d082d9d8b5c0ae626c8
https://github.com/sjkingo/python-freshdesk/blob/39edca5d86e73de5619b1d082d9d8b5c0ae626c8/freshdesk/v1/api.py#L293-L318
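Every failure mode (rate limiting, bad credentials, API-level error payloads, plain HTTP errors) is funneled into a single HTTPError. A standalone sketch of the same funnel pattern for a generic JSON API; the names here are illustrative, not part of python-freshdesk:

import requests

class APIError(Exception):
    """Single exception type for all transport- and API-level failures."""

def check_response(res):
    # Decode JSON if possible; otherwise let the HTTP status raise first.
    try:
        payload = res.json()
    except ValueError:
        res.raise_for_status()
        payload = {}
    # Application-level errors take priority over generic HTTP ones.
    if 'Retry-After' in res.headers:
        raise APIError('rate limited until {}'.format(res.headers['Retry-After']))
    if 'error' in payload:
        raise APIError(str(payload['error']))
    res.raise_for_status()  # catch any remaining 4xx/5xx with JSON bodies
    return payload

# Usage: payload = check_response(requests.get('https://api.example.com/items'))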
13,788
pysal/mapclassify
mapclassify/classifiers.py
headTail_breaks
def headTail_breaks(values, cuts): """ head tail breaks helper function """ values = np.array(values) mean = np.mean(values) cuts.append(mean) if len(values) > 1: return headTail_breaks(values[values >= mean], cuts) return cuts
python
def headTail_breaks(values, cuts): """ head tail breaks helper function """ values = np.array(values) mean = np.mean(values) cuts.append(mean) if len(values) > 1: return headTail_breaks(values[values >= mean], cuts) return cuts
[ "def", "headTail_breaks", "(", "values", ",", "cuts", ")", ":", "values", "=", "np", ".", "array", "(", "values", ")", "mean", "=", "np", ".", "mean", "(", "values", ")", "cuts", ".", "append", "(", "mean", ")", "if", "len", "(", "values", ")", ">", "1", ":", "return", "headTail_breaks", "(", "values", "[", "values", ">=", "mean", "]", ",", "cuts", ")", "return", "cuts" ]
head tail breaks helper function
[ "head", "tail", "breaks", "helper", "function" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L35-L44
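Each recursion keeps only the values at or above the running mean, so the cut list grows until a single value remains, which suits heavy-tailed distributions. A quick numeric check (import path per this record; pass a fresh list for cuts on every call, since the function appends to it):

from mapclassify.classifiers import headTail_breaks

print(headTail_breaks([1, 1, 2, 3, 5, 8, 100], []))
# [17.142857142857142, 100.0]: the global mean, then the mean of the surviving head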
13,789
pysal/mapclassify
mapclassify/classifiers.py
quantile
def quantile(y, k=4): """ Calculates the quantiles for an array Parameters ---------- y : array (n,1), values to classify k : int number of quantiles Returns ------- q : array (n,1), quantile values Examples -------- >>> import numpy as np >>> import mapclassify as mc >>> x = np.arange(1000) >>> mc.classifiers.quantile(x) array([249.75, 499.5 , 749.25, 999. ]) >>> mc.classifiers.quantile(x, k = 3) array([333., 666., 999.]) Note that if there are enough ties that the quantile values repeat, we collapse to pseudo quantiles in which case the number of classes will be less than k >>> x = [1.0] * 100 >>> x.extend([3.0] * 40) >>> len(x) 140 >>> y = np.array(x) >>> mc.classifiers.quantile(y) array([1., 3.]) """ w = 100. / k p = np.arange(w, 100 + w, w) if p[-1] > 100.0: p[-1] = 100.0 q = np.array([stats.scoreatpercentile(y, pct) for pct in p]) q = np.unique(q) k_q = len(q) if k_q < k: Warn('Warning: Not enough unique values in array to form k classes', UserWarning) Warn('Warning: setting k to %d' % k_q, UserWarning) return q
python
def quantile(y, k=4): """ Calculates the quantiles for an array Parameters ---------- y : array (n,1), values to classify k : int number of quantiles Returns ------- q : array (n,1), quantile values Examples -------- >>> import numpy as np >>> import mapclassify as mc >>> x = np.arange(1000) >>> mc.classifiers.quantile(x) array([249.75, 499.5 , 749.25, 999. ]) >>> mc.classifiers.quantile(x, k = 3) array([333., 666., 999.]) Note that if there are enough ties that the quantile values repeat, we collapse to pseudo quantiles in which case the number of classes will be less than k >>> x = [1.0] * 100 >>> x.extend([3.0] * 40) >>> len(x) 140 >>> y = np.array(x) >>> mc.classifiers.quantile(y) array([1., 3.]) """ w = 100. / k p = np.arange(w, 100 + w, w) if p[-1] > 100.0: p[-1] = 100.0 q = np.array([stats.scoreatpercentile(y, pct) for pct in p]) q = np.unique(q) k_q = len(q) if k_q < k: Warn('Warning: Not enough unique values in array to form k classes', UserWarning) Warn('Warning: setting k to %d' % k_q, UserWarning) return q
[ "def", "quantile", "(", "y", ",", "k", "=", "4", ")", ":", "w", "=", "100.", "/", "k", "p", "=", "np", ".", "arange", "(", "w", ",", "100", "+", "w", ",", "w", ")", "if", "p", "[", "-", "1", "]", ">", "100.0", ":", "p", "[", "-", "1", "]", "=", "100.0", "q", "=", "np", ".", "array", "(", "[", "stats", ".", "scoreatpercentile", "(", "y", ",", "pct", ")", "for", "pct", "in", "p", "]", ")", "q", "=", "np", ".", "unique", "(", "q", ")", "k_q", "=", "len", "(", "q", ")", "if", "k_q", "<", "k", ":", "Warn", "(", "'Warning: Not enough unique values in array to form k classes'", ",", "UserWarning", ")", "Warn", "(", "'Warning: setting k to %d'", "%", "k_q", ",", "UserWarning", ")", "return", "q" ]
Calculates the quantiles for an array Parameters ---------- y : array (n,1), values to classify k : int number of quantiles Returns ------- q : array (n,1), quantile values Examples -------- >>> import numpy as np >>> import mapclassify as mc >>> x = np.arange(1000) >>> mc.classifiers.quantile(x) array([249.75, 499.5 , 749.25, 999. ]) >>> mc.classifiers.quantile(x, k = 3) array([333., 666., 999.]) Note that if there are enough ties that the quantile values repeat, we collapse to pseudo quantiles in which case the number of classes will be less than k >>> x = [1.0] * 100 >>> x.extend([3.0] * 40) >>> len(x) 140 >>> y = np.array(x) >>> mc.classifiers.quantile(y) array([1., 3.])
[ "Calculates", "the", "quantiles", "for", "an", "array" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L47-L97
13,790
pysal/mapclassify
mapclassify/classifiers.py
bin1d
def bin1d(x, bins): """ Place values of a 1-d array into bins and determine counts of values in each bin Parameters ---------- x : array (n, 1), values to bin bins : array (k,1), upper bounds of each bin (monotonic) Returns ------- binIds : array 1-d array of integer bin Ids counts : int number of elements of x falling in each bin Examples -------- >>> import numpy as np >>> import mapclassify as mc >>> x = np.arange(100, dtype = 'float') >>> bins = [25, 74, 100] >>> binIds, counts = mc.classifiers.bin1d(x, bins) >>> binIds array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) >>> counts array([26, 49, 25]) """ left = [-float("inf")] left.extend(bins[0:-1]) right = bins cuts = list(zip(left, right)) k = len(bins) binIds = np.zeros(x.shape, dtype='int') while cuts: k -= 1 l, r = cuts.pop(-1) binIds += (x > l) * (x <= r) * k counts = np.bincount(binIds, minlength=len(bins)) return (binIds, counts)
python
def bin1d(x, bins): """ Place values of a 1-d array into bins and determine counts of values in each bin Parameters ---------- x : array (n, 1), values to bin bins : array (k,1), upper bounds of each bin (monotonic) Returns ------- binIds : array 1-d array of integer bin Ids counts : int number of elements of x falling in each bin Examples -------- >>> import numpy as np >>> import mapclassify as mc >>> x = np.arange(100, dtype = 'float') >>> bins = [25, 74, 100] >>> binIds, counts = mc.classifiers.bin1d(x, bins) >>> binIds array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) >>> counts array([26, 49, 25]) """ left = [-float("inf")] left.extend(bins[0:-1]) right = bins cuts = list(zip(left, right)) k = len(bins) binIds = np.zeros(x.shape, dtype='int') while cuts: k -= 1 l, r = cuts.pop(-1) binIds += (x > l) * (x <= r) * k counts = np.bincount(binIds, minlength=len(bins)) return (binIds, counts)
[ "def", "bin1d", "(", "x", ",", "bins", ")", ":", "left", "=", "[", "-", "float", "(", "\"inf\"", ")", "]", "left", ".", "extend", "(", "bins", "[", "0", ":", "-", "1", "]", ")", "right", "=", "bins", "cuts", "=", "list", "(", "zip", "(", "left", ",", "right", ")", ")", "k", "=", "len", "(", "bins", ")", "binIds", "=", "np", ".", "zeros", "(", "x", ".", "shape", ",", "dtype", "=", "'int'", ")", "while", "cuts", ":", "k", "-=", "1", "l", ",", "r", "=", "cuts", ".", "pop", "(", "-", "1", ")", "binIds", "+=", "(", "x", ">", "l", ")", "*", "(", "x", "<=", "r", ")", "*", "k", "counts", "=", "np", ".", "bincount", "(", "binIds", ",", "minlength", "=", "len", "(", "bins", ")", ")", "return", "(", "binIds", ",", "counts", ")" ]
Place values of a 1-d array into bins and determine counts of values in each bin Parameters ---------- x : array (n, 1), values to bin bins : array (k,1), upper bounds of each bin (monotonic) Returns ------- binIds : array 1-d array of integer bin Ids counts : int number of elements of x falling in each bin Examples -------- >>> import numpy as np >>> import mapclassify as mc >>> x = np.arange(100, dtype = 'float') >>> bins = [25, 74, 100] >>> binIds, counts = mc.classifiers.bin1d(x, bins) >>> binIds array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]) >>> counts array([26, 49, 25])
[ "Place", "values", "of", "a", "1", "-", "d", "array", "into", "bins", "and", "determine", "counts", "of", "values", "in", "each", "bin" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L231-L278
13,791
pysal/mapclassify
mapclassify/classifiers.py
_kmeans
def _kmeans(y, k=5): """ Helper function to do kmeans in one dimension """ y = y * 1. # KMEANS needs float or double dtype centroids = KMEANS(y, k)[0] centroids.sort() try: class_ids = np.abs(y - centroids).argmin(axis=1) except: class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1) uc = np.unique(class_ids) cuts = np.array([y[class_ids == c].max() for c in uc]) y_cent = np.zeros_like(y) for c in uc: y_cent[class_ids == c] = centroids[c] diffs = y - y_cent diffs *= diffs return class_ids, cuts, diffs.sum(), centroids
python
def _kmeans(y, k=5): """ Helper function to do kmeans in one dimension """ y = y * 1. # KMEANS needs float or double dtype centroids = KMEANS(y, k)[0] centroids.sort() try: class_ids = np.abs(y - centroids).argmin(axis=1) except: class_ids = np.abs(y[:, np.newaxis] - centroids).argmin(axis=1) uc = np.unique(class_ids) cuts = np.array([y[class_ids == c].max() for c in uc]) y_cent = np.zeros_like(y) for c in uc: y_cent[class_ids == c] = centroids[c] diffs = y - y_cent diffs *= diffs return class_ids, cuts, diffs.sum(), centroids
[ "def", "_kmeans", "(", "y", ",", "k", "=", "5", ")", ":", "y", "=", "y", "*", "1.", "# KMEANS needs float or double dtype", "centroids", "=", "KMEANS", "(", "y", ",", "k", ")", "[", "0", "]", "centroids", ".", "sort", "(", ")", "try", ":", "class_ids", "=", "np", ".", "abs", "(", "y", "-", "centroids", ")", ".", "argmin", "(", "axis", "=", "1", ")", "except", ":", "class_ids", "=", "np", ".", "abs", "(", "y", "[", ":", ",", "np", ".", "newaxis", "]", "-", "centroids", ")", ".", "argmin", "(", "axis", "=", "1", ")", "uc", "=", "np", ".", "unique", "(", "class_ids", ")", "cuts", "=", "np", ".", "array", "(", "[", "y", "[", "class_ids", "==", "c", "]", ".", "max", "(", ")", "for", "c", "in", "uc", "]", ")", "y_cent", "=", "np", ".", "zeros_like", "(", "y", ")", "for", "c", "in", "uc", ":", "y_cent", "[", "class_ids", "==", "c", "]", "=", "centroids", "[", "c", "]", "diffs", "=", "y", "-", "y_cent", "diffs", "*=", "diffs", "return", "class_ids", ",", "cuts", ",", "diffs", ".", "sum", "(", ")", ",", "centroids" ]
Helper function to do kmeans in one dimension
[ "Helper", "function", "to", "do", "kmeans", "in", "one", "dimension" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L289-L310
13,792
pysal/mapclassify
mapclassify/classifiers.py
natural_breaks
def natural_breaks(values, k=5): """ natural breaks helper function Jenks natural breaks is kmeans in one dimension """ values = np.array(values) uv = np.unique(values) uvk = len(uv) if uvk < k: Warn('Warning: Not enough unique values in array to form k classes', UserWarning) Warn('Warning: setting k to %d' % uvk, UserWarning) k = uvk kres = _kmeans(values, k) sids = kres[-1] # centroids fit = kres[-2] class_ids = kres[0] cuts = kres[1] return (sids, class_ids, fit, cuts)
python
def natural_breaks(values, k=5): """ natural breaks helper function Jenks natural breaks is kmeans in one dimension """ values = np.array(values) uv = np.unique(values) uvk = len(uv) if uvk < k: Warn('Warning: Not enough unique values in array to form k classes', UserWarning) Warn('Warning: setting k to %d' % uvk, UserWarning) k = uvk kres = _kmeans(values, k) sids = kres[-1] # centroids fit = kres[-2] class_ids = kres[0] cuts = kres[1] return (sids, class_ids, fit, cuts)
[ "def", "natural_breaks", "(", "values", ",", "k", "=", "5", ")", ":", "values", "=", "np", ".", "array", "(", "values", ")", "uv", "=", "np", ".", "unique", "(", "values", ")", "uvk", "=", "len", "(", "uv", ")", "if", "uvk", "<", "k", ":", "Warn", "(", "'Warning: Not enough unique values in array to form k classes'", ",", "UserWarning", ")", "Warn", "(", "'Warning: setting k to %d'", "%", "uvk", ",", "UserWarning", ")", "k", "=", "uvk", "kres", "=", "_kmeans", "(", "values", ",", "k", ")", "sids", "=", "kres", "[", "-", "1", "]", "# centroids", "fit", "=", "kres", "[", "-", "2", "]", "class_ids", "=", "kres", "[", "0", "]", "cuts", "=", "kres", "[", "1", "]", "return", "(", "sids", ",", "class_ids", ",", "fit", ",", "cuts", ")" ]
natural breaks helper function Jenks natural breaks is kmeans in one dimension
[ "natural", "breaks", "helper", "function" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L313-L332
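Because KMEANS starts from random centroids, repeated calls can return slightly different breaks; seeding numpy's generator makes a sketch repeatable in practice. Assuming the helper is importable as in this record, with outputs shown as examples rather than guarantees:

import numpy as np
from mapclassify.classifiers import natural_breaks

np.random.seed(12345)  # the KMEANS initialization is stochastic
y = np.array([1, 2, 3, 4, 5, 20, 21, 22, 100])
centroids, class_ids, fit, cuts = natural_breaks(y, k=3)
print(cuts)       # upper bound of each class, e.g. [  5.  22. 100.]
print(class_ids)  # e.g. [0 0 0 0 0 1 1 1 2]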
13,793
pysal/mapclassify
mapclassify/classifiers.py
_fit
def _fit(y, classes): """Calculate the total sum of squares for a vector y classified into classes Parameters ---------- y : array (n,1), variable to be classified classes : array (k,1), integer values denoting class membership """ tss = 0 for class_def in classes: yc = y[class_def] css = yc - yc.mean() css *= css tss += sum(css) return tss
python
def _fit(y, classes): """Calculate the total sum of squares for a vector y classified into classes Parameters ---------- y : array (n,1), variable to be classified classes : array (k,1), integer values denoting class membership """ tss = 0 for class_def in classes: yc = y[class_def] css = yc - yc.mean() css *= css tss += sum(css) return tss
[ "def", "_fit", "(", "y", ",", "classes", ")", ":", "tss", "=", "0", "for", "class_def", "in", "classes", ":", "yc", "=", "y", "[", "class_def", "]", "css", "=", "yc", "-", "yc", ".", "mean", "(", ")", "css", "*=", "css", "tss", "+=", "sum", "(", "css", ")", "return", "tss" ]
Calculate the total sum of squares for a vector y classified into classes Parameters ---------- y : array (n,1), variable to be classified classes : array (k,1), integer values denoting class membership
[ "Calculate", "the", "total", "sum", "of", "squares", "for", "a", "vector", "y", "classified", "into", "classes" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L2226-L2245
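Here classes is a sequence of index arrays, one per class, and the result is the summed within-class squared deviation; it is zero exactly when every class is internally constant. A small check (private-helper import path per this record):

import numpy as np
from mapclassify.classifiers import _fit

y = np.array([1.0, 1.0, 2.0, 10.0, 12.0])
classes = [np.array([0, 1, 2]), np.array([3, 4])]
print(_fit(y, classes))  # 2.666...: 0.666... within [1, 1, 2] plus 2.0 within [10, 12]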
13,794
pysal/mapclassify
mapclassify/classifiers.py
gadf
def gadf(y, method="Quantiles", maxk=15, pct=0.8): """ Evaluate the Goodness of Absolute Deviation Fit of a Classifier Finds the minimum value of k for which gadf>pct Parameters ---------- y : array (n, 1) values to be classified method : {'Quantiles', 'Fisher_Jenks', 'Maximum_Breaks', 'Natural_Breaks'} maxk : int maximum value of k to evaluate pct : float The percentage of GADF to exceed Returns ------- k : int number of classes cl : object instance of the classifier at k gadf : float goodness of absolute deviation fit Examples -------- >>> import mapclassify as mc >>> cal = mc.load_example() >>> qgadf = mc.classifiers.gadf(cal) >>> qgadf[0] 15 >>> qgadf[-1] 0.3740257590909283 Quantiles fail to exceed 0.80 before 15 classes. If we lower the bar to 0.2 we see quintiles as a result >>> qgadf2 = mc.classifiers.gadf(cal, pct = 0.2) >>> qgadf2[0] 5 >>> qgadf2[-1] 0.21710231966462412 >>> Notes ----- The GADF is defined as: .. math:: GADF = 1 - \sum_c \sum_{i \in c} |y_i - y_{c,med}| / \sum_i |y_i - y_{med}| where :math:`y_{med}` is the global median and :math:`y_{c,med}` is the median for class :math:`c`. See Also -------- K_classifiers """ y = np.array(y) adam = (np.abs(y - np.median(y))).sum() for k in range(2, maxk + 1): cl = kmethods[method](y, k) gadf = 1 - cl.adcm / adam if gadf > pct: break return (k, cl, gadf)
python
def gadf(y, method="Quantiles", maxk=15, pct=0.8): """ Evaluate the Goodness of Absolute Deviation Fit of a Classifier Finds the minimum value of k for which gadf>pct Parameters ---------- y : array (n, 1) values to be classified method : {'Quantiles', 'Fisher_Jenks', 'Maximum_Breaks', 'Natural_Breaks'} maxk : int maximum value of k to evaluate pct : float The percentage of GADF to exceed Returns ------- k : int number of classes cl : object instance of the classifier at k gadf : float goodness of absolute deviation fit Examples -------- >>> import mapclassify as mc >>> cal = mc.load_example() >>> qgadf = mc.classifiers.gadf(cal) >>> qgadf[0] 15 >>> qgadf[-1] 0.3740257590909283 Quantiles fail to exceed 0.80 before 15 classes. If we lower the bar to 0.2 we see quintiles as a result >>> qgadf2 = mc.classifiers.gadf(cal, pct = 0.2) >>> qgadf2[0] 5 >>> qgadf2[-1] 0.21710231966462412 >>> Notes ----- The GADF is defined as: .. math:: GADF = 1 - \sum_c \sum_{i \in c} |y_i - y_{c,med}| / \sum_i |y_i - y_{med}| where :math:`y_{med}` is the global median and :math:`y_{c,med}` is the median for class :math:`c`. See Also -------- K_classifiers """ y = np.array(y) adam = (np.abs(y - np.median(y))).sum() for k in range(2, maxk + 1): cl = kmethods[method](y, k) gadf = 1 - cl.adcm / adam if gadf > pct: break return (k, cl, gadf)
[ "def", "gadf", "(", "y", ",", "method", "=", "\"Quantiles\"", ",", "maxk", "=", "15", ",", "pct", "=", "0.8", ")", ":", "y", "=", "np", ".", "array", "(", "y", ")", "adam", "=", "(", "np", ".", "abs", "(", "y", "-", "np", ".", "median", "(", "y", ")", ")", ")", ".", "sum", "(", ")", "for", "k", "in", "range", "(", "2", ",", "maxk", "+", "1", ")", ":", "cl", "=", "kmethods", "[", "method", "]", "(", "y", ",", "k", ")", "gadf", "=", "1", "-", "cl", ".", "adcm", "/", "adam", "if", "gadf", ">", "pct", ":", "break", "return", "(", "k", ",", "cl", ",", "gadf", ")" ]
Evaluate the Goodness of Absolute Deviation Fit of a Classifier Finds the minimum value of k for which gadf>pct Parameters ---------- y : array (n, 1) values to be classified method : {'Quantiles', 'Fisher_Jenks', 'Maximum_Breaks', 'Natural_Breaks'} maxk : int maximum value of k to evaluate pct : float The percentage of GADF to exceed Returns ------- k : int number of classes cl : object instance of the classifier at k gadf : float goodness of absolute deviation fit Examples -------- >>> import mapclassify as mc >>> cal = mc.load_example() >>> qgadf = mc.classifiers.gadf(cal) >>> qgadf[0] 15 >>> qgadf[-1] 0.3740257590909283 Quantiles fail to exceed 0.80 before 15 classes. If we lower the bar to 0.2 we see quintiles as a result >>> qgadf2 = mc.classifiers.gadf(cal, pct = 0.2) >>> qgadf2[0] 5 >>> qgadf2[-1] 0.21710231966462412 >>> Notes ----- The GADF is defined as: .. math:: GADF = 1 - \sum_c \sum_{i \in c} |y_i - y_{c,med}| / \sum_i |y_i - y_{med}| where :math:`y_{med}` is the global median and :math:`y_{c,med}` is the median for class :math:`c`. See Also -------- K_classifiers
[ "Evaluate", "the", "Goodness", "of", "Absolute", "Deviation", "Fit", "of", "a", "Classifier", "Finds", "the", "minimum", "value", "of", "k", "for", "which", "gadf", ">", "pct" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L2255-L2325
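The GADF formula in the Notes above is easy to check by hand. The following sketch computes it directly from values `y` and class labels `yb` using only numpy; the function name, label-array convention, and toy data are illustrative, not part of mapclassify's API.

import numpy as np

def gadf_by_hand(y, yb):
    y, yb = np.asarray(y, dtype=float), np.asarray(yb)
    adam = np.abs(y - np.median(y)).sum()               # deviations around the global median
    adcm = sum(np.abs(y[yb == c] - np.median(y[yb == c])).sum()
               for c in np.unique(yb))                  # deviations around each class median
    return 1 - adcm / adam

y = np.array([1.0, 2.0, 3.0, 10.0, 11.0, 12.0])
yb = np.array([0, 0, 0, 1, 1, 1])
print(gadf_by_hand(y, yb))                              # 1 - 4/27 ~= 0.852: a tight fit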
13,795
pysal/mapclassify
mapclassify/classifiers.py
Map_Classifier.make
def make(cls, *args, **kwargs): """ Configure and create a classifier that will consume data and produce classifications, given the configuration options specified by this function. Note that this is like a *partial application* of the relevant class constructor. `make` creates a function that returns classifications; it does not actually do the classification. If you want to classify data directly, use the appropriate class constructor, like Quantiles, Maximum_Breaks, etc. If you *have* a classifier object, but want to find which bins new data falls into, use find_bin. Parameters ---------- *args : required positional arguments all positional arguments required by the classifier, excluding the input data. rolling : bool a boolean configuring the outputted classifier to use a rolling classifier rather than a new classifier for each input. If rolling, this adds the current data to all of the previous data in the classifier, and rebalances the bins, like a running median computation. return_object : bool a boolean configuring the outputted classifier to return the classifier object or not return_bins : bool a boolean configuring the outputted classifier to return the bins/breaks or not return_counts : bool a boolean configuring the outputted classifier to return the histogram of objects falling into each bin or not Returns ------- A function that consumes data and returns their bins (and object, bins/breaks, or counts, if requested). Note ---- This is most useful when you want to run a classifier many times with a given configuration, such as when classifying many columns of an array or dataframe using the same configuration. Examples -------- >>> import libpysal as ps >>> import mapclassify as mc >>> import geopandas as gpd >>> df = gpd.read_file(ps.examples.get_path('columbus.dbf')) >>> classifier = mc.Quantiles.make(k=9) >>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier) >>> cl["HOVAL"].values[:10] array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8]) >>> cl["CRIME"].values[:10] array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4]) >>> cl["INC"].values[:10] array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4]) >>> import pandas as pd; from numpy import linspace as lsp >>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)] >>> data = pd.DataFrame(data).T >>> data 0 1 2 0 3.000000 10.000000 -5.000000 1 3.555556 8.888889 -2.777778 2 4.111111 7.777778 -0.555556 3 4.666667 6.666667 1.666667 4 5.222222 5.555556 3.888889 5 5.777778 4.444444 6.111111 6 6.333333 3.333333 8.333333 7 6.888889 2.222222 10.555556 8 7.444444 1.111111 12.777778 9 8.000000 0.000000 15.000000 >>> data.apply(mc.Quantiles.make(rolling=True)) 0 1 2 0 0 4 0 1 0 4 0 2 1 4 0 3 1 3 0 4 2 2 1 5 2 1 2 6 3 0 4 7 3 0 4 8 4 0 4 9 4 0 4 >>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf')) >>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT') >>> my_bins = [1, 10, 20, 40, 80] >>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T] >>> len(cl) 3 >>> cl[0][:10] array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5]) """ # only flag overrides return flag to_annotate = copy.deepcopy(kwargs) return_object = kwargs.pop('return_object', False) return_bins = kwargs.pop('return_bins', False) return_counts = kwargs.pop('return_counts', False) rolling = kwargs.pop('rolling', False) if rolling: # just initialize a fake classifier data = list(range(10)) cls_instance = cls(data, *args, **kwargs) # and empty it, since we'll be using the update cls_instance.y = np.array([]) else: cls_instance = None # wrap init in a closure to make a consumer. 
# Qc Na: "Objects/Closures are poor man's Closures/Objects" def classifier(data, cls_instance=cls_instance): if rolling: cls_instance.update(data, inplace=True, **kwargs) yb = cls_instance.find_bin(data) else: cls_instance = cls(data, *args, **kwargs) yb = cls_instance.yb outs = [yb, None, None, None] outs[1] = cls_instance if return_object else None outs[2] = cls_instance.bins if return_bins else None outs[3] = cls_instance.counts if return_counts else None outs = [a for a in outs if a is not None] if len(outs) == 1: return outs[0] else: return outs # for debugging/jic, keep around the kwargs. # in future, we might want to make this a thin class, so that we can # set a custom repr. Call the class `Binner` or something, that's a # pre-configured Classifier that just consumes data, bins it, & # possibly updates the bins. classifier._options = to_annotate return classifier
python
def make(cls, *args, **kwargs): """ Configure and create a classifier that will consume data and produce classifications, given the configuration options specified by this function. Note that this is like a *partial application* of the relevant class constructor. `make` creates a function that returns classifications; it does not actually do the classification. If you want to classify data directly, use the appropriate class constructor, like Quantiles, Maximum_Breaks, etc. If you *have* a classifier object, but want to find which bins new data falls into, use find_bin. Parameters ---------- *args : required positional arguments all positional arguments required by the classifier, excluding the input data. rolling : bool a boolean configuring the outputted classifier to use a rolling classifier rather than a new classifier for each input. If rolling, this adds the current data to all of the previous data in the classifier, and rebalances the bins, like a running median computation. return_object : bool a boolean configuring the outputted classifier to return the classifier object or not return_bins : bool a boolean configuring the outputted classifier to return the bins/breaks or not return_counts : bool a boolean configuring the outputted classifier to return the histogram of objects falling into each bin or not Returns ------- A function that consumes data and returns their bins (and object, bins/breaks, or counts, if requested). Note ---- This is most useful when you want to run a classifier many times with a given configuration, such as when classifying many columns of an array or dataframe using the same configuration. Examples -------- >>> import libpysal as ps >>> import mapclassify as mc >>> import geopandas as gpd >>> df = gpd.read_file(ps.examples.get_path('columbus.dbf')) >>> classifier = mc.Quantiles.make(k=9) >>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier) >>> cl["HOVAL"].values[:10] array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8]) >>> cl["CRIME"].values[:10] array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4]) >>> cl["INC"].values[:10] array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4]) >>> import pandas as pd; from numpy import linspace as lsp >>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)] >>> data = pd.DataFrame(data).T >>> data 0 1 2 0 3.000000 10.000000 -5.000000 1 3.555556 8.888889 -2.777778 2 4.111111 7.777778 -0.555556 3 4.666667 6.666667 1.666667 4 5.222222 5.555556 3.888889 5 5.777778 4.444444 6.111111 6 6.333333 3.333333 8.333333 7 6.888889 2.222222 10.555556 8 7.444444 1.111111 12.777778 9 8.000000 0.000000 15.000000 >>> data.apply(mc.Quantiles.make(rolling=True)) 0 1 2 0 0 4 0 1 0 4 0 2 1 4 0 3 1 3 0 4 2 2 1 5 2 1 2 6 3 0 4 7 3 0 4 8 4 0 4 9 4 0 4 >>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf')) >>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT') >>> my_bins = [1, 10, 20, 40, 80] >>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T] >>> len(cl) 3 >>> cl[0][:10] array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5]) """ # only flag overrides return flag to_annotate = copy.deepcopy(kwargs) return_object = kwargs.pop('return_object', False) return_bins = kwargs.pop('return_bins', False) return_counts = kwargs.pop('return_counts', False) rolling = kwargs.pop('rolling', False) if rolling: # just initialize a fake classifier data = list(range(10)) cls_instance = cls(data, *args, **kwargs) # and empty it, since we'll be using the update cls_instance.y = np.array([]) else: cls_instance = None # wrap init in a closure to make a consumer. 
# Qc Na: "Objects/Closures are poor man's Closures/Objects" def classifier(data, cls_instance=cls_instance): if rolling: cls_instance.update(data, inplace=True, **kwargs) yb = cls_instance.find_bin(data) else: cls_instance = cls(data, *args, **kwargs) yb = cls_instance.yb outs = [yb, None, None, None] outs[1] = cls_instance if return_object else None outs[2] = cls_instance.bins if return_bins else None outs[3] = cls_instance.counts if return_counts else None outs = [a for a in outs if a is not None] if len(outs) == 1: return outs[0] else: return outs # for debugging/jic, keep around the kwargs. # in future, we might want to make this a thin class, so that we can # set a custom repr. Call the class `Binner` or something, that's a # pre-configured Classifier that just consumes data, bins it, & # possibly updates the bins. classifier._options = to_annotate return classifier
[ "def", "make", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# only flag overrides return flag", "to_annotate", "=", "copy", ".", "deepcopy", "(", "kwargs", ")", "return_object", "=", "kwargs", ".", "pop", "(", "'return_object'", ",", "False", ")", "return_bins", "=", "kwargs", ".", "pop", "(", "'return_bins'", ",", "False", ")", "return_counts", "=", "kwargs", ".", "pop", "(", "'return_counts'", ",", "False", ")", "rolling", "=", "kwargs", ".", "pop", "(", "'rolling'", ",", "False", ")", "if", "rolling", ":", "# just initialize a fake classifier", "data", "=", "list", "(", "range", "(", "10", ")", ")", "cls_instance", "=", "cls", "(", "data", ",", "*", "args", ",", "*", "*", "kwargs", ")", "# and empty it, since we'll be using the update", "cls_instance", ".", "y", "=", "np", ".", "array", "(", "[", "]", ")", "else", ":", "cls_instance", "=", "None", "# wrap init in a closure to make a consumer.", "# Qc Na: \"Objects/Closures are poor man's Closures/Objects\"", "def", "classifier", "(", "data", ",", "cls_instance", "=", "cls_instance", ")", ":", "if", "rolling", ":", "cls_instance", ".", "update", "(", "data", ",", "inplace", "=", "True", ",", "*", "*", "kwargs", ")", "yb", "=", "cls_instance", ".", "find_bin", "(", "data", ")", "else", ":", "cls_instance", "=", "cls", "(", "data", ",", "*", "args", ",", "*", "*", "kwargs", ")", "yb", "=", "cls_instance", ".", "yb", "outs", "=", "[", "yb", ",", "None", ",", "None", ",", "None", "]", "outs", "[", "1", "]", "=", "cls_instance", "if", "return_object", "else", "None", "outs", "[", "2", "]", "=", "cls_instance", ".", "bins", "if", "return_bins", "else", "None", "outs", "[", "3", "]", "=", "cls_instance", ".", "counts", "if", "return_counts", "else", "None", "outs", "=", "[", "a", "for", "a", "in", "outs", "if", "a", "is", "not", "None", "]", "if", "len", "(", "outs", ")", "==", "1", ":", "return", "outs", "[", "0", "]", "else", ":", "return", "outs", "# for debugging/jic, keep around the kwargs.", "# in future, we might want to make this a thin class, so that we can", "# set a custom repr. Call the class `Binner` or something, that's a", "# pre-configured Classifier that just consumes data, bins it, &", "# possibly updates the bins.", "classifier", ".", "_options", "=", "to_annotate", "return", "classifier" ]
Configure and create a classifier that will consume data and produce classifications, given the configuration options specified by this function. Note that this is like a *partial application* of the relevant class constructor. `make` creates a function that returns classifications; it does not actually do the classification. If you want to classify data directly, use the appropriate class constructor, like Quantiles, Maximum_Breaks, etc. If you *have* a classifier object, but want to find which bins new data falls into, use find_bin. Parameters ---------- *args : required positional arguments all positional arguments required by the classifier, excluding the input data. rolling : bool a boolean configuring the outputted classifier to use a rolling classifier rather than a new classifier for each input. If rolling, this adds the current data to all of the previous data in the classifier, and rebalances the bins, like a running median computation. return_object : bool a boolean configuring the outputted classifier to return the classifier object or not return_bins : bool a boolean configuring the outputted classifier to return the bins/breaks or not return_counts : bool a boolean configuring the outputted classifier to return the histogram of objects falling into each bin or not Returns ------- A function that consumes data and returns their bins (and object, bins/breaks, or counts, if requested). Note ---- This is most useful when you want to run a classifier many times with a given configuration, such as when classifying many columns of an array or dataframe using the same configuration. Examples -------- >>> import libpysal as ps >>> import mapclassify as mc >>> import geopandas as gpd >>> df = gpd.read_file(ps.examples.get_path('columbus.dbf')) >>> classifier = mc.Quantiles.make(k=9) >>> cl = df[['HOVAL', 'CRIME', 'INC']].apply(classifier) >>> cl["HOVAL"].values[:10] array([8, 7, 2, 4, 1, 3, 8, 5, 7, 8]) >>> cl["CRIME"].values[:10] array([0, 1, 3, 4, 6, 2, 0, 5, 3, 4]) >>> cl["INC"].values[:10] array([7, 8, 5, 0, 3, 5, 0, 3, 6, 4]) >>> import pandas as pd; from numpy import linspace as lsp >>> data = [lsp(3,8,num=10), lsp(10, 0, num=10), lsp(-5, 15, num=10)] >>> data = pd.DataFrame(data).T >>> data 0 1 2 0 3.000000 10.000000 -5.000000 1 3.555556 8.888889 -2.777778 2 4.111111 7.777778 -0.555556 3 4.666667 6.666667 1.666667 4 5.222222 5.555556 3.888889 5 5.777778 4.444444 6.111111 6 6.333333 3.333333 8.333333 7 6.888889 2.222222 10.555556 8 7.444444 1.111111 12.777778 9 8.000000 0.000000 15.000000 >>> data.apply(mc.Quantiles.make(rolling=True)) 0 1 2 0 0 4 0 1 0 4 0 2 1 4 0 3 1 3 0 4 2 2 1 5 2 1 2 6 3 0 4 7 3 0 4 8 4 0 4 9 4 0 4 >>> dbf = ps.io.open(ps.examples.get_path('baltim.dbf')) >>> data = dbf.by_col_array('PRICE', 'LOTSZ', 'SQFT') >>> my_bins = [1, 10, 20, 40, 80] >>> cl = [mc.User_Defined.make(bins=my_bins)(a) for a in data.T] >>> len(cl) 3 >>> cl[0][:10] array([4, 5, 5, 5, 4, 4, 5, 4, 4, 5])
[ "Configure", "and", "create", "a", "classifier", "that", "will", "consume", "data", "and", "produce", "classifications", "given", "the", "configuration", "options", "specified", "by", "this", "function", "." ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L476-L618
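The closure trick at the heart of `make` can be reduced to a few lines. The sketch below uses a hypothetical stand-in classifier `Clf` (equal-width bins, purely illustrative, not mapclassify's implementation) to show how the configuration is captured once while the data arrives later, which is what lets the configured function be handed to `DataFrame.apply` column by column.

import numpy as np

class Clf:
    """Stand-in classifier: equal-width bins, for illustration only."""
    def __init__(self, data, k):
        data = np.asarray(data, dtype=float)
        self.bins = np.linspace(data.min(), data.max(), k + 1)[1:]
        self.yb = np.digitize(data, self.bins, right=True)

def make_classifier(k):
    # k is captured in the closure; each call builds a fresh classifier
    # on the incoming data, so one configured function serves many columns
    def classifier(data):
        return Clf(data, k=k).yb
    return classifier

clf = make_classifier(k=4)
print(clf([0, 1, 2, 8, 9, 10]))   # [0 0 0 3 3 3]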
13,796
pysal/mapclassify
mapclassify/classifiers.py
Map_Classifier.get_tss
def get_tss(self): """ Total sum of squares around class means Returns sum of squares over all class means """ tss = 0 for class_def in self.classes: if len(class_def) > 0: yc = self.y[class_def] css = yc - yc.mean() css *= css tss += sum(css) return tss
python
def get_tss(self): """ Total sum of squares around class means Returns sum of squares over all class means """ tss = 0 for class_def in self.classes: if len(class_def) > 0: yc = self.y[class_def] css = yc - yc.mean() css *= css tss += sum(css) return tss
[ "def", "get_tss", "(", "self", ")", ":", "tss", "=", "0", "for", "class_def", "in", "self", ".", "classes", ":", "if", "len", "(", "class_def", ")", ">", "0", ":", "yc", "=", "self", ".", "y", "[", "class_def", "]", "css", "=", "yc", "-", "yc", ".", "mean", "(", ")", "css", "*=", "css", "tss", "+=", "sum", "(", "css", ")", "return", "tss" ]
Total sum of squares around class means Returns sum of squares over all class means
[ "Total", "sum", "of", "squares", "around", "class", "means" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L663-L676
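An equivalent vectorized reading of the TSS loop above, assuming the grouping is given as a label array `yb` rather than the per-class index lists stored in `self.classes`; names and toy data are illustrative.

import numpy as np

def tss_by_hand(y, yb):
    y, yb = np.asarray(y, dtype=float), np.asarray(yb)
    return sum(((y[yb == c] - y[yb == c].mean()) ** 2).sum()
               for c in np.unique(yb))

y = np.array([1.0, 2.0, 3.0, 10.0, 12.0])
yb = np.array([0, 0, 0, 1, 1])
print(tss_by_hand(y, yb))   # 2.0 + 2.0 = 4.0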
13,797
pysal/mapclassify
mapclassify/classifiers.py
Map_Classifier.get_gadf
def get_gadf(self): """ Goodness of absolute deviation of fit """ adam = (np.abs(self.y - np.median(self.y))).sum() gadf = 1 - self.adcm / adam return gadf
python
def get_gadf(self): """ Goodness of absolute deviation of fit """ adam = (np.abs(self.y - np.median(self.y))).sum() gadf = 1 - self.adcm / adam return gadf
[ "def", "get_gadf", "(", "self", ")", ":", "adam", "=", "(", "np", ".", "abs", "(", "self", ".", "y", "-", "np", ".", "median", "(", "self", ".", "y", ")", ")", ")", ".", "sum", "(", ")", "gadf", "=", "1", "-", "self", ".", "adcm", "/", "adam", "return", "gadf" ]
Goodness of absolute deviation of fit
[ "Goodness", "of", "absolute", "deviation", "of", "fit" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L699-L705
13,798
pysal/mapclassify
mapclassify/classifiers.py
Map_Classifier.find_bin
def find_bin(self, x): """ Sort input or inputs according to the current bin estimate Parameters ---------- x : array or numeric a value or array of values to fit within the estimated bins Returns ------- a bin index or array of bin indices that classify the input into one of the classifiers' bins. Note that this differs from similar functionality in numpy.digitize(x, classi.bins, right=True). This will always provide the closest bin, so data "outside" the classifier, above and below the max/min breaks, will be classified into the nearest bin. numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0 for data below the lowest bin. """ x = np.asarray(x).flatten() right = np.digitize(x, self.bins, right=True) if right.max() == len(self.bins): right[right == len(self.bins)] = len(self.bins) - 1 return right
python
def find_bin(self, x): """ Sort input or inputs according to the current bin estimate Parameters ---------- x : array or numeric a value or array of values to fit within the estimated bins Returns ------- a bin index or array of bin indices that classify the input into one of the classifiers' bins. Note that this differs from similar functionality in numpy.digitize(x, classi.bins, right=True). This will always provide the closest bin, so data "outside" the classifier, above and below the max/min breaks, will be classified into the nearest bin. numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0 for data below the lowest bin. """ x = np.asarray(x).flatten() right = np.digitize(x, self.bins, right=True) if right.max() == len(self.bins): right[right == len(self.bins)] = len(self.bins) - 1 return right
[ "def", "find_bin", "(", "self", ",", "x", ")", ":", "x", "=", "np", ".", "asarray", "(", "x", ")", ".", "flatten", "(", ")", "right", "=", "np", ".", "digitize", "(", "x", ",", "self", ".", "bins", ",", "right", "=", "True", ")", "if", "right", ".", "max", "(", ")", "==", "len", "(", "self", ".", "bins", ")", ":", "right", "[", "right", "==", "len", "(", "self", ".", "bins", ")", "]", "=", "len", "(", "self", ".", "bins", ")", "-", "1", "return", "right" ]
Sort input or inputs according to the current bin estimate Parameters ---------- x : array or numeric a value or array of values to fit within the estimated bins Returns ------- a bin index or array of bin indices that classify the input into one of the classifiers' bins. Note that this differs from similar functionality in numpy.digitize(x, classi.bins, right=True). This will always provide the closest bin, so data "outside" the classifier, above and below the max/min breaks, will be classified into the nearest bin. numpy.digitize returns k+1 for data greater than the greatest bin, but retains 0 for data below the lowest bin.
[ "Sort", "input", "or", "inputs", "according", "to", "the", "current", "bin", "estimate" ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L751-L779
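The clamping behavior described in the docstring is easy to see next to a raw `numpy.digitize` call. In this sketch (the `bins` array is chosen arbitrarily for illustration), the value beyond the last break gets index `len(bins)` from digitize but is pulled back into the last class by `find_bin`-style clamping.

import numpy as np

bins = np.array([10.0, 20.0, 30.0])        # upper bounds of three classes
x = np.array([5.0, 20.0, 25.0, 99.0])

raw = np.digitize(x, bins, right=True)     # [0 1 2 3]; 99.0 lands past the last break
clamped = np.where(raw == len(bins), len(bins) - 1, raw)
print(raw, clamped)                        # [0 1 2 3] [0 1 2 2]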
13,799
pysal/mapclassify
mapclassify/classifiers.py
Fisher_Jenks_Sampled.update
def update(self, y=None, inplace=False, **kwargs): """ Add data or change classification parameters. Parameters ---------- y : array (n,1) array of data to classify inplace : bool whether to conduct the update in place or to return a copy estimated from the additional specifications. Additional parameters provided in **kwargs are passed to the init function of the class. For documentation, check the class constructor. """ kwargs.update({'k': kwargs.pop('k', self.k)}) kwargs.update({'pct': kwargs.pop('pct', self.pct)}) kwargs.update({'truncate': kwargs.pop('truncate', self._truncated)}) if inplace: self._update(y, **kwargs) else: new = copy.deepcopy(self) new._update(y, **kwargs) return new
python
def update(self, y=None, inplace=False, **kwargs): """ Add data or change classification parameters. Parameters ---------- y : array (n,1) array of data to classify inplace : bool whether to conduct the update in place or to return a copy estimated from the additional specifications. Additional parameters provided in **kwargs are passed to the init function of the class. For documentation, check the class constructor. """ kwargs.update({'k': kwargs.pop('k', self.k)}) kwargs.update({'pct': kwargs.pop('pct', self.pct)}) kwargs.update({'truncate': kwargs.pop('truncate', self._truncated)}) if inplace: self._update(y, **kwargs) else: new = copy.deepcopy(self) new._update(y, **kwargs) return new
[ "def", "update", "(", "self", ",", "y", "=", "None", ",", "inplace", "=", "False", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update", "(", "{", "'k'", ":", "kwargs", ".", "pop", "(", "'k'", ",", "self", ".", "k", ")", "}", ")", "kwargs", ".", "update", "(", "{", "'pct'", ":", "kwargs", ".", "pop", "(", "'pct'", ",", "self", ".", "pct", ")", "}", ")", "kwargs", ".", "update", "(", "{", "'truncate'", ":", "kwargs", ".", "pop", "(", "'truncate'", ",", "self", ".", "_truncated", ")", "}", ")", "if", "inplace", ":", "self", ".", "_update", "(", "y", ",", "*", "*", "kwargs", ")", "else", ":", "new", "=", "copy", ".", "deepcopy", "(", "self", ")", "new", ".", "_update", "(", "y", ",", "*", "*", "kwargs", ")", "return", "new" ]
Add data or change classification parameters. Parameters ---------- y : array (n,1) array of data to classify inplace : bool whether to conduct the update in place or to return a copy estimated from the additional specifications. Additional parameters provided in **kwargs are passed to the init function of the class. For documentation, check the class constructor.
[ "Add", "data", "or", "change", "classification", "parameters", "." ]
5b22ec33f5802becf40557614d90cd38efa1676e
https://github.com/pysal/mapclassify/blob/5b22ec33f5802becf40557614d90cd38efa1676e/mapclassify/classifiers.py#L1586-L1609
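The inplace-or-copy dispatch used by `update` is a general pattern worth isolating. A minimal sketch, assuming a hypothetical `_update` that simply appends new data (the actual re-fitting of bins is elided for brevity):

import copy

class Rolling:
    def __init__(self, y):
        self.y = list(y)

    def _update(self, y):
        if y is not None:
            self.y.extend(y)          # a real classifier would re-fit its bins here

    def update(self, y=None, inplace=False):
        if inplace:
            self._update(y)           # mutate self, return nothing
        else:
            new = copy.deepcopy(self) # leave self untouched
            new._update(y)
            return new

r = Rolling([1, 2])
r2 = r.update([3])
print(r.y, r2.y)                      # [1, 2] [1, 2, 3]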