Dataset columns (column name: type, observed value range):
repository_name: string, length 7 to 55
func_path_in_repository: string, length 4 to 223
func_name: string, length 1 to 134
whole_func_string: string, length 75 to 104k
language: string, 1 distinct value
func_code_string: string, length 75 to 104k
func_code_tokens: list, length 19 to 28.4k
func_documentation_string: string, length 1 to 46.9k
func_documentation_tokens: list, length 1 to 1.97k
split_name: string, 1 distinct value
func_code_url: string, length 87 to 315
Stranger6667/postmarker
postmarker/models/stats.py
StatsManager.overview
def overview(self, tag=None, fromdate=None, todate=None):
    """
    Gets a brief overview of statistics for all of your outbound email.
    """
    return self.call("GET", "/stats/outbound", tag=tag, fromdate=fromdate, todate=todate)
python
[ "def", "overview", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
Gets a brief overview of statistics for all of your outbound email.
[ "Gets", "a", "brief", "overview", "of", "statistics", "for", "all", "of", "your", "outbound", "email", "." ]
train
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L8-L12
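The overview method above, like the rest of the StatsManager methods that follow, is a thin wrapper around Postmark's outbound statistics endpoints, so using it only requires a configured client. Below is a minimal, hypothetical usage sketch; it assumes the manager is exposed as the stats attribute of PostmarkClient and uses a placeholder server token.

# Hypothetical usage sketch; assumes StatsManager is reachable as `client.stats`.
from postmarker.core import PostmarkClient

client = PostmarkClient(server_token="<your-server-token>")
overview = client.stats.overview()  # GET /stats/outbound
print(overview)  # aggregate counts for sent, bounced, opened email, etc.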
Stranger6667/postmarker
postmarker/models/stats.py
StatsManager.sends
def sends(self, tag=None, fromdate=None, todate=None):
    """
    Gets a total count of emails you’ve sent out.
    """
    return self.call("GET", "/stats/outbound/sends", tag=tag, fromdate=fromdate, todate=todate)
python
[ "def", "sends", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound/sends\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
Gets a total count of emails you’ve sent out.
[ "Gets", "a", "total", "count", "of", "emails", "you’ve", "sent", "out", "." ]
train
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L14-L18
Stranger6667/postmarker
postmarker/models/stats.py
StatsManager.bounces
def bounces(self, tag=None, fromdate=None, todate=None):
    """
    Gets total counts of emails you’ve sent out that have been returned as bounced.
    """
    return self.call("GET", "/stats/outbound/bounces", tag=tag, fromdate=fromdate, todate=todate)
python
[ "def", "bounces", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound/bounces\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
Gets total counts of emails you’ve sent out that have been returned as bounced.
[ "Gets", "total", "counts", "of", "emails", "you’ve", "sent", "out", "that", "have", "been", "returned", "as", "bounced", "." ]
train
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L20-L24
Stranger6667/postmarker
postmarker/models/stats.py
StatsManager.spam
def spam(self, tag=None, fromdate=None, todate=None):
    """
    Gets a total count of recipients who have marked your email as spam.
    """
    return self.call("GET", "/stats/outbound/spam", tag=tag, fromdate=fromdate, todate=todate)
python
[ "def", "spam", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound/spam\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
Gets a total count of recipients who have marked your email as spam.
[ "Gets", "a", "total", "count", "of", "recipients", "who", "have", "marked", "your", "email", "as", "spam", "." ]
train
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L26-L30
Stranger6667/postmarker
postmarker/models/stats.py
StatsManager.tracked
def tracked(self, tag=None, fromdate=None, todate=None):
    """
    Gets a total count of emails you’ve sent with open tracking or link tracking enabled.
    """
    return self.call("GET", "/stats/outbound/tracked", tag=tag, fromdate=fromdate, todate=todate)
python
[ "def", "tracked", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound/tracked\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
Gets a total count of emails you’ve sent with open tracking or link tracking enabled.
[ "Gets", "a", "total", "count", "of", "emails", "you’ve", "sent", "with", "open", "tracking", "or", "link", "tracking", "enabled", "." ]
train
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L32-L36
Stranger6667/postmarker
postmarker/models/stats.py
StatsManager.opens
def opens(self, tag=None, fromdate=None, todate=None):
    """
    Gets total counts of recipients who opened your emails.
    This is only recorded when open tracking is enabled for that email.
    """
    return self.call("GET", "/stats/outbound/opens", tag=tag, fromdate=fromdate, todate=todate)
python
[ "def", "opens", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound/opens\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
Gets total counts of recipients who opened your emails. This is only recorded when open tracking is enabled for that email.
[ "Gets", "total", "counts", "of", "recipients", "who", "opened", "your", "emails", ".", "This", "is", "only", "recorded", "when", "open", "tracking", "is", "enabled", "for", "that", "email", "." ]
train
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L38-L43
Stranger6667/postmarker
postmarker/models/stats.py
StatsManager.opens_platforms
def opens_platforms(self, tag=None, fromdate=None, todate=None):
    """
    Gets an overview of the platforms used to open your emails.
    This is only recorded when open tracking is enabled for that email.
    """
    return self.call("GET", "/stats/outbound/opens/platforms", tag=tag, fromdate=fromdate, todate=todate)
python
[ "def", "opens_platforms", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound/opens/platforms\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
Gets an overview of the platforms used to open your emails. This is only recorded when open tracking is enabled for that email.
[ "Gets", "an", "overview", "of", "the", "platforms", "used", "to", "open", "your", "emails", ".", "This", "is", "only", "recorded", "when", "open", "tracking", "is", "enabled", "for", "that", "email", "." ]
train
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L45-L50
Stranger6667/postmarker
postmarker/models/stats.py
StatsManager.emailclients
def emailclients(self, tag=None, fromdate=None, todate=None):
    """
    Gets an overview of the email clients used to open your emails.
    This is only recorded when open tracking is enabled for that email.
    """
    return self.call("GET", "/stats/outbound/opens/emailclients", tag=tag, fromdate=fromdate, todate=todate)
python
[ "def", "emailclients", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound/opens/emailclients\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
Gets an overview of the email clients used to open your emails. This is only recorded when open tracking is enabled for that email.
[ "Gets", "an", "overview", "of", "the", "email", "clients", "used", "to", "open", "your", "emails", ".", "This", "is", "only", "recorded", "when", "open", "tracking", "is", "enabled", "for", "that", "email", "." ]
train
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L52-L57
Stranger6667/postmarker
postmarker/models/stats.py
StatsManager.readtimes
def readtimes(self, tag=None, fromdate=None, todate=None):
    """
    Gets the length of time that recipients read emails along with counts for each time.
    This is only recorded when open tracking is enabled for that email.
    Read time tracking stops at 20 seconds, so any read times above that will appear in the 20s+ field.
    """
    return self.call("GET", "/stats/outbound/opens/readtimes", tag=tag, fromdate=fromdate, todate=todate)
python
[ "def", "readtimes", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound/opens/readtimes\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
Gets the length of time that recipients read emails along with counts for each time. This is only recorded when open tracking is enabled for that email. Read time tracking stops at 20 seconds, so any read times above that will appear in the 20s+ field.
[ "Gets", "the", "length", "of", "time", "that", "recipients", "read", "emails", "along", "with", "counts", "for", "each", "time", ".", "This", "is", "only", "recorded", "when", "open", "tracking", "is", "enabled", "for", "that", "email", ".", "Read", "time", "tracking", "stops", "at", "20", "seconds", "so", "any", "read", "times", "above", "that", "will", "appear", "in", "the", "20s", "+", "field", "." ]
train
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L59-L65
Stranger6667/postmarker
postmarker/models/stats.py
StatsManager.clicks
def clicks(self, tag=None, fromdate=None, todate=None):
    """
    Gets total counts of unique links that were clicked.
    """
    return self.call("GET", "/stats/outbound/clicks", tag=tag, fromdate=fromdate, todate=todate)
python
[ "def", "clicks", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound/clicks\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
Gets total counts of unique links that were clicked.
[ "Gets", "total", "counts", "of", "unique", "links", "that", "were", "clicked", "." ]
train
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L67-L71
Stranger6667/postmarker
postmarker/models/stats.py
StatsManager.browserfamilies
def browserfamilies(self, tag=None, fromdate=None, todate=None):
    """
    Gets an overview of the browsers used to open links in your emails.
    This is only recorded when Link Tracking is enabled for that email.
    """
    return self.call("GET", "/stats/outbound/clicks/browserfamilies", tag=tag, fromdate=fromdate, todate=todate)
python
[ "def", "browserfamilies", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound/clicks/browserfamilies\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
Gets an overview of the browsers used to open links in your emails. This is only recorded when Link Tracking is enabled for that email.
[ "Gets", "an", "overview", "of", "the", "browsers", "used", "to", "open", "links", "in", "your", "emails", ".", "This", "is", "only", "recorded", "when", "Link", "Tracking", "is", "enabled", "for", "that", "email", "." ]
train
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L73-L78
Stranger6667/postmarker
postmarker/models/stats.py
StatsManager.clicks_platforms
def clicks_platforms(self, tag=None, fromdate=None, todate=None):
    """
    Gets an overview of the browser platforms used to open your emails.
    This is only recorded when Link Tracking is enabled for that email.
    """
    return self.call("GET", "/stats/outbound/clicks/platforms", tag=tag, fromdate=fromdate, todate=todate)
python
[ "def", "clicks_platforms", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound/clicks/platforms\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
Gets an overview of the browser platforms used to open your emails. This is only recorded when Link Tracking is enabled for that email.
[ "Gets", "an", "overview", "of", "the", "browser", "platforms", "used", "to", "open", "your", "emails", ".", "This", "is", "only", "recorded", "when", "Link", "Tracking", "is", "enabled", "for", "that", "email", "." ]
train
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L80-L85
Stranger6667/postmarker
postmarker/models/stats.py
StatsManager.location
def location(self, tag=None, fromdate=None, todate=None):
    """
    Gets an overview of which part of the email links were clicked from (HTML or Text).
    This is only recorded when Link Tracking is enabled for that email.
    """
    return self.call("GET", "/stats/outbound/clicks/location", tag=tag, fromdate=fromdate, todate=todate)
python
[ "def", "location", "(", "self", ",", "tag", "=", "None", ",", "fromdate", "=", "None", ",", "todate", "=", "None", ")", ":", "return", "self", ".", "call", "(", "\"GET\"", ",", "\"/stats/outbound/clicks/location\"", ",", "tag", "=", "tag", ",", "fromdate", "=", "fromdate", ",", "todate", "=", "todate", ")" ]
Gets an overview of which part of the email links were clicked from (HTML or Text). This is only recorded when Link Tracking is enabled for that email.
[ "Gets", "an", "overview", "of", "which", "part", "of", "the", "email", "links", "were", "clicked", "from", "(", "HTML", "or", "Text", ")", ".", "This", "is", "only", "recorded", "when", "Link", "Tracking", "is", "enabled", "for", "that", "email", "." ]
train
https://github.com/Stranger6667/postmarker/blob/013224ab1761e95c488c7d2701e6fa83f3108d94/postmarker/models/stats.py#L87-L92
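All of the statistics methods above accept the same optional tag, fromdate, and todate filters, which are passed through to the API as query parameters. A short sketch of narrowing two of these queries to one tag and a date window; the YYYY-MM-DD date format is an assumption about what the Postmark API accepts, and client is the PostmarkClient from the earlier sketch.

# Hypothetical filtering sketch; `client` is the PostmarkClient created above.
opens = client.stats.opens(tag="newsletter", fromdate="2023-01-01", todate="2023-01-31")
clicks = client.stats.clicks(tag="newsletter", fromdate="2023-01-01", todate="2023-01-31")
print(opens, clicks)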
aconrad/pycobertura
pycobertura/cobertura.py
Cobertura.line_rate
def line_rate(self, filename=None):
    """
    Return the global line rate of the coverage report. If the
    `filename` file is given, return the line rate of the file.
    """
    if filename is None:
        el = self.xml
    else:
        el = self._get_class_element_by_filename(filename)

    return float(el.attrib['line-rate'])
python
[ "def", "line_rate", "(", "self", ",", "filename", "=", "None", ")", ":", "if", "filename", "is", "None", ":", "el", "=", "self", ".", "xml", "else", ":", "el", "=", "self", ".", "_get_class_element_by_filename", "(", "filename", ")", "return", "float", "(", "el", ".", "attrib", "[", "'line-rate'", "]", ")" ]
Return the global line rate of the coverage report. If the `filename` file is given, return the line rate of the file.
[ "Return", "the", "global", "line", "rate", "of", "the", "coverage", "report", ".", "If", "the", "filename", "file", "is", "given", "return", "the", "line", "rate", "of", "the", "file", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L93-L103
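line_rate (and branch_rate below) read the rate attributes straight out of the Cobertura XML, either from the root element or from the <class> element matching a file. A minimal usage sketch, assuming the report is loaded from a coverage.xml file; the per-file path is a placeholder for any file listed in the report.

# Minimal sketch: global vs. per-file line rate.
from pycobertura import Cobertura

cov = Cobertura("coverage.xml")
print(cov.line_rate())                      # e.g. 0.87 for the whole report
print(cov.line_rate("pycobertura/cli.py"))  # rate for a single file in the report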
aconrad/pycobertura
pycobertura/cobertura.py
Cobertura.branch_rate
def branch_rate(self, filename=None):
    """
    Return the global branch rate of the coverage report. If the
    `filename` file is given, return the branch rate of the file.
    """
    if filename is None:
        el = self.xml
    else:
        el = self._get_class_element_by_filename(filename)

    return float(el.attrib['branch-rate'])
python
[ "def", "branch_rate", "(", "self", ",", "filename", "=", "None", ")", ":", "if", "filename", "is", "None", ":", "el", "=", "self", ".", "xml", "else", ":", "el", "=", "self", ".", "_get_class_element_by_filename", "(", "filename", ")", "return", "float", "(", "el", ".", "attrib", "[", "'branch-rate'", "]", ")" ]
Return the global branch rate of the coverage report. If the `filename` file is given, return the branch rate of the file.
[ "Return", "the", "global", "branch", "rate", "of", "the", "coverage", "report", ".", "If", "the", "filename", "file", "is", "given", "return", "the", "branch", "rate", "of", "the", "file", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L105-L115
aconrad/pycobertura
pycobertura/cobertura.py
Cobertura.missed_statements
def missed_statements(self, filename):
    """
    Return a list of uncovered line numbers for each of the missed
    statements found for the file `filename`.
    """
    el = self._get_class_element_by_filename(filename)
    lines = el.xpath('./lines/line[@hits=0]')
    return [int(l.attrib['number']) for l in lines]
python
[ "def", "missed_statements", "(", "self", ",", "filename", ")", ":", "el", "=", "self", ".", "_get_class_element_by_filename", "(", "filename", ")", "lines", "=", "el", ".", "xpath", "(", "'./lines/line[@hits=0]'", ")", "return", "[", "int", "(", "l", ".", "attrib", "[", "'number'", "]", ")", "for", "l", "in", "lines", "]" ]
Return a list of uncovered line numbers for each of the missed statements found for the file `filename`.
[ "Return", "a", "list", "of", "uncovered", "line", "numbers", "for", "each", "of", "the", "missed", "statements", "found", "for", "the", "file", "filename", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L118-L125
aconrad/pycobertura
pycobertura/cobertura.py
Cobertura.line_statuses
def line_statuses(self, filename):
    """
    Return a list of tuples `(lineno, status)` of all the lines found in
    the Cobertura report for the given file `filename` where `lineno` is
    the line number and `status` is coverage status of the line which
    can be either `True` (line hit) or `False` (line miss).
    """
    line_elements = self._get_lines_by_filename(filename)

    lines_w_status = []
    for line in line_elements:
        lineno = int(line.attrib['number'])
        status = line.attrib['hits'] != '0'
        lines_w_status.append((lineno, status))

    return lines_w_status
python
[ "def", "line_statuses", "(", "self", ",", "filename", ")", ":", "line_elements", "=", "self", ".", "_get_lines_by_filename", "(", "filename", ")", "lines_w_status", "=", "[", "]", "for", "line", "in", "line_elements", ":", "lineno", "=", "int", "(", "line", ".", "attrib", "[", "'number'", "]", ")", "status", "=", "line", ".", "attrib", "[", "'hits'", "]", "!=", "'0'", "lines_w_status", ".", "append", "(", "(", "lineno", ",", "status", ")", ")", "return", "lines_w_status" ]
Return a list of tuples `(lineno, status)` of all the lines found in the Cobertura report for the given file `filename` where `lineno` is the line number and `status` is coverage status of the line which can be either `True` (line hit) or `False` (line miss).
[ "Return", "a", "list", "of", "tuples", "(", "lineno", "status", ")", "of", "all", "the", "lines", "found", "in", "the", "Cobertura", "report", "for", "the", "given", "file", "filename", "where", "lineno", "is", "the", "line", "number", "and", "status", "is", "coverage", "status", "of", "the", "line", "which", "can", "be", "either", "True", "(", "line", "hit", ")", "or", "False", "(", "line", "miss", ")", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L137-L152
aconrad/pycobertura
pycobertura/cobertura.py
Cobertura.missed_lines
def missed_lines(self, filename):
    """
    Return a list of extrapolated uncovered line numbers for the
    file `filename` according to `Cobertura.line_statuses`.
    """
    statuses = self.line_statuses(filename)
    statuses = extrapolate_coverage(statuses)
    return [lno for lno, status in statuses if status is False]
python
[ "def", "missed_lines", "(", "self", ",", "filename", ")", ":", "statuses", "=", "self", ".", "line_statuses", "(", "filename", ")", "statuses", "=", "extrapolate_coverage", "(", "statuses", ")", "return", "[", "lno", "for", "lno", ",", "status", "in", "statuses", "if", "status", "is", "False", "]" ]
Return a list of extrapolated uncovered line numbers for the file `filename` according to `Cobertura.line_statuses`.
[ "Return", "a", "list", "of", "extrapolated", "uncovered", "line", "numbers", "for", "the", "file", "filename", "according", "to", "Cobertura", ".", "line_statuses", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L154-L161
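Note the difference between the two "missed" views: missed_statements returns only the line numbers the report marks with hits=0, while missed_lines first runs the (lineno, status) pairs from line_statuses through extrapolate_coverage, so untracked lines can inherit a status from their neighbours (that reading of extrapolate_coverage is an assumption based on its name and how it is used here). A small sketch contrasting the two, reusing the cov object from the earlier example:

# Sketch: report-level misses vs. extrapolated misses for one file.
filename = "pycobertura/cli.py"                 # placeholder: any file in the report
raw_misses = cov.missed_statements(filename)    # lines with hits=0 in the XML
extrapolated = cov.missed_lines(filename)       # extrapolated via line_statuses()
print(sorted(set(extrapolated) - set(raw_misses)))  # lines added by extrapolation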
aconrad/pycobertura
pycobertura/cobertura.py
Cobertura.file_source
def file_source(self, filename):
    """
    Return a list of namedtuple `Line` for each line of code found in the
    source file with the given `filename`.
    """
    lines = []
    try:
        with self.filesystem.open(filename) as f:
            line_statuses = dict(self.line_statuses(filename))
            for lineno, source in enumerate(f, start=1):
                line_status = line_statuses.get(lineno)
                line = Line(lineno, source, line_status, None)
                lines.append(line)
    except self.filesystem.FileNotFound as file_not_found:
        lines.append(
            Line(0, '%s not found' % file_not_found.path, None, None)
        )

    return lines
python
[ "def", "file_source", "(", "self", ",", "filename", ")", ":", "lines", "=", "[", "]", "try", ":", "with", "self", ".", "filesystem", ".", "open", "(", "filename", ")", "as", "f", ":", "line_statuses", "=", "dict", "(", "self", ".", "line_statuses", "(", "filename", ")", ")", "for", "lineno", ",", "source", "in", "enumerate", "(", "f", ",", "start", "=", "1", ")", ":", "line_status", "=", "line_statuses", ".", "get", "(", "lineno", ")", "line", "=", "Line", "(", "lineno", ",", "source", ",", "line_status", ",", "None", ")", "lines", ".", "append", "(", "line", ")", "except", "self", ".", "filesystem", ".", "FileNotFound", "as", "file_not_found", ":", "lines", ".", "append", "(", "Line", "(", "0", ",", "'%s not found'", "%", "file_not_found", ".", "path", ",", "None", ",", "None", ")", ")", "return", "lines" ]
Return a list of namedtuple `Line` for each line of code found in the source file with the given `filename`.
[ "Return", "a", "list", "of", "namedtuple", "Line", "for", "each", "line", "of", "code", "found", "in", "the", "source", "file", "with", "the", "given", "filename", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L164-L183
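Because file_source pairs every source line with its coverage status, an annotated listing takes only a few lines of code. The sketch assumes the Line namedtuple exposes number, source, and status fields, which matches how other methods in this module access it.

# Sketch: print an annotated source listing for one file.
for line in cov.file_source("pycobertura/cli.py"):
    marker = {True: "+", False: "-", None: " "}[line.status]
    print("{} {:4d} {}".format(marker, line.number, line.source.rstrip()))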
aconrad/pycobertura
pycobertura/cobertura.py
Cobertura.total_misses
def total_misses(self, filename=None):
    """
    Return the total number of uncovered statements for the file
    `filename`. If `filename` is not given, return the total number of
    uncovered statements for all files.
    """
    if filename is not None:
        return len(self.missed_statements(filename))

    total = 0
    for filename in self.files():
        total += len(self.missed_statements(filename))

    return total
python
[ "def", "total_misses", "(", "self", ",", "filename", "=", "None", ")", ":", "if", "filename", "is", "not", "None", ":", "return", "len", "(", "self", ".", "missed_statements", "(", "filename", ")", ")", "total", "=", "0", "for", "filename", "in", "self", ".", "files", "(", ")", ":", "total", "+=", "len", "(", "self", ".", "missed_statements", "(", "filename", ")", ")", "return", "total" ]
Return the total number of uncovered statements for the file `filename`. If `filename` is not given, return the total number of uncovered statements for all files.
[ "Return", "the", "total", "number", "of", "uncovered", "statements", "for", "the", "file", "filename", ".", "If", "filename", "is", "not", "given", "return", "the", "total", "number", "of", "uncovered", "statements", "for", "all", "files", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L185-L198
aconrad/pycobertura
pycobertura/cobertura.py
Cobertura.total_hits
def total_hits(self, filename=None):
    """
    Return the total number of covered statements for the file
    `filename`. If `filename` is not given, return the total number of
    covered statements for all files.
    """
    if filename is not None:
        return len(self.hit_statements(filename))

    total = 0
    for filename in self.files():
        total += len(self.hit_statements(filename))

    return total
python
[ "def", "total_hits", "(", "self", ",", "filename", "=", "None", ")", ":", "if", "filename", "is", "not", "None", ":", "return", "len", "(", "self", ".", "hit_statements", "(", "filename", ")", ")", "total", "=", "0", "for", "filename", "in", "self", ".", "files", "(", ")", ":", "total", "+=", "len", "(", "self", ".", "hit_statements", "(", "filename", ")", ")", "return", "total" ]
Return the total number of covered statements for the file `filename`. If `filename` is not given, return the total number of covered statements for all files.
[ "Return", "the", "total", "number", "of", "covered", "statements", "for", "the", "file", "filename", ".", "If", "filename", "is", "not", "given", "return", "the", "total", "number", "of", "covered", "statements", "for", "all", "files", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L200-L213
aconrad/pycobertura
pycobertura/cobertura.py
Cobertura.total_statements
def total_statements(self, filename=None):
    """
    Return the total number of statements for the file `filename`.
    If `filename` is not given, return the total number of statements
    for all files.
    """
    if filename is not None:
        statements = self._get_lines_by_filename(filename)
        return len(statements)

    total = 0
    for filename in self.files():
        statements = self._get_lines_by_filename(filename)
        total += len(statements)

    return total
python
[ "def", "total_statements", "(", "self", ",", "filename", "=", "None", ")", ":", "if", "filename", "is", "not", "None", ":", "statements", "=", "self", ".", "_get_lines_by_filename", "(", "filename", ")", "return", "len", "(", "statements", ")", "total", "=", "0", "for", "filename", "in", "self", ".", "files", "(", ")", ":", "statements", "=", "self", ".", "_get_lines_by_filename", "(", "filename", ")", "total", "+=", "len", "(", "statements", ")", "return", "total" ]
Return the total number of statements for the file `filename`. If `filename` is not given, return the total number of statements for all files.
[ "Return", "the", "total", "number", "of", "statements", "for", "the", "file", "filename", ".", "If", "filename", "is", "not", "given", "return", "the", "total", "number", "of", "statements", "for", "all", "files", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L215-L230
aconrad/pycobertura
pycobertura/cobertura.py
Cobertura.files
def files(self):
    """
    Return the list of available files in the coverage report.
    """
    # maybe replace with a trie at some point? see has_file FIXME
    already_seen = set()
    filenames = []

    for el in self.xml.xpath("//class"):
        filename = el.attrib['filename']
        if filename in already_seen:
            continue
        already_seen.add(filename)
        filenames.append(filename)

    return filenames
python
[ "def", "files", "(", "self", ")", ":", "# maybe replace with a trie at some point? see has_file FIXME", "already_seen", "=", "set", "(", ")", "filenames", "=", "[", "]", "for", "el", "in", "self", ".", "xml", ".", "xpath", "(", "\"//class\"", ")", ":", "filename", "=", "el", ".", "attrib", "[", "'filename'", "]", "if", "filename", "in", "already_seen", ":", "continue", "already_seen", ".", "add", "(", "filename", ")", "filenames", ".", "append", "(", "filename", ")", "return", "filenames" ]
Return the list of available files in the coverage report.
[ "Return", "the", "list", "of", "available", "files", "in", "the", "coverage", "report", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L233-L248
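With files() and the three totals above, a per-file summary needs no additional API. A sketch of a plain-text report built only from the methods already shown:

# Sketch: per-file statement/hit/miss summary.
for filename in cov.files():
    print("{:40s} {:5d} stmts {:5d} hit {:5d} missed".format(
        filename,
        cov.total_statements(filename),
        cov.total_hits(filename),
        cov.total_misses(filename),
    ))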
aconrad/pycobertura
pycobertura/cobertura.py
Cobertura.source_lines
def source_lines(self, filename):
    """
    Return a list for source lines of file `filename`.
    """
    with self.filesystem.open(filename) as f:
        return f.readlines()
python
[ "def", "source_lines", "(", "self", ",", "filename", ")", ":", "with", "self", ".", "filesystem", ".", "open", "(", "filename", ")", "as", "f", ":", "return", "f", ".", "readlines", "(", ")" ]
Return a list for source lines of file `filename`.
[ "Return", "a", "list", "for", "source", "lines", "of", "file", "filename", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L259-L264
aconrad/pycobertura
pycobertura/cobertura.py
CoberturaDiff.has_better_coverage
def has_better_coverage(self):
    """
    Return `True` if coverage has improved, `False` otherwise.

    This does not ensure that all changes have been covered. If this is
    what you want, use `CoberturaDiff.has_all_changes_covered()` instead.
    """
    for filename in self.files():
        if self.diff_total_misses(filename) > 0:
            return False
    return True
python
[ "def", "has_better_coverage", "(", "self", ")", ":", "for", "filename", "in", "self", ".", "files", "(", ")", ":", "if", "self", ".", "diff_total_misses", "(", "filename", ")", ">", "0", ":", "return", "False", "return", "True" ]
Return `True` if coverage has improved, `False` otherwise. This does not ensure that all changes have been covered. If this is what you want, use `CoberturaDiff.has_all_changes_covered()` instead.
[ "Return", "True", "if", "coverage", "of", "has", "improved", "False", "otherwise", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L282-L292
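CoberturaDiff compares two reports file by file; has_better_coverage only checks that no file gained misses, which is why its docstring points to has_all_changes_covered for the stricter guarantee. A hedged sketch of wiring it up, assuming the constructor simply takes the two Cobertura objects it later stores as cobertura1 and cobertura2:

# Hypothetical sketch: fail a build when coverage regresses.
from pycobertura import Cobertura
from pycobertura.cobertura import CoberturaDiff

old = Cobertura("coverage-main.xml")    # baseline report
new = Cobertura("coverage-branch.xml")  # report for the change under review
diff = CoberturaDiff(old, new)          # constructor signature assumed from the attribute names
if not diff.has_better_coverage():
    raise SystemExit("coverage regressed: some file has more missed statements")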
aconrad/pycobertura
pycobertura/cobertura.py
CoberturaDiff.has_all_changes_covered
def has_all_changes_covered(self):
    """
    Return `True` if all changes have been covered, `False` otherwise.
    """
    for filename in self.files():
        for hunk in self.file_source_hunks(filename):
            for line in hunk:
                if line.reason is None:
                    continue  # line untouched
                if line.status is False:
                    return False  # line not covered
    return True
python
[ "def", "has_all_changes_covered", "(", "self", ")", ":", "for", "filename", "in", "self", ".", "files", "(", ")", ":", "for", "hunk", "in", "self", ".", "file_source_hunks", "(", "filename", ")", ":", "for", "line", "in", "hunk", ":", "if", "line", ".", "reason", "is", "None", ":", "continue", "# line untouched", "if", "line", ".", "status", "is", "False", ":", "return", "False", "# line not covered", "return", "True" ]
Return `True` if all changes have been covered, `False` otherwise.
[ "Return", "True", "if", "all", "changes", "have", "been", "covered", "False", "otherwise", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L294-L305
aconrad/pycobertura
pycobertura/cobertura.py
CoberturaDiff._diff_attr
def _diff_attr(self, attr_name, filename):
    """
    Return the difference between
    `self.cobertura2.<attr_name>(filename)` and
    `self.cobertura1.<attr_name>(filename)`.

    This generic method is meant to diff the count of methods that
    return counts for a given file `filename`, e.g.
    `Cobertura.total_statements`, `Cobertura.total_misses`, ...

    The returned count may be a float.
    """
    if filename is not None:
        files = [filename]
    else:
        files = self.files()

    total_count = 0.0
    for filename in files:
        if self.cobertura1.has_file(filename):
            method = getattr(self.cobertura1, attr_name)
            count1 = method(filename)
        else:
            count1 = 0.0
        method = getattr(self.cobertura2, attr_name)
        count2 = method(filename)
        total_count += count2 - count1

    return total_count
python
[ "def", "_diff_attr", "(", "self", ",", "attr_name", ",", "filename", ")", ":", "if", "filename", "is", "not", "None", ":", "files", "=", "[", "filename", "]", "else", ":", "files", "=", "self", ".", "files", "(", ")", "total_count", "=", "0.0", "for", "filename", "in", "files", ":", "if", "self", ".", "cobertura1", ".", "has_file", "(", "filename", ")", ":", "method", "=", "getattr", "(", "self", ".", "cobertura1", ",", "attr_name", ")", "count1", "=", "method", "(", "filename", ")", "else", ":", "count1", "=", "0.0", "method", "=", "getattr", "(", "self", ".", "cobertura2", ",", "attr_name", ")", "count2", "=", "method", "(", "filename", ")", "total_count", "+=", "count2", "-", "count1", "return", "total_count" ]
Return the difference between `self.cobertura2.<attr_name>(filename)` and `self.cobertura1.<attr_name>(filename)`. This generic method is meant to diff the count of methods that return counts for a given file `filename`, e.g. `Cobertura.total_statements`, `Cobertura.total_misses`, ... The returned count may be a float.
[ "Return", "the", "difference", "between", "self", ".", "cobertura2", ".", "<attr_name", ">", "(", "filename", ")", "and", "self", ".", "cobertura1", ".", "<attr_name", ">", "(", "filename", ")", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L307-L335
aconrad/pycobertura
pycobertura/cobertura.py
CoberturaDiff.diff_missed_lines
def diff_missed_lines(self, filename):
    """
    Return a list of 2-element tuples `(lineno, is_new)` for the given
    file `filename` where `lineno` is a missed line number and `is_new`
    indicates whether the missed line was introduced (True) or removed
    (False).
    """
    line_changed = []
    for line in self.file_source(filename):
        if line.status is not None:
            is_new = not line.status
            line_changed.append((line.number, is_new))
    return line_changed
python
[ "def", "diff_missed_lines", "(", "self", ",", "filename", ")", ":", "line_changed", "=", "[", "]", "for", "line", "in", "self", ".", "file_source", "(", "filename", ")", ":", "if", "line", ".", "status", "is", "not", "None", ":", "is_new", "=", "not", "line", ".", "status", "line_changed", ".", "append", "(", "(", "line", ".", "number", ",", "is_new", ")", ")", "return", "line_changed" ]
Return a list of 2-element tuples `(lineno, is_new)` for the given file `filename` where `lineno` is a missed line number and `is_new` indicates whether the missed line was introduced (True) or removed (False).
[ "Return", "a", "list", "of", "2", "-", "element", "tuples", "(", "lineno", "is_new", ")", "for", "the", "given", "file", "filename", "where", "lineno", "is", "a", "missed", "line", "number", "and", "is_new", "indicates", "whether", "the", "missed", "line", "was", "introduced", "(", "True", ")", "or", "removed", "(", "False", ")", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L351-L363
aconrad/pycobertura
pycobertura/cobertura.py
CoberturaDiff.file_source
def file_source(self, filename):
    """
    Return a list of namedtuple `Line` for each line of code found in
    the given file `filename`.
    """
    if self.cobertura1.has_file(filename) and \
            self.cobertura1.filesystem.has_file(filename):
        lines1 = self.cobertura1.source_lines(filename)
        line_statuses1 = dict(self.cobertura1.line_statuses(filename))
    else:
        lines1 = []
        line_statuses1 = {}

    lines2 = self.cobertura2.source_lines(filename)
    line_statuses2 = dict(self.cobertura2.line_statuses(filename))

    # Build a dict of lineno2 -> lineno1
    lineno_map = reconcile_lines(lines2, lines1)

    lines = []
    for lineno, source in enumerate(lines2, start=1):
        status = None
        reason = None
        if lineno not in lineno_map:
            # line was added or removed, just use whatever coverage status
            # is available as there is nothing to compare against.
            status = line_statuses2.get(lineno)
            reason = 'line-edit'
        else:
            other_lineno = lineno_map[lineno]
            line_status1 = line_statuses1.get(other_lineno)
            line_status2 = line_statuses2.get(lineno)
            if line_status1 is line_status2:
                status = None  # unchanged
                reason = None
            elif line_status1 is True and line_status2 is False:
                status = False  # decreased
                reason = 'cov-down'
            elif line_status1 is False and line_status2 is True:
                status = True  # increased
                reason = 'cov-up'

        line = Line(lineno, source, status, reason)
        lines.append(line)

    return lines
python
[ "def", "file_source", "(", "self", ",", "filename", ")", ":", "if", "self", ".", "cobertura1", ".", "has_file", "(", "filename", ")", "and", "self", ".", "cobertura1", ".", "filesystem", ".", "has_file", "(", "filename", ")", ":", "lines1", "=", "self", ".", "cobertura1", ".", "source_lines", "(", "filename", ")", "line_statuses1", "=", "dict", "(", "self", ".", "cobertura1", ".", "line_statuses", "(", "filename", ")", ")", "else", ":", "lines1", "=", "[", "]", "line_statuses1", "=", "{", "}", "lines2", "=", "self", ".", "cobertura2", ".", "source_lines", "(", "filename", ")", "line_statuses2", "=", "dict", "(", "self", ".", "cobertura2", ".", "line_statuses", "(", "filename", ")", ")", "# Build a dict of lineno2 -> lineno1", "lineno_map", "=", "reconcile_lines", "(", "lines2", ",", "lines1", ")", "lines", "=", "[", "]", "for", "lineno", ",", "source", "in", "enumerate", "(", "lines2", ",", "start", "=", "1", ")", ":", "status", "=", "None", "reason", "=", "None", "if", "lineno", "not", "in", "lineno_map", ":", "# line was added or removed, just use whatever coverage status", "# is available as there is nothing to compare against.", "status", "=", "line_statuses2", ".", "get", "(", "lineno", ")", "reason", "=", "'line-edit'", "else", ":", "other_lineno", "=", "lineno_map", "[", "lineno", "]", "line_status1", "=", "line_statuses1", ".", "get", "(", "other_lineno", ")", "line_status2", "=", "line_statuses2", ".", "get", "(", "lineno", ")", "if", "line_status1", "is", "line_status2", ":", "status", "=", "None", "# unchanged", "reason", "=", "None", "elif", "line_status1", "is", "True", "and", "line_status2", "is", "False", ":", "status", "=", "False", "# decreased", "reason", "=", "'cov-down'", "elif", "line_status1", "is", "False", "and", "line_status2", "is", "True", ":", "status", "=", "True", "# increased", "reason", "=", "'cov-up'", "line", "=", "Line", "(", "lineno", ",", "source", ",", "status", ",", "reason", ")", "lines", ".", "append", "(", "line", ")", "return", "lines" ]
Return a list of namedtuple `Line` for each line of code found in the given file `filename`.
[ "Return", "a", "list", "of", "namedtuple", "Line", "for", "each", "line", "of", "code", "found", "in", "the", "given", "file", "filename", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L371-L418
aconrad/pycobertura
pycobertura/cobertura.py
CoberturaDiff.file_source_hunks
def file_source_hunks(self, filename):
    """
    Like `CoberturaDiff.file_source`, but returns a list of line hunks of
    the lines that have changed for the given file `filename`. An empty
    list means that the file has no lines that have a change in coverage
    status.
    """
    lines = self.file_source(filename)
    hunks = hunkify_lines(lines)
    return hunks
python
[ "def", "file_source_hunks", "(", "self", ",", "filename", ")", ":", "lines", "=", "self", ".", "file_source", "(", "filename", ")", "hunks", "=", "hunkify_lines", "(", "lines", ")", "return", "hunks" ]
Like `CoberturaDiff.file_source`, but returns a list of line hunks of the lines that have changed for the given file `filename`. An empty list means that the file has no lines that have a change in coverage status.
[ "Like", "CoberturaDiff", ".", "file_source", "but", "returns", "a", "list", "of", "line", "hunks", "of", "the", "lines", "that", "have", "changed", "for", "the", "given", "file", "filename", ".", "An", "empty", "list", "means", "that", "the", "file", "has", "no", "lines", "that", "have", "a", "change", "in", "coverage", "status", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L420-L429
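Each Line in a hunk carries a reason ('line-edit', 'cov-down', 'cov-up') and a status, so the hunks are convenient for reporting exactly which changed lines lack coverage. A short sketch building on the diff object above; the source field name is assumed from how Cobertura.file_source constructs the namedtuple.

# Sketch: list changed lines that are uncovered in the newer report.
for filename in diff.files():
    for hunk in diff.file_source_hunks(filename):
        for line in hunk:
            if line.reason is not None and line.status is False:
                print("{}:{} {}".format(filename, line.number, line.source.rstrip()))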
bufferapp/kiner
kiner/producer.py
KinesisProducer.monitor
def monitor(self):
    """Flushes the queue periodically."""
    while self.monitor_running.is_set():
        if time.time() - self.last_flush > self.batch_time:
            if not self.queue.empty():
                logger.info("Queue Flush: time without flush exceeded")
                self.flush_queue()
        time.sleep(self.batch_time)
python
[ "def", "monitor", "(", "self", ")", ":", "while", "self", ".", "monitor_running", ".", "is_set", "(", ")", ":", "if", "time", ".", "time", "(", ")", "-", "self", ".", "last_flush", ">", "self", ".", "batch_time", ":", "if", "not", "self", ".", "queue", ".", "empty", "(", ")", ":", "logger", ".", "info", "(", "\"Queue Flush: time without flush exceeded\"", ")", "self", ".", "flush_queue", "(", ")", "time", ".", "sleep", "(", "self", ".", "batch_time", ")" ]
Flushes the queue periodically.
[ "Flushes", "the", "queue", "periodically", "." ]
train
https://github.com/bufferapp/kiner/blob/c4c6e7ef24e5600864d0bdc4c8b967b339245238/kiner/producer.py#L63-L70
bufferapp/kiner
kiner/producer.py
KinesisProducer.put_records
def put_records(self, records, partition_key=None):
    """Add a list of data records to the record queue in the proper format.

    Convenience method that calls self.put_record for each element.

    Parameters
    ----------
    records : list
        List of records to send.
    partition_key: str
        Hash that determines which shard a given data record belongs to.
    """
    for record in records:
        self.put_record(record, partition_key)
python
[ "def", "put_records", "(", "self", ",", "records", ",", "partition_key", "=", "None", ")", ":", "for", "record", "in", "records", ":", "self", ".", "put_record", "(", "record", ",", "partition_key", ")" ]
Add a list of data records to the record queue in the proper format. Convenience method that calls self.put_record for each element. Parameters ---------- records : list List of records to send. partition_key: str Hash that determines which shard a given data record belongs to.
[ "Add", "a", "list", "of", "data", "records", "to", "the", "record", "queue", "in", "the", "proper", "format", ".", "Convinience", "method", "that", "calls", "self", ".", "put_record", "for", "each", "element", "." ]
train
https://github.com/bufferapp/kiner/blob/c4c6e7ef24e5600864d0bdc4c8b967b339245238/kiner/producer.py#L72-L85
bufferapp/kiner
kiner/producer.py
KinesisProducer.put_record
def put_record(self, data, partition_key=None):
    """Add data to the record queue in the proper format.

    Parameters
    ----------
    data : str
        Data to send.
    partition_key: str
        Hash that determines which shard a given data record belongs to.
    """
    # Byte encode the data
    data = encode_data(data)

    # Create a random partition key if not provided
    if not partition_key:
        partition_key = uuid.uuid4().hex

    # Build the record
    record = {
        'Data': data,
        'PartitionKey': partition_key
    }

    # Flush the queue if it reaches the batch size
    if self.queue.qsize() >= self.batch_size:
        logger.info("Queue Flush: batch size reached")
        self.pool.submit(self.flush_queue)

    # Append the record
    logger.debug('Putting record "{}"'.format(record['Data'][:100]))
    self.queue.put(record)
python
[ "def", "put_record", "(", "self", ",", "data", ",", "partition_key", "=", "None", ")", ":", "# Byte encode the data", "data", "=", "encode_data", "(", "data", ")", "# Create a random partition key if not provided", "if", "not", "partition_key", ":", "partition_key", "=", "uuid", ".", "uuid4", "(", ")", ".", "hex", "# Build the record", "record", "=", "{", "'Data'", ":", "data", ",", "'PartitionKey'", ":", "partition_key", "}", "# Flush the queue if it reaches the batch size", "if", "self", ".", "queue", ".", "qsize", "(", ")", ">=", "self", ".", "batch_size", ":", "logger", ".", "info", "(", "\"Queue Flush: batch size reached\"", ")", "self", ".", "pool", ".", "submit", "(", "self", ".", "flush_queue", ")", "# Append the record", "logger", ".", "debug", "(", "'Putting record \"{}\"'", ".", "format", "(", "record", "[", "'Data'", "]", "[", ":", "100", "]", ")", ")", "self", ".", "queue", ".", "put", "(", "record", ")" ]
Add data to the record queue in the proper format. Parameters ---------- data : str Data to send. partition_key: str Hash that determines which shard a given data record belongs to.
[ "Add", "data", "to", "the", "record", "queue", "in", "the", "proper", "format", "." ]
train
https://github.com/bufferapp/kiner/blob/c4c6e7ef24e5600864d0bdc4c8b967b339245238/kiner/producer.py#L87-L118
bufferapp/kiner
kiner/producer.py
KinesisProducer.close
def close(self): """Flushes the queue and waits for the executor to finish.""" logger.info('Closing producer') self.flush_queue() self.monitor_running.clear() self.pool.shutdown() logger.info('Producer closed')
python
def close(self): """Flushes the queue and waits for the executor to finish.""" logger.info('Closing producer') self.flush_queue() self.monitor_running.clear() self.pool.shutdown() logger.info('Producer closed')
[ "def", "close", "(", "self", ")", ":", "logger", ".", "info", "(", "'Closing producer'", ")", "self", ".", "flush_queue", "(", ")", "self", ".", "monitor_running", ".", "clear", "(", ")", "self", ".", "pool", ".", "shutdown", "(", ")", "logger", ".", "info", "(", "'Producer closed'", ")" ]
Flushes the queue and waits for the executor to finish.
[ "Flushes", "the", "queue", "and", "waits", "for", "the", "executor", "to", "finish", "." ]
train
https://github.com/bufferapp/kiner/blob/c4c6e7ef24e5600864d0bdc4c8b967b339245238/kiner/producer.py#L120-L126
bufferapp/kiner
kiner/producer.py
KinesisProducer.flush_queue
def flush_queue(self): """Grab all the current records in the queue and send them.""" records = [] while not self.queue.empty() and len(records) < self.batch_size: records.append(self.queue.get()) if records: self.send_records(records) self.last_flush = time.time()
python
def flush_queue(self): """Grab all the current records in the queue and send them.""" records = [] while not self.queue.empty() and len(records) < self.batch_size: records.append(self.queue.get()) if records: self.send_records(records) self.last_flush = time.time()
[ "def", "flush_queue", "(", "self", ")", ":", "records", "=", "[", "]", "while", "not", "self", ".", "queue", ".", "empty", "(", ")", "and", "len", "(", "records", ")", "<", "self", ".", "batch_size", ":", "records", ".", "append", "(", "self", ".", "queue", ".", "get", "(", ")", ")", "if", "records", ":", "self", ".", "send_records", "(", "records", ")", "self", ".", "last_flush", "=", "time", ".", "time", "(", ")" ]
Grab all the current records in the queue and send them.
[ "Grab", "all", "the", "current", "records", "in", "the", "queue", "and", "send", "them", "." ]
train
https://github.com/bufferapp/kiner/blob/c4c6e7ef24e5600864d0bdc4c8b967b339245238/kiner/producer.py#L128-L137
bufferapp/kiner
kiner/producer.py
KinesisProducer.send_records
def send_records(self, records, attempt=0): """Send records to the Kinesis stream. Falied records are sent again with an exponential backoff decay. Parameters ---------- records : array Array of formated records to send. attempt: int Number of times the records have been sent without success. """ # If we already tried more times than we wanted, save to a file if attempt > self.max_retries: logger.warning('Writing {} records to file'.format(len(records))) with open('failed_records.dlq', 'ab') as f: for r in records: f.write(r.get('Data')) return # Sleep before retrying if attempt: time.sleep(2 ** attempt * .1) response = self.kinesis_client.put_records(StreamName=self.stream_name, Records=records) failed_record_count = response['FailedRecordCount'] # Grab failed records if failed_record_count: logger.warning('Retrying failed records') failed_records = [] for i, record in enumerate(response['Records']): if record.get('ErrorCode'): failed_records.append(records[i]) # Recursive call attempt += 1 self.send_records(failed_records, attempt=attempt)
python
def send_records(self, records, attempt=0): """Send records to the Kinesis stream. Falied records are sent again with an exponential backoff decay. Parameters ---------- records : array Array of formated records to send. attempt: int Number of times the records have been sent without success. """ # If we already tried more times than we wanted, save to a file if attempt > self.max_retries: logger.warning('Writing {} records to file'.format(len(records))) with open('failed_records.dlq', 'ab') as f: for r in records: f.write(r.get('Data')) return # Sleep before retrying if attempt: time.sleep(2 ** attempt * .1) response = self.kinesis_client.put_records(StreamName=self.stream_name, Records=records) failed_record_count = response['FailedRecordCount'] # Grab failed records if failed_record_count: logger.warning('Retrying failed records') failed_records = [] for i, record in enumerate(response['Records']): if record.get('ErrorCode'): failed_records.append(records[i]) # Recursive call attempt += 1 self.send_records(failed_records, attempt=attempt)
[ "def", "send_records", "(", "self", ",", "records", ",", "attempt", "=", "0", ")", ":", "# If we already tried more times than we wanted, save to a file", "if", "attempt", ">", "self", ".", "max_retries", ":", "logger", ".", "warning", "(", "'Writing {} records to file'", ".", "format", "(", "len", "(", "records", ")", ")", ")", "with", "open", "(", "'failed_records.dlq'", ",", "'ab'", ")", "as", "f", ":", "for", "r", "in", "records", ":", "f", ".", "write", "(", "r", ".", "get", "(", "'Data'", ")", ")", "return", "# Sleep before retrying", "if", "attempt", ":", "time", ".", "sleep", "(", "2", "**", "attempt", "*", ".1", ")", "response", "=", "self", ".", "kinesis_client", ".", "put_records", "(", "StreamName", "=", "self", ".", "stream_name", ",", "Records", "=", "records", ")", "failed_record_count", "=", "response", "[", "'FailedRecordCount'", "]", "# Grab failed records", "if", "failed_record_count", ":", "logger", ".", "warning", "(", "'Retrying failed records'", ")", "failed_records", "=", "[", "]", "for", "i", ",", "record", "in", "enumerate", "(", "response", "[", "'Records'", "]", ")", ":", "if", "record", ".", "get", "(", "'ErrorCode'", ")", ":", "failed_records", ".", "append", "(", "records", "[", "i", "]", ")", "# Recursive call", "attempt", "+=", "1", "self", ".", "send_records", "(", "failed_records", ",", "attempt", "=", "attempt", ")" ]
Send records to the Kinesis stream. Failed records are sent again with an exponential backoff decay. Parameters ---------- records : array Array of formatted records to send. attempt: int Number of times the records have been sent without success.
[ "Send", "records", "to", "the", "Kinesis", "stream", "." ]
train
https://github.com/bufferapp/kiner/blob/c4c6e7ef24e5600864d0bdc4c8b967b339245238/kiner/producer.py#L139-L178
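send_records retries failed puts recursively, sleeping 2 ** attempt * 0.1 seconds before each retry and writing the raw record data to failed_records.dlq once max_retries is exceeded. The snippet below only evaluates that backoff schedule to make the timing concrete; it makes no Kinesis calls.

# Backoff schedule used between retries: 2 ** attempt * 0.1 seconds.
for attempt in range(1, 6):
    print(attempt, 2 ** attempt * 0.1)
# attempt 1 -> 0.2 s, 2 -> 0.4 s, 3 -> 0.8 s, 4 -> 1.6 s, 5 -> 3.2 s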
aconrad/pycobertura
pycobertura/utils.py
rangify
def rangify(number_list): """Assumes the list is sorted.""" if not number_list: return number_list ranges = [] range_start = prev_num = number_list[0] for num in number_list[1:]: if num != (prev_num + 1): ranges.append((range_start, prev_num)) range_start = num prev_num = num ranges.append((range_start, prev_num)) return ranges
python
def rangify(number_list): """Assumes the list is sorted.""" if not number_list: return number_list ranges = [] range_start = prev_num = number_list[0] for num in number_list[1:]: if num != (prev_num + 1): ranges.append((range_start, prev_num)) range_start = num prev_num = num ranges.append((range_start, prev_num)) return ranges
[ "def", "rangify", "(", "number_list", ")", ":", "if", "not", "number_list", ":", "return", "number_list", "ranges", "=", "[", "]", "range_start", "=", "prev_num", "=", "number_list", "[", "0", "]", "for", "num", "in", "number_list", "[", "1", ":", "]", ":", "if", "num", "!=", "(", "prev_num", "+", "1", ")", ":", "ranges", ".", "append", "(", "(", "range_start", ",", "prev_num", ")", ")", "range_start", "=", "num", "prev_num", "=", "num", "ranges", ".", "append", "(", "(", "range_start", ",", "prev_num", ")", ")", "return", "ranges" ]
Assumes the list is sorted.
[ "Assumes", "the", "list", "is", "sorted", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/utils.py#L61-L76
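rangify collapses a sorted list of line numbers into inclusive (start, stop) tuples. A quick check, assuming the function is importable from pycobertura.utils as the path above indicates:

from pycobertura.utils import rangify

assert rangify([]) == []
assert rangify([1, 2, 3, 7, 8, 10]) == [(1, 3), (7, 8), (10, 10)]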
aconrad/pycobertura
pycobertura/utils.py
extrapolate_coverage
def extrapolate_coverage(lines_w_status): """ Given the following input: >>> lines_w_status = [ (1, True), (4, True), (7, False), (9, False), ] Return expanded lines with their extrapolated line status. >>> extrapolate_coverage(lines_w_status) == [ (1, True), (2, True), (3, True), (4, True), (5, None), (6, None), (7, False), (8, False), (9, False), ] """ lines = [] prev_lineno = 0 prev_status = True for lineno, status in lines_w_status: while (lineno - prev_lineno) > 1: prev_lineno += 1 if prev_status is status: lines.append((prev_lineno, status)) else: lines.append((prev_lineno, None)) lines.append((lineno, status)) prev_lineno = lineno prev_status = status return lines
python
def extrapolate_coverage(lines_w_status): """ Given the following input: >>> lines_w_status = [ (1, True), (4, True), (7, False), (9, False), ] Return expanded lines with their extrapolated line status. >>> extrapolate_coverage(lines_w_status) == [ (1, True), (2, True), (3, True), (4, True), (5, None), (6, None), (7, False), (8, False), (9, False), ] """ lines = [] prev_lineno = 0 prev_status = True for lineno, status in lines_w_status: while (lineno - prev_lineno) > 1: prev_lineno += 1 if prev_status is status: lines.append((prev_lineno, status)) else: lines.append((prev_lineno, None)) lines.append((lineno, status)) prev_lineno = lineno prev_status = status return lines
[ "def", "extrapolate_coverage", "(", "lines_w_status", ")", ":", "lines", "=", "[", "]", "prev_lineno", "=", "0", "prev_status", "=", "True", "for", "lineno", ",", "status", "in", "lines_w_status", ":", "while", "(", "lineno", "-", "prev_lineno", ")", ">", "1", ":", "prev_lineno", "+=", "1", "if", "prev_status", "is", "status", ":", "lines", ".", "append", "(", "(", "prev_lineno", ",", "status", ")", ")", "else", ":", "lines", ".", "append", "(", "(", "prev_lineno", ",", "None", ")", ")", "lines", ".", "append", "(", "(", "lineno", ",", "status", ")", ")", "prev_lineno", "=", "lineno", "prev_status", "=", "status", "return", "lines" ]
Given the following input: >>> lines_w_status = [ (1, True), (4, True), (7, False), (9, False), ] Return expanded lines with their extrapolated line status. >>> extrapolate_coverage(lines_w_status) == [ (1, True), (2, True), (3, True), (4, True), (5, None), (6, None), (7, False), (8, False), (9, False), ]
[ "Given", "the", "following", "input", ":" ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/utils.py#L79-L120
aconrad/pycobertura
pycobertura/utils.py
reconcile_lines
def reconcile_lines(lines1, lines2): """ Return a dict `{lineno1: lineno2}` which reconciles line numbers `lineno1` of list `lines1` to line numbers `lineno2` of list `lines2`. Only lines that are common in both sets are present in the dict, lines unique to one of the sets are omitted. """ differ = difflib.Differ() diff = differ.compare(lines1, lines2) SAME = ' ' ADDED = '+ ' REMOVED = '- ' INFO = '? ' lineno_map = {} # {lineno1: lineno2, ...} lineno1_offset = 0 lineno2 = 1 for diffline in diff: if diffline.startswith(INFO): continue if diffline.startswith(SAME): lineno1 = lineno2 + lineno1_offset lineno_map[lineno1] = lineno2 elif diffline.startswith(ADDED): lineno1_offset -= 1 elif diffline.startswith(REMOVED): lineno1_offset += 1 continue lineno2 += 1 return lineno_map
python
def reconcile_lines(lines1, lines2): """ Return a dict `{lineno1: lineno2}` which reconciles line numbers `lineno1` of list `lines1` to line numbers `lineno2` of list `lines2`. Only lines that are common in both sets are present in the dict, lines unique to one of the sets are omitted. """ differ = difflib.Differ() diff = differ.compare(lines1, lines2) SAME = ' ' ADDED = '+ ' REMOVED = '- ' INFO = '? ' lineno_map = {} # {lineno1: lineno2, ...} lineno1_offset = 0 lineno2 = 1 for diffline in diff: if diffline.startswith(INFO): continue if diffline.startswith(SAME): lineno1 = lineno2 + lineno1_offset lineno_map[lineno1] = lineno2 elif diffline.startswith(ADDED): lineno1_offset -= 1 elif diffline.startswith(REMOVED): lineno1_offset += 1 continue lineno2 += 1 return lineno_map
[ "def", "reconcile_lines", "(", "lines1", ",", "lines2", ")", ":", "differ", "=", "difflib", ".", "Differ", "(", ")", "diff", "=", "differ", ".", "compare", "(", "lines1", ",", "lines2", ")", "SAME", "=", "' '", "ADDED", "=", "'+ '", "REMOVED", "=", "'- '", "INFO", "=", "'? '", "lineno_map", "=", "{", "}", "# {lineno1: lineno2, ...}", "lineno1_offset", "=", "0", "lineno2", "=", "1", "for", "diffline", "in", "diff", ":", "if", "diffline", ".", "startswith", "(", "INFO", ")", ":", "continue", "if", "diffline", ".", "startswith", "(", "SAME", ")", ":", "lineno1", "=", "lineno2", "+", "lineno1_offset", "lineno_map", "[", "lineno1", "]", "=", "lineno2", "elif", "diffline", ".", "startswith", "(", "ADDED", ")", ":", "lineno1_offset", "-=", "1", "elif", "diffline", ".", "startswith", "(", "REMOVED", ")", ":", "lineno1_offset", "+=", "1", "continue", "lineno2", "+=", "1", "return", "lineno_map" ]
Return a dict `{lineno1: lineno2}` which reconciles line numbers `lineno1` of list `lines1` to line numbers `lineno2` of list `lines2`. Only lines that are common in both sets are present in the dict, lines unique to one of the sets are omitted.
[ "Return", "a", "dict", "{", "lineno1", ":", "lineno2", "}", "which", "reconciles", "line", "numbers", "lineno1", "of", "list", "lines1", "to", "line", "numbers", "lineno2", "of", "list", "lines2", ".", "Only", "lines", "that", "are", "common", "in", "both", "sets", "are", "present", "in", "the", "dict", "lines", "unique", "to", "one", "of", "the", "sets", "are", "omitted", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/utils.py#L123-L159
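reconcile_lines maps line numbers of one revision of a file onto another via difflib, omitting lines that exist on only one side. For a simple one-line insertion the mapping works out as below (traced from the code above; treat the exact dict as illustrative rather than authoritative):

from pycobertura.utils import reconcile_lines

old = ['a', 'b', 'c']
new = ['a', 'inserted', 'b', 'c']

# 'a' stays at line 1, 'b' moves from 2 to 3, 'c' from 3 to 4;
# the inserted line has no counterpart in `old` and is omitted.
assert reconcile_lines(old, new) == {1: 1, 2: 3, 3: 4}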
aconrad/pycobertura
pycobertura/utils.py
hunkify_lines
def hunkify_lines(lines, context=3): """ Return a list of line hunks given a list of lines `lines`. The number of context lines can be control with `context` which will return line hunks surrounded with `context` lines before and after the code change. """ # Find contiguous line changes ranges = [] range_start = None for i, line in enumerate(lines): if line.status is not None: if range_start is None: range_start = i continue elif range_start is not None: range_stop = i ranges.append((range_start, range_stop)) range_start = None else: # Append the last range if range_start is not None: range_stop = i ranges.append((range_start, range_stop)) # add context ranges_w_context = [] for range_start, range_stop in ranges: range_start = range_start - context range_start = range_start if range_start >= 0 else 0 range_stop = range_stop + context ranges_w_context.append((range_start, range_stop)) # merge overlapping hunks merged_ranges = ranges_w_context[:1] for range_start, range_stop in ranges_w_context[1:]: prev_start, prev_stop = merged_ranges[-1] if range_start <= prev_stop: range_start = prev_start merged_ranges[-1] = (range_start, range_stop) else: merged_ranges.append((range_start, range_stop)) # build final hunks hunks = [] for range_start, range_stop in merged_ranges: hunk = lines[range_start:range_stop] hunks.append(hunk) return hunks
python
def hunkify_lines(lines, context=3): """ Return a list of line hunks given a list of lines `lines`. The number of context lines can be control with `context` which will return line hunks surrounded with `context` lines before and after the code change. """ # Find contiguous line changes ranges = [] range_start = None for i, line in enumerate(lines): if line.status is not None: if range_start is None: range_start = i continue elif range_start is not None: range_stop = i ranges.append((range_start, range_stop)) range_start = None else: # Append the last range if range_start is not None: range_stop = i ranges.append((range_start, range_stop)) # add context ranges_w_context = [] for range_start, range_stop in ranges: range_start = range_start - context range_start = range_start if range_start >= 0 else 0 range_stop = range_stop + context ranges_w_context.append((range_start, range_stop)) # merge overlapping hunks merged_ranges = ranges_w_context[:1] for range_start, range_stop in ranges_w_context[1:]: prev_start, prev_stop = merged_ranges[-1] if range_start <= prev_stop: range_start = prev_start merged_ranges[-1] = (range_start, range_stop) else: merged_ranges.append((range_start, range_stop)) # build final hunks hunks = [] for range_start, range_stop in merged_ranges: hunk = lines[range_start:range_stop] hunks.append(hunk) return hunks
[ "def", "hunkify_lines", "(", "lines", ",", "context", "=", "3", ")", ":", "# Find contiguous line changes", "ranges", "=", "[", "]", "range_start", "=", "None", "for", "i", ",", "line", "in", "enumerate", "(", "lines", ")", ":", "if", "line", ".", "status", "is", "not", "None", ":", "if", "range_start", "is", "None", ":", "range_start", "=", "i", "continue", "elif", "range_start", "is", "not", "None", ":", "range_stop", "=", "i", "ranges", ".", "append", "(", "(", "range_start", ",", "range_stop", ")", ")", "range_start", "=", "None", "else", ":", "# Append the last range", "if", "range_start", "is", "not", "None", ":", "range_stop", "=", "i", "ranges", ".", "append", "(", "(", "range_start", ",", "range_stop", ")", ")", "# add context", "ranges_w_context", "=", "[", "]", "for", "range_start", ",", "range_stop", "in", "ranges", ":", "range_start", "=", "range_start", "-", "context", "range_start", "=", "range_start", "if", "range_start", ">=", "0", "else", "0", "range_stop", "=", "range_stop", "+", "context", "ranges_w_context", ".", "append", "(", "(", "range_start", ",", "range_stop", ")", ")", "# merge overlapping hunks", "merged_ranges", "=", "ranges_w_context", "[", ":", "1", "]", "for", "range_start", ",", "range_stop", "in", "ranges_w_context", "[", "1", ":", "]", ":", "prev_start", ",", "prev_stop", "=", "merged_ranges", "[", "-", "1", "]", "if", "range_start", "<=", "prev_stop", ":", "range_start", "=", "prev_start", "merged_ranges", "[", "-", "1", "]", "=", "(", "range_start", ",", "range_stop", ")", "else", ":", "merged_ranges", ".", "append", "(", "(", "range_start", ",", "range_stop", ")", ")", "# build final hunks", "hunks", "=", "[", "]", "for", "range_start", ",", "range_stop", "in", "merged_ranges", ":", "hunk", "=", "lines", "[", "range_start", ":", "range_stop", "]", "hunks", ".", "append", "(", "hunk", ")", "return", "hunks" ]
Return a list of line hunks given a list of lines `lines`. The number of context lines can be controlled with `context` which will return line hunks surrounded with `context` lines before and after the code change.
[ "Return", "a", "list", "of", "line", "hunks", "given", "a", "list", "of", "lines", "lines", ".", "The", "number", "of", "context", "lines", "can", "be", "controlled", "with", "context", "which", "will", "return", "line", "hunks", "surrounded", "with", "context", "lines", "before", "and", "after", "the", "code", "change", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/utils.py#L162-L210
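hunkify_lines operates on objects exposing a .status attribute (pycobertura's Line objects, whose definition is not part of this record), grouping changed lines into hunks padded with context lines and merging overlaps. A sketch using a namedtuple as a stand-in for Line:

from collections import namedtuple

from pycobertura.utils import hunkify_lines

Line = namedtuple('Line', ['number', 'status'])  # stand-in for pycobertura's Line type

# Ten lines, with a coverage status only on lines 5 and 6.
lines = [Line(n, True if n in (5, 6) else None) for n in range(1, 11)]

hunks = hunkify_lines(lines, context=2)
assert len(hunks) == 1
assert [line.number for line in hunks[0]] == [3, 4, 5, 6, 7, 8]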
aconrad/pycobertura
pycobertura/cli.py
show
def show(cobertura_file, format, output, source, source_prefix): """show coverage summary of a Cobertura report""" cobertura = Cobertura(cobertura_file, source=source) Reporter = reporters[format] reporter = Reporter(cobertura) report = reporter.generate() if not isinstance(report, bytes): report = report.encode('utf-8') isatty = True if output is None else output.isatty() click.echo(report, file=output, nl=isatty)
python
def show(cobertura_file, format, output, source, source_prefix): """show coverage summary of a Cobertura report""" cobertura = Cobertura(cobertura_file, source=source) Reporter = reporters[format] reporter = Reporter(cobertura) report = reporter.generate() if not isinstance(report, bytes): report = report.encode('utf-8') isatty = True if output is None else output.isatty() click.echo(report, file=output, nl=isatty)
[ "def", "show", "(", "cobertura_file", ",", "format", ",", "output", ",", "source", ",", "source_prefix", ")", ":", "cobertura", "=", "Cobertura", "(", "cobertura_file", ",", "source", "=", "source", ")", "Reporter", "=", "reporters", "[", "format", "]", "reporter", "=", "Reporter", "(", "cobertura", ")", "report", "=", "reporter", ".", "generate", "(", ")", "if", "not", "isinstance", "(", "report", ",", "bytes", ")", ":", "report", "=", "report", ".", "encode", "(", "'utf-8'", ")", "isatty", "=", "True", "if", "output", "is", "None", "else", "output", ".", "isatty", "(", ")", "click", ".", "echo", "(", "report", ",", "file", "=", "output", ",", "nl", "=", "isatty", ")" ]
show coverage summary of a Cobertura report
[ "show", "coverage", "summary", "of", "a", "Cobertura", "report" ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cli.py#L65-L76
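Since show is a click command it can be exercised in-process with click's test runner. The option spelling below (positional report path plus --format) is inferred from the function signature rather than shown in this record, so treat it as a sketch:

from click.testing import CliRunner

from pycobertura.cli import show

runner = CliRunner()
# 'coverage.xml' is a placeholder path to an existing Cobertura XML report.
result = runner.invoke(show, ['coverage.xml', '--format', 'text'])
print(result.output)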
aconrad/pycobertura
pycobertura/cli.py
diff
def diff( cobertura_file1, cobertura_file2, color, format, output, source1, source2, source_prefix1, source_prefix2, source): """compare coverage of two Cobertura reports""" cobertura1 = Cobertura( cobertura_file1, source=source1, source_prefix=source_prefix1 ) cobertura2 = Cobertura( cobertura_file2, source=source2, source_prefix=source_prefix2 ) Reporter = delta_reporters[format] reporter_args = [cobertura1, cobertura2] reporter_kwargs = {'show_source': source} isatty = True if output is None else output.isatty() if format == 'text': color = isatty if color is None else color is True reporter_kwargs['color'] = color reporter = Reporter(*reporter_args, **reporter_kwargs) report = reporter.generate() if not isinstance(report, bytes): report = report.encode('utf-8') click.echo(report, file=output, nl=isatty, color=color) exit_code = get_exit_code(reporter.differ, source) raise SystemExit(exit_code)
python
def diff( cobertura_file1, cobertura_file2, color, format, output, source1, source2, source_prefix1, source_prefix2, source): """compare coverage of two Cobertura reports""" cobertura1 = Cobertura( cobertura_file1, source=source1, source_prefix=source_prefix1 ) cobertura2 = Cobertura( cobertura_file2, source=source2, source_prefix=source_prefix2 ) Reporter = delta_reporters[format] reporter_args = [cobertura1, cobertura2] reporter_kwargs = {'show_source': source} isatty = True if output is None else output.isatty() if format == 'text': color = isatty if color is None else color is True reporter_kwargs['color'] = color reporter = Reporter(*reporter_args, **reporter_kwargs) report = reporter.generate() if not isinstance(report, bytes): report = report.encode('utf-8') click.echo(report, file=output, nl=isatty, color=color) exit_code = get_exit_code(reporter.differ, source) raise SystemExit(exit_code)
[ "def", "diff", "(", "cobertura_file1", ",", "cobertura_file2", ",", "color", ",", "format", ",", "output", ",", "source1", ",", "source2", ",", "source_prefix1", ",", "source_prefix2", ",", "source", ")", ":", "cobertura1", "=", "Cobertura", "(", "cobertura_file1", ",", "source", "=", "source1", ",", "source_prefix", "=", "source_prefix1", ")", "cobertura2", "=", "Cobertura", "(", "cobertura_file2", ",", "source", "=", "source2", ",", "source_prefix", "=", "source_prefix2", ")", "Reporter", "=", "delta_reporters", "[", "format", "]", "reporter_args", "=", "[", "cobertura1", ",", "cobertura2", "]", "reporter_kwargs", "=", "{", "'show_source'", ":", "source", "}", "isatty", "=", "True", "if", "output", "is", "None", "else", "output", ".", "isatty", "(", ")", "if", "format", "==", "'text'", ":", "color", "=", "isatty", "if", "color", "is", "None", "else", "color", "is", "True", "reporter_kwargs", "[", "'color'", "]", "=", "color", "reporter", "=", "Reporter", "(", "*", "reporter_args", ",", "*", "*", "reporter_kwargs", ")", "report", "=", "reporter", ".", "generate", "(", ")", "if", "not", "isinstance", "(", "report", ",", "bytes", ")", ":", "report", "=", "report", ".", "encode", "(", "'utf-8'", ")", "click", ".", "echo", "(", "report", ",", "file", "=", "output", ",", "nl", "=", "isatty", ",", "color", "=", "color", ")", "exit_code", "=", "get_exit_code", "(", "reporter", ".", "differ", ",", "source", ")", "raise", "SystemExit", "(", "exit_code", ")" ]
compare coverage of two Cobertura reports
[ "compare", "coverage", "of", "two", "Cobertura", "reports" ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cli.py#L141-L176
aconrad/pycobertura
pycobertura/filesystem.py
DirectoryFileSystem.open
def open(self, filename): """ Yield a file-like object for file `filename`. This function is a context manager. """ filename = self.real_filename(filename) if not os.path.exists(filename): raise self.FileNotFound(filename) with codecs.open(filename, encoding='utf-8') as f: yield f
python
def open(self, filename): """ Yield a file-like object for file `filename`. This function is a context manager. """ filename = self.real_filename(filename) if not os.path.exists(filename): raise self.FileNotFound(filename) with codecs.open(filename, encoding='utf-8') as f: yield f
[ "def", "open", "(", "self", ",", "filename", ")", ":", "filename", "=", "self", ".", "real_filename", "(", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "raise", "self", ".", "FileNotFound", "(", "filename", ")", "with", "codecs", ".", "open", "(", "filename", ",", "encoding", "=", "'utf-8'", ")", "as", "f", ":", "yield", "f" ]
Yield a file-like object for file `filename`. This function is a context manager.
[ "Yield", "a", "file", "-", "like", "object", "for", "file", "filename", "." ]
train
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/filesystem.py#L31-L43
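DirectoryFileSystem.open is a context manager that resolves the requested path against the filesystem's source directory and raises FileNotFound when nothing is there. A usage sketch; the constructor argument is an assumption, since only the open method appears in this record:

from pycobertura.filesystem import DirectoryFileSystem

fs = DirectoryFileSystem('path/to/project')  # constructor signature assumed

with fs.open('pycobertura/utils.py') as f:   # path resolved relative to the source directory
    print(f.readline())

try:
    with fs.open('does_not_exist.py'):
        pass
except fs.FileNotFound:
    print('missing source file')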
pyros-dev/pyros
pyros/client/client.py
PyrosClient.topic_inject
def topic_inject(self, topic_name, _msg_content=None, **kwargs): """ Injecting message into topic. if _msg_content, we inject it directly. if not, we use all extra kwargs :param topic_name: name of the topic :param _msg_content: optional message content :param kwargs: each extra kwarg will be put int he message is structure matches :return: """ #changing unicode to string ( testing stability of multiprocess debugging ) if isinstance(topic_name, unicode): topic_name = unicodedata.normalize('NFKD', topic_name).encode('ascii', 'ignore') if _msg_content is not None: # logging.warn("injecting {msg} into {topic}".format(msg=_msg_content, topic=topic_name)) res = self.topic_svc.call(args=(topic_name, _msg_content,)) else: # default kwargs is {} # logging.warn("injecting {msg} into {topic}".format(msg=kwargs, topic=topic_name)) res = self.topic_svc.call(args=(topic_name, kwargs,)) return res is None
python
def topic_inject(self, topic_name, _msg_content=None, **kwargs): """ Injecting message into topic. if _msg_content, we inject it directly. if not, we use all extra kwargs :param topic_name: name of the topic :param _msg_content: optional message content :param kwargs: each extra kwarg will be put int he message is structure matches :return: """ #changing unicode to string ( testing stability of multiprocess debugging ) if isinstance(topic_name, unicode): topic_name = unicodedata.normalize('NFKD', topic_name).encode('ascii', 'ignore') if _msg_content is not None: # logging.warn("injecting {msg} into {topic}".format(msg=_msg_content, topic=topic_name)) res = self.topic_svc.call(args=(topic_name, _msg_content,)) else: # default kwargs is {} # logging.warn("injecting {msg} into {topic}".format(msg=kwargs, topic=topic_name)) res = self.topic_svc.call(args=(topic_name, kwargs,)) return res is None
[ "def", "topic_inject", "(", "self", ",", "topic_name", ",", "_msg_content", "=", "None", ",", "*", "*", "kwargs", ")", ":", "#changing unicode to string ( testing stability of multiprocess debugging )", "if", "isinstance", "(", "topic_name", ",", "unicode", ")", ":", "topic_name", "=", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "topic_name", ")", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", "if", "_msg_content", "is", "not", "None", ":", "# logging.warn(\"injecting {msg} into {topic}\".format(msg=_msg_content, topic=topic_name))", "res", "=", "self", ".", "topic_svc", ".", "call", "(", "args", "=", "(", "topic_name", ",", "_msg_content", ",", ")", ")", "else", ":", "# default kwargs is {}", "# logging.warn(\"injecting {msg} into {topic}\".format(msg=kwargs, topic=topic_name))", "res", "=", "self", ".", "topic_svc", ".", "call", "(", "args", "=", "(", "topic_name", ",", "kwargs", ",", ")", ")", "return", "res", "is", "None" ]
Injecting message into topic. if _msg_content, we inject it directly. if not, we use all extra kwargs :param topic_name: name of the topic :param _msg_content: optional message content :param kwargs: each extra kwarg will be put in the message if structure matches :return:
[ "Injecting", "message", "into", "topic", ".", "if", "_msg_content", "we", "inject", "it", "directly", ".", "if", "not", "we", "use", "all", "extra", "kwargs", ":", "param", "topic_name", ":", "name", "of", "the", "topic", ":", "param", "_msg_content", ":", "optional", "message", "content", ":", "param", "kwargs", ":", "each", "extra", "kwarg", "will", "be", "put", "in", "the", "message", "if", "structure", "matches", ":", "return", ":" ]
train
https://github.com/pyros-dev/pyros/blob/59f6c8848a66481a4039cbf28b7673428181842e/pyros/client/client.py#L111-L130
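topic_inject builds the message either from _msg_content or from the extra keyword arguments, and it returns True when the underlying service call succeeds (the service returns None on success). A heavily hedged sketch: the import path is derived from the file path above, constructing PyrosClient needs connection details this record does not show, and the code's use of unicode suggests it targets Python 2.

from pyros.client.client import PyrosClient

client = PyrosClient()  # assumption: real usage needs the pyros node's connection information

# Inject a prebuilt message dict...
ok = client.topic_inject('/chatter', {'data': 'hello'})

# ...or let the message be assembled from keyword arguments.
ok = client.topic_inject('/chatter', data='hello')
print(ok)  # True if the injection succeeded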
pyros-dev/pyros
pyros/client/client.py
PyrosClient.param_set
def param_set(self, param_name, _value=None, **kwargs): """ Setting parameter. if _value, we inject it directly. if not, we use all extra kwargs :param topic_name: name of the topic :param _value: optional value :param kwargs: each extra kwarg will be put in the value if structure matches :return: """ #changing unicode to string ( testing stability of multiprocess debugging ) if isinstance(param_name, unicode): param_name = unicodedata.normalize('NFKD', param_name).encode('ascii', 'ignore') _value = _value or {} if kwargs: res = self.param_svc.call(args=(param_name, kwargs,)) elif _value is not None: res = self.param_svc.call(args=(param_name, _value,)) else: # if _msg_content is None the request is invalid. # just return something to mean False. res = 'WRONG SET' return res is None
python
def param_set(self, param_name, _value=None, **kwargs): """ Setting parameter. if _value, we inject it directly. if not, we use all extra kwargs :param topic_name: name of the topic :param _value: optional value :param kwargs: each extra kwarg will be put in the value if structure matches :return: """ #changing unicode to string ( testing stability of multiprocess debugging ) if isinstance(param_name, unicode): param_name = unicodedata.normalize('NFKD', param_name).encode('ascii', 'ignore') _value = _value or {} if kwargs: res = self.param_svc.call(args=(param_name, kwargs,)) elif _value is not None: res = self.param_svc.call(args=(param_name, _value,)) else: # if _msg_content is None the request is invalid. # just return something to mean False. res = 'WRONG SET' return res is None
[ "def", "param_set", "(", "self", ",", "param_name", ",", "_value", "=", "None", ",", "*", "*", "kwargs", ")", ":", "#changing unicode to string ( testing stability of multiprocess debugging )", "if", "isinstance", "(", "param_name", ",", "unicode", ")", ":", "param_name", "=", "unicodedata", ".", "normalize", "(", "'NFKD'", ",", "param_name", ")", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", "_value", "=", "_value", "or", "{", "}", "if", "kwargs", ":", "res", "=", "self", ".", "param_svc", ".", "call", "(", "args", "=", "(", "param_name", ",", "kwargs", ",", ")", ")", "elif", "_value", "is", "not", "None", ":", "res", "=", "self", ".", "param_svc", ".", "call", "(", "args", "=", "(", "param_name", ",", "_value", ",", ")", ")", "else", ":", "# if _msg_content is None the request is invalid.", "# just return something to mean False.", "res", "=", "'WRONG SET'", "return", "res", "is", "None" ]
Setting parameter. if _value, we inject it directly. if not, we use all extra kwargs :param topic_name: name of the topic :param _value: optional value :param kwargs: each extra kwarg will be put in the value if structure matches :return:
[ "Setting", "parameter", ".", "if", "_value", "we", "inject", "it", "directly", ".", "if", "not", "we", "use", "all", "extra", "kwargs", ":", "param", "topic_name", ":", "name", "of", "the", "topic", ":", "param", "_value", ":", "optional", "value", ":", "param", "kwargs", ":", "each", "extra", "kwarg", "will", "be", "put", "in", "the", "value", "if", "structure", "matches", ":", "return", ":" ]
train
https://github.com/pyros-dev/pyros/blob/59f6c8848a66481a4039cbf28b7673428181842e/pyros/client/client.py#L165-L187
pyros-dev/pyros
setup.py
PrepareReleaseCommand.run
def run(self): """runner""" # change version in code and changelog before running this subprocess.check_call("git commit CHANGELOG.rst pyros/_version.py CHANGELOG.rst -m 'v{0}'".format(__version__), shell=True) subprocess.check_call("git push", shell=True) print("You should verify travis checks, and you can publish this release with :") print(" python setup.py publish") sys.exit()
python
def run(self): """runner""" # change version in code and changelog before running this subprocess.check_call("git commit CHANGELOG.rst pyros/_version.py CHANGELOG.rst -m 'v{0}'".format(__version__), shell=True) subprocess.check_call("git push", shell=True) print("You should verify travis checks, and you can publish this release with :") print(" python setup.py publish") sys.exit()
[ "def", "run", "(", "self", ")", ":", "# change version in code and changelog before running this", "subprocess", ".", "check_call", "(", "\"git commit CHANGELOG.rst pyros/_version.py CHANGELOG.rst -m 'v{0}'\"", ".", "format", "(", "__version__", ")", ",", "shell", "=", "True", ")", "subprocess", ".", "check_call", "(", "\"git push\"", ",", "shell", "=", "True", ")", "print", "(", "\"You should verify travis checks, and you can publish this release with :\"", ")", "print", "(", "\" python setup.py publish\"", ")", "sys", ".", "exit", "(", ")" ]
runner
[ "runner" ]
train
https://github.com/pyros-dev/pyros/blob/59f6c8848a66481a4039cbf28b7673428181842e/setup.py#L40-L49
pyros-dev/pyros
pyros/__main__.py
run
def run(interface, config, logfile, ros_args): """ Start a pyros node. :param interface: the interface implementation (ROS, Mock, ZMP, etc.) :param config: the config file path, absolute, or relative to working directory :param logfile: the logfile path, absolute, or relative to working directory :param ros_args: the ros arguments (useful to absorb additional args when launched with roslaunch) """ logging.info( 'pyros started with : interface {interface} config {config} logfile {logfile} ros_args {ros_args}'.format( interface=interface, config=config, logfile=logfile, ros_args=ros_args)) if interface == 'ros': node_proc = pyros_rosinterface_launch(node_name='pyros_rosinterface', pyros_config=config, ros_argv=ros_args) else: node_proc = None # NOT IMPLEMENTED # node_proc.daemon = True # we do NOT want a daemon(would stop when this main process exits...) client_conn = node_proc.start()
python
def run(interface, config, logfile, ros_args): """ Start a pyros node. :param interface: the interface implementation (ROS, Mock, ZMP, etc.) :param config: the config file path, absolute, or relative to working directory :param logfile: the logfile path, absolute, or relative to working directory :param ros_args: the ros arguments (useful to absorb additional args when launched with roslaunch) """ logging.info( 'pyros started with : interface {interface} config {config} logfile {logfile} ros_args {ros_args}'.format( interface=interface, config=config, logfile=logfile, ros_args=ros_args)) if interface == 'ros': node_proc = pyros_rosinterface_launch(node_name='pyros_rosinterface', pyros_config=config, ros_argv=ros_args) else: node_proc = None # NOT IMPLEMENTED # node_proc.daemon = True # we do NOT want a daemon(would stop when this main process exits...) client_conn = node_proc.start()
[ "def", "run", "(", "interface", ",", "config", ",", "logfile", ",", "ros_args", ")", ":", "logging", ".", "info", "(", "'pyros started with : interface {interface} config {config} logfile {logfile} ros_args {ros_args}'", ".", "format", "(", "interface", "=", "interface", ",", "config", "=", "config", ",", "logfile", "=", "logfile", ",", "ros_args", "=", "ros_args", ")", ")", "if", "interface", "==", "'ros'", ":", "node_proc", "=", "pyros_rosinterface_launch", "(", "node_name", "=", "'pyros_rosinterface'", ",", "pyros_config", "=", "config", ",", "ros_argv", "=", "ros_args", ")", "else", ":", "node_proc", "=", "None", "# NOT IMPLEMENTED", "# node_proc.daemon = True # we do NOT want a daemon(would stop when this main process exits...)", "client_conn", "=", "node_proc", ".", "start", "(", ")" ]
Start a pyros node. :param interface: the interface implementation (ROS, Mock, ZMP, etc.) :param config: the config file path, absolute, or relative to working directory :param logfile: the logfile path, absolute, or relative to working directory :param ros_args: the ros arguments (useful to absorb additional args when launched with roslaunch)
[ "Start", "a", "pyros", "node", ".", ":", "param", "interface", ":", "the", "interface", "implementation", "(", "ROS", "Mock", "ZMP", "etc", ".", ")", ":", "param", "config", ":", "the", "config", "file", "path", "absolute", "or", "relative", "to", "working", "directory", ":", "param", "logfile", ":", "the", "logfile", "path", "absolute", "or", "relative", "to", "working", "directory", ":", "param", "ros_args", ":", "the", "ros", "arguments", "(", "useful", "to", "absorb", "additional", "args", "when", "launched", "with", "roslaunch", ")" ]
train
https://github.com/pyros-dev/pyros/blob/59f6c8848a66481a4039cbf28b7673428181842e/pyros/__main__.py#L207-L225
numberoverzero/bottom
bottom/unpack.py
nickmask
def nickmask(prefix: str, kwargs: Dict[str, Any]) -> None: """ store nick, user, host in kwargs if prefix is correct format """ if "!" in prefix and "@" in prefix: # From a user kwargs["nick"], remainder = prefix.split("!", 1) kwargs["user"], kwargs["host"] = remainder.split("@", 1) else: # From a server, probably the host kwargs["host"] = prefix
python
def nickmask(prefix: str, kwargs: Dict[str, Any]) -> None: """ store nick, user, host in kwargs if prefix is correct format """ if "!" in prefix and "@" in prefix: # From a user kwargs["nick"], remainder = prefix.split("!", 1) kwargs["user"], kwargs["host"] = remainder.split("@", 1) else: # From a server, probably the host kwargs["host"] = prefix
[ "def", "nickmask", "(", "prefix", ":", "str", ",", "kwargs", ":", "Dict", "[", "str", ",", "Any", "]", ")", "->", "None", ":", "if", "\"!\"", "in", "prefix", "and", "\"@\"", "in", "prefix", ":", "# From a user", "kwargs", "[", "\"nick\"", "]", ",", "remainder", "=", "prefix", ".", "split", "(", "\"!\"", ",", "1", ")", "kwargs", "[", "\"user\"", "]", ",", "kwargs", "[", "\"host\"", "]", "=", "remainder", ".", "split", "(", "\"@\"", ",", "1", ")", "else", ":", "# From a server, probably the host", "kwargs", "[", "\"host\"", "]", "=", "prefix" ]
store nick, user, host in kwargs if prefix is correct format
[ "store", "nick", "user", "host", "in", "kwargs", "if", "prefix", "is", "correct", "format" ]
train
https://github.com/numberoverzero/bottom/blob/9ba5f8e22d4990071e3606256e9bc1f64ec989fe/bottom/unpack.py#L175-L183
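nickmask mutates the kwargs dict in place, splitting a full user prefix into nick, user, and host, and treating anything else as a bare host. A quick check:

from bottom.unpack import nickmask

kwargs = {}
nickmask("WiZ!jto@tolsun.oulu.fi", kwargs)
assert kwargs == {"nick": "WiZ", "user": "jto", "host": "tolsun.oulu.fi"}

kwargs = {}
nickmask("irc.example.org", kwargs)
assert kwargs == {"host": "irc.example.org"}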
numberoverzero/bottom
bottom/unpack.py
split_line
def split_line(msg: str) -> Tuple[str, str, List[str]]: """ Parse message according to rfc 2812 for routing """ match = RE_IRCLINE.match(msg) if not match: raise ValueError("Invalid line") prefix = match.group("prefix") or "" command = match.group("command") params = (match.group("params") or "").split() message = match.group("message") or "" if message: params.append(message) return prefix, command, params
python
def split_line(msg: str) -> Tuple[str, str, List[str]]: """ Parse message according to rfc 2812 for routing """ match = RE_IRCLINE.match(msg) if not match: raise ValueError("Invalid line") prefix = match.group("prefix") or "" command = match.group("command") params = (match.group("params") or "").split() message = match.group("message") or "" if message: params.append(message) return prefix, command, params
[ "def", "split_line", "(", "msg", ":", "str", ")", "->", "Tuple", "[", "str", ",", "str", ",", "List", "[", "str", "]", "]", ":", "match", "=", "RE_IRCLINE", ".", "match", "(", "msg", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "\"Invalid line\"", ")", "prefix", "=", "match", ".", "group", "(", "\"prefix\"", ")", "or", "\"\"", "command", "=", "match", ".", "group", "(", "\"command\"", ")", "params", "=", "(", "match", ".", "group", "(", "\"params\"", ")", "or", "\"\"", ")", ".", "split", "(", ")", "message", "=", "match", ".", "group", "(", "\"message\"", ")", "or", "\"\"", "if", "message", ":", "params", ".", "append", "(", "message", ")", "return", "prefix", ",", "command", ",", "params" ]
Parse message according to rfc 2812 for routing
[ "Parse", "message", "according", "to", "rfc", "2812", "for", "routing" ]
train
https://github.com/numberoverzero/bottom/blob/9ba5f8e22d4990071e3606256e9bc1f64ec989fe/bottom/unpack.py#L190-L204
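split_line relies on the module-level RE_IRCLINE pattern, which is not part of this record, so the expected values in the comments are inferred from the rfc 2812 grammar the docstring cites rather than verified against the regex:

from bottom.unpack import split_line

prefix, command, params = split_line(":WiZ!jto@tolsun.oulu.fi PRIVMSG #bottom :hello there")
print(prefix)   # expected: WiZ!jto@tolsun.oulu.fi
print(command)  # expected: PRIVMSG
print(params)   # expected: ['#bottom', 'hello there']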
numberoverzero/bottom
bottom/pack.py
b
def b(field: str, kwargs: Dict[str, Any], present: Optional[Any] = None, missing: Any = '') -> str: """ Return `present` value (default to `field`) if `field` in `kwargs` and Truthy, otherwise return `missing` value """ if kwargs.get(field): return field if present is None else str(present) return str(missing)
python
def b(field: str, kwargs: Dict[str, Any], present: Optional[Any] = None, missing: Any = '') -> str: """ Return `present` value (default to `field`) if `field` in `kwargs` and Truthy, otherwise return `missing` value """ if kwargs.get(field): return field if present is None else str(present) return str(missing)
[ "def", "b", "(", "field", ":", "str", ",", "kwargs", ":", "Dict", "[", "str", ",", "Any", "]", ",", "present", ":", "Optional", "[", "Any", "]", "=", "None", ",", "missing", ":", "Any", "=", "''", ")", "->", "str", ":", "if", "kwargs", ".", "get", "(", "field", ")", ":", "return", "field", "if", "present", "is", "None", "else", "str", "(", "present", ")", "return", "str", "(", "missing", ")" ]
Return `present` value (default to `field`) if `field` in `kwargs` and Truthy, otherwise return `missing` value
[ "Return", "present", "value", "(", "default", "to", "field", ")", "if", "field", "in", "kwargs", "and", "Truthy", "otherwise", "return", "missing", "value" ]
train
https://github.com/numberoverzero/bottom/blob/9ba5f8e22d4990071e3606256e9bc1f64ec989fe/bottom/pack.py#L7-L15
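b is a small flag helper: it returns the present value when the field is truthy in kwargs and the missing value otherwise. Straight from the code above:

from bottom.pack import b

assert b("o", {"o": True}) == "o"               # truthy field, 'present' defaults to the field name
assert b("o", {"o": True}, present="0") == "0"  # explicit present value
assert b("o", {}) == ""                         # absent field falls back to 'missing'
assert b("o", {"o": False}, missing="*") == "*" # falsy counts as missing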
numberoverzero/bottom
bottom/pack.py
f
def f(field: str, kwargs: Dict[str, Any], default: Optional[Any] = None) -> str: """ Alias for more readable command construction """ if default is not None: return str(kwargs.get(field, default)) return str(kwargs[field])
python
def f(field: str, kwargs: Dict[str, Any], default: Optional[Any] = None) -> str: """ Alias for more readable command construction """ if default is not None: return str(kwargs.get(field, default)) return str(kwargs[field])
[ "def", "f", "(", "field", ":", "str", ",", "kwargs", ":", "Dict", "[", "str", ",", "Any", "]", ",", "default", ":", "Optional", "[", "Any", "]", "=", "None", ")", "->", "str", ":", "if", "default", "is", "not", "None", ":", "return", "str", "(", "kwargs", ".", "get", "(", "field", ",", "default", ")", ")", "return", "str", "(", "kwargs", "[", "field", "]", ")" ]
Alias for more readable command construction
[ "Alias", "for", "more", "readable", "command", "construction" ]
train
https://github.com/numberoverzero/bottom/blob/9ba5f8e22d4990071e3606256e9bc1f64ec989fe/bottom/pack.py#L18-L23
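f simply stringifies a kwargs field, with an optional default; passing default=None (or omitting it) makes the field required. For example:

from bottom.pack import f

assert f("nick", {"nick": "WiZ"}) == "WiZ"
assert f("mode", {}, 0) == "0"          # default used, then stringified
assert f("mode", {"mode": 8}, 0) == "8" # present value wins over the default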
numberoverzero/bottom
bottom/pack.py
pack
def pack(field: str, kwargs: Dict[str, Any], default: Optional[Any] = None, sep: str=',') -> str: """ Util for joining multiple fields with commas """ if default is not None: value = kwargs.get(field, default) else: value = kwargs[field] if isinstance(value, str): return value elif isinstance(value, collections.abc.Iterable): return sep.join(str(f) for f in value) else: return str(value)
python
def pack(field: str, kwargs: Dict[str, Any], default: Optional[Any] = None, sep: str=',') -> str: """ Util for joining multiple fields with commas """ if default is not None: value = kwargs.get(field, default) else: value = kwargs[field] if isinstance(value, str): return value elif isinstance(value, collections.abc.Iterable): return sep.join(str(f) for f in value) else: return str(value)
[ "def", "pack", "(", "field", ":", "str", ",", "kwargs", ":", "Dict", "[", "str", ",", "Any", "]", ",", "default", ":", "Optional", "[", "Any", "]", "=", "None", ",", "sep", ":", "str", "=", "','", ")", "->", "str", ":", "if", "default", "is", "not", "None", ":", "value", "=", "kwargs", ".", "get", "(", "field", ",", "default", ")", "else", ":", "value", "=", "kwargs", "[", "field", "]", "if", "isinstance", "(", "value", ",", "str", ")", ":", "return", "value", "elif", "isinstance", "(", "value", ",", "collections", ".", "abc", ".", "Iterable", ")", ":", "return", "sep", ".", "join", "(", "str", "(", "f", ")", "for", "f", "in", "value", ")", "else", ":", "return", "str", "(", "value", ")" ]
Util for joining multiple fields with commas
[ "Util", "for", "joining", "multiple", "fields", "with", "commas" ]
train
https://github.com/numberoverzero/bottom/blob/9ba5f8e22d4990071e3606256e9bc1f64ec989fe/bottom/pack.py#L26-L38
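pack passes strings through untouched, joins other iterables with the separator, and stringifies everything else, which is how multi-target arguments such as comma-separated channel lists get built by pack_command. For example:

from bottom.pack import pack

assert pack("channel", {"channel": "#bottom"}) == "#bottom"            # plain strings pass through
assert pack("channel", {"channel": ["#a", "#b"]}) == "#a,#b"           # iterables joined with ','
assert pack("nick", {"nick": ["WiZ", "syrk"]}, sep=" ") == "WiZ syrk"  # custom separator
assert pack("key", {}, default="") == ""                               # default honoured when field is absent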
numberoverzero/bottom
bottom/pack.py
pack_command
def pack_command(command: str, **kwargs: Any) -> str: """ Pack a command to send to an IRC server """ if not command: raise ValueError("Must provide a command") if not isinstance(command, str): raise ValueError("Command must be a string") command = command.upper() # ======================================================================== # For each command, provide: # 1. a link to the definition in rfc2812 # 2. the normalized grammar, which may not equate to the rfc grammar # the normalized grammar will use the keys expected in kwargs, # which usually do NOT line up with rfc2812. They may also make # optional fields which are required in rfc2812, by providing # the most common or reasonable defaults. # 3. exhaustive examples, preferring normalized form of # the rfc2812 examples # ======================================================================== # ======================================================================== # Normalized grammar: # : should not be provided; it denotes the beginning of the last # field, which may contain spaces # [] indicates an optional field # <> denote the key that the field will be filled with # because fields are filled from a dict, required fields may follow # optional fields - see USER command, where mode is optional # (and defaults to 0) # "" indicates a literal value that is inserted if present # ======================================================================== # PASS # https://tools.ietf.org/html/rfc2812#section-3.1.1 # PASS <password> # ---------- # PASS secretpasswordhere if command == "PASS": return "PASS " + f("password", kwargs) # NICK # https://tools.ietf.org/html/rfc2812#section-3.1.2 # NICK <nick> # ---------- # NICK Wiz elif command == "NICK": return "NICK " + f("nick", kwargs) # USER # https://tools.ietf.org/html/rfc2812#section-3.1.3 # USER <user> [<mode>] :<realname> # ---------- # USER guest 8 :Ronnie Reagan # USER guest :Ronnie Reagan elif command == "USER": return "USER {} {} * :{}".format( f("user", kwargs), f("mode", kwargs, 0), f("realname", kwargs)) # OPER # https://tools.ietf.org/html/rfc2812#section-3.1.4 # OPER <user> <password> # ---------- # OPER AzureDiamond hunter2 elif command == "OPER": return "OPER {} {}".format(f("user", kwargs), f("password", kwargs)) # USERMODE (renamed from MODE) # https://tools.ietf.org/html/rfc2812#section-3.1.5 # MODE <nick> [<modes>] # ---------- # MODE WiZ -w # MODE Angel +i # MODE elif command == "USERMODE": return "MODE {} {}".format(f("nick", kwargs), f("modes", kwargs, '')) # SERVICE # https://tools.ietf.org/html/rfc2812#section-3.1.6 # SERVICE <nick> <distribution> <type> :<info> # ---------- # SERVICE dict *.fr 0 :French elif command == "SERVICE": return "SERVICE {} * {} {} 0 :{}".format( f("nick", kwargs), f("distribution", kwargs), f("type", kwargs), f("info", kwargs)) # QUIT # https://tools.ietf.org/html/rfc2812#section-3.1.7 # QUIT :[<message>] # ---------- # QUIT :Gone to lunch # QUIT elif command == "QUIT": if "message" in kwargs: return "QUIT :" + f("message", kwargs) return "QUIT" # SQUIT # https://tools.ietf.org/html/rfc2812#section-3.1.8 # SQUIT <server> [<message>] # ---------- # SQUIT tolsun.oulu.fi :Bad Link # SQUIT tolsun.oulu.fi elif command == "SQUIT": base = "SQUIT " + f("server", kwargs) if "message" in kwargs: return base + " :" + f("message", kwargs) return base # JOIN # https://tools.ietf.org/html/rfc2812#section-3.2.1 # JOIN <channel> [<key>] # ---------- # JOIN #foo fookey # JOIN #foo # JOIN 0 elif command == "JOIN": return "JOIN {} 
{}".format(pack("channel", kwargs), pack("key", kwargs, '')) # PART # https://tools.ietf.org/html/rfc2812#section-3.2.2 # PART <channel> :[<message>] # ---------- # PART #foo :I lost # PART #foo elif command == "PART": base = "PART " + pack("channel", kwargs) if "message" in kwargs: return base + " :" + f("message", kwargs) return base # CHANNELMODE (renamed from MODE) # https://tools.ietf.org/html/rfc2812#section-3.2.3 # MODE <channel> <modes> [<params>] # ---------- # MODE #Finnish +imI *!*@*.fi # MODE #en-ops +v WiZ # MODE #Fins -s elif command == "CHANNELMODE": return "MODE {} {} {}".format(f("channel", kwargs), f("modes", kwargs), f("params", kwargs, '')) # TOPIC # https://tools.ietf.org/html/rfc2812#section-3.2.4 # TOPIC <channel> :[<message>] # ---------- # TOPIC #test :New topic # TOPIC #test : # TOPIC #test elif command == "TOPIC": base = "TOPIC " + f("channel", kwargs) if "message" in kwargs: return base + " :" + f("message", kwargs) return base # NAMES # https://tools.ietf.org/html/rfc2812#section-3.2.5 # NAMES [<channel>] [<target>] # ---------- # NAMES #twilight_zone remote.*.edu # NAMES #twilight_zone # NAMES elif command == "NAMES": if "channel" in kwargs: return "NAMES {} {}".format(pack("channel", kwargs), f("target", kwargs, '')) return "NAMES" # LIST # https://tools.ietf.org/html/rfc2812#section-3.2.6 # LIST [<channel>] [<target>] # ---------- # LIST #twilight_zone remote.*.edu # LIST #twilight_zone # LIST elif command == "LIST": if "channel" in kwargs: return "LIST {} {}".format(pack("channel", kwargs), f("target", kwargs, '')) return "LIST" # INVITE # https://tools.ietf.org/html/rfc2812#section-3.2.7 # INVITE <nick> <channel> # ---------- # INVITE Wiz #Twilight_Zone elif command == "INVITE": return "INVITE {} {}".format(f("nick", kwargs), f("channel", kwargs)) # KICK # https://tools.ietf.org/html/rfc2812#section-3.2.8 # KICK <channel> <nick> :[<message>] # ---------- # KICK #Finnish WiZ :Speaking English # KICK #Finnish WiZ,Wiz-Bot :Both speaking English # KICK #Finnish,#English WiZ,ZiW :Speaking wrong language elif command == "KICK": base = "KICK {} {}".format(pack("channel", kwargs), pack("nick", kwargs)) if "message" in kwargs: return base + " :" + pack("message", kwargs) return base # PRIVMSG # https://tools.ietf.org/html/rfc2812#section-3.3.1 # PRIVMSG <target> :<message> # ---------- # PRIVMSG Angel :yes I'm receiving it ! # PRIVMSG $*.fi :Server tolsun.oulu.fi rebooting. # PRIVMSG #Finnish :This message is in english elif command == "PRIVMSG": return "PRIVMSG {} :{}".format(f("target", kwargs), f("message", kwargs)) # NOTICE # https://tools.ietf.org/html/rfc2812#section-3.3.2 # NOTICE <target> :<message> # ---------- # NOTICE Angel :yes I'm receiving it ! # NOTICE $*.fi :Server tolsun.oulu.fi rebooting. 
# NOTICE #Finnish :This message is in english elif command == "NOTICE": return "NOTICE {} :{}".format(f("target", kwargs), f("message", kwargs)) # MOTD # https://tools.ietf.org/html/rfc2812#section-3.4.1 # MOTD [<target>] # ---------- # MOTD remote.*.edu # MOTD elif command == "MOTD": return "MOTD " + f("target", kwargs, '') # LUSERS # https://tools.ietf.org/html/rfc2812#section-3.4.2 # LUSERS [<mask>] [<target>] # ---------- # LUSERS *.edu remote.*.edu # LUSERS *.edu # LUSERS elif command == "LUSERS": if "mask" in kwargs: return "LUSERS {} {}".format(f("mask", kwargs), f("target", kwargs, '')) return "LUSERS" # VERSION # https://tools.ietf.org/html/rfc2812#section-3.4.3 # VERSION [<target>] # ---------- # VERSION remote.*.edu # VERSION elif command == "VERSION": return "VERSION " + f("target", kwargs, '') # STATS # https://tools.ietf.org/html/rfc2812#section-3.4.4 # STATS [<query>] [<target>] # ---------- # STATS m remote.*.edu # STATS m # STATS elif command == "STATS": if "query" in kwargs: return "STATS {} {}".format(f("query", kwargs), f("target", kwargs, '')) return "STATS" # LINKS # https://tools.ietf.org/html/rfc2812#section-3.4.5 # LINKS [<remote>] [<mask>] # ---------- # LINKS *.edu *.bu.edu # LINKS *.au # LINKS elif command == "LINKS": if "remote" in kwargs: return "LINKS {} {}".format(f("remote", kwargs), f("mask", kwargs)) elif "mask" in kwargs: return "LINKS " + f("mask", kwargs) return "LINKS" # TIME # https://tools.ietf.org/html/rfc2812#section-3.4.6 # TIME [<target>] # ---------- # TIME remote.*.edu # TIME elif command == "TIME": return "TIME " + f("target", kwargs, '') # CONNECT # https://tools.ietf.org/html/rfc2812#section-3.4.7 # CONNECT <target> <port> [<remote>] # ---------- # CONNECT tolsun.oulu.fi 6667 *.edu # CONNECT tolsun.oulu.fi 6667 elif command == "CONNECT": return "CONNECT {} {} {}".format(f("target", kwargs), f("port", kwargs), f("remote", kwargs, '')) # TRACE # https://tools.ietf.org/html/rfc2812#section-3.4.8 # TRACE [<target>] # ---------- # TRACE elif command == "TRACE": return "TRACE " + f("target", kwargs, '') # ADMIN # https://tools.ietf.org/html/rfc2812#section-3.4.9 # ADMIN [<target>] # ---------- # ADMIN elif command == "ADMIN": return "ADMIN " + f("target", kwargs, '') # INFO # https://tools.ietf.org/html/rfc2812#section-3.4.10 # INFO [<target>] # ---------- # INFO elif command == "INFO": return "INFO " + f("target", kwargs, '') # SERVLIST # https://tools.ietf.org/html/rfc2812#section-3.5.1 # SERVLIST [<mask>] [<type>] # ---------- # SERVLIST *SERV 3 # SERVLIST *SERV # SERVLIST elif command == "SERVLIST": return "SERVLIST {} {}".format(f("mask", kwargs, ''), f("type", kwargs, '')) # SQUERY # https://tools.ietf.org/html/rfc2812#section-3.5.2 # SQUERY <target> :<message> # ---------- # SQUERY irchelp :HELP privmsg elif command == "SQUERY": return "SQUERY {} :{}".format(f("target", kwargs), f("message", kwargs)) # WHO # https://tools.ietf.org/html/rfc2812#section-3.6.1 # WHO [<mask>] ["o"] # ---------- # WHO jto* o # WHO *.fi # WHO elif command == "WHO": return "WHO {} {}".format(f("mask", kwargs, ''), b("o", kwargs)) # WHOIS # https://tools.ietf.org/html/rfc2812#section-3.6.2 # WHOIS <mask> [<target>] # ---------- # WHOIS jto* o remote.*.edu # WHOIS jto* o # WHOIS *.fi elif command == "WHOIS": return "WHOIS {} {}".format(pack("mask", kwargs), f("target", kwargs, '')) # WHOWAS # https://tools.ietf.org/html/rfc2812#section-3.6.3 # WHOWAS <nick> [<count>] [<target>] # ---------- # WHOWAS Wiz 9 remote.*.edu # WHOWAS Wiz 9 # WHOWAS Mermaid elif command == 
"WHOWAS": if "count" in kwargs: return "WHOWAS {} {} {}".format(pack("nick", kwargs), f("count", kwargs), f("target", kwargs, '')) return "WHOWAS " + pack("nick", kwargs) # KILL # https://tools.ietf.org/html/rfc2812#section-3.7.1 # KILL <nick> :<message> # ---------- # KILL WiZ :Spamming joins elif command == "KILL": return "KILL {} :{}".format(f("nick", kwargs), f("message", kwargs)) # PING # https://tools.ietf.org/html/rfc2812#section-3.7.2 # PING :[<message>] # ---------- # PING :I'm still here # PING elif command == "PING": if "message" in kwargs: return "PING :{}".format(f("message", kwargs)) else: return "PING" # PONG # https://tools.ietf.org/html/rfc2812#section-3.7.3 # PONG :[<message>] # ---------- # PONG :I'm still here # PONG elif command == "PONG": if "message" in kwargs: return "PONG :{}".format(f("message", kwargs)) else: return "PONG" # AWAY # https://tools.ietf.org/html/rfc2812#section-4.1 # AWAY :[<message>] # ---------- # AWAY :Gone to lunch. # AWAY elif command == "AWAY": if "message" in kwargs: return "AWAY :" + f("message", kwargs) return "AWAY" # REHASH # https://tools.ietf.org/html/rfc2812#section-4.2 # REHASH # ---------- # REHASH elif command == "REHASH": return "REHASH" # DIE # https://tools.ietf.org/html/rfc2812#section-4.3 # DIE # ---------- # DIE elif command == "DIE": return "DIE" # RESTART # https://tools.ietf.org/html/rfc2812#section-4.4 # RESTART # ---------- # RESTART elif command == "RESTART": return "RESTART" # SUMMON # https://tools.ietf.org/html/rfc2812#section-4.5 # SUMMON <nick> [<target>] [<channel>] # ---------- # SUMMON Wiz remote.*.edu #Finnish # SUMMON Wiz remote.*.edu # SUMMON Wiz elif command == "SUMMON": if "target" in kwargs: return "SUMMON {} {} {}".format(f("nick", kwargs), f("target", kwargs), f("channel", kwargs, '')) return "SUMMON " + f("nick", kwargs) # USERS # https://tools.ietf.org/html/rfc2812#section-4.6 # USERS [<target>] # ---------- # USERS remote.*.edu # USERS elif command == "USERS": return "USERS " + f("target", kwargs, '') # WALLOPS # https://tools.ietf.org/html/rfc2812#section-4.7 # WALLOPS :<message> # ---------- # WALLOPS :Maintenance in 5 minutes elif command == "WALLOPS": return "WALLOPS :" + f("message", kwargs) # USERHOST # https://tools.ietf.org/html/rfc2812#section-4.8 # USERHOST <nick> # ---------- # USERHOST Wiz Michael syrk # USERHOST syrk elif command == "USERHOST": return "USERHOST " + pack("nick", kwargs, sep=" ") # ISON # https://tools.ietf.org/html/rfc2812#section-4.9 # ISON <nick> # ---------- # ISON Wiz Michael syrk # ISON syrk elif command == "ISON": return "ISON " + pack("nick", kwargs, sep=" ") else: raise ValueError("Unknown command '{}'".format(command))
python
def pack_command(command: str, **kwargs: Any) -> str: """ Pack a command to send to an IRC server """ if not command: raise ValueError("Must provide a command") if not isinstance(command, str): raise ValueError("Command must be a string") command = command.upper() # ======================================================================== # For each command, provide: # 1. a link to the definition in rfc2812 # 2. the normalized grammar, which may not equate to the rfc grammar # the normalized grammar will use the keys expected in kwargs, # which usually do NOT line up with rfc2812. They may also make # optional fields which are required in rfc2812, by providing # the most common or reasonable defaults. # 3. exhaustive examples, preferring normalized form of # the rfc2812 examples # ======================================================================== # ======================================================================== # Normalized grammar: # : should not be provided; it denotes the beginning of the last # field, which may contain spaces # [] indicates an optional field # <> denote the key that the field will be filled with # because fields are filled from a dict, required fields may follow # optional fields - see USER command, where mode is optional # (and defaults to 0) # "" indicates a literal value that is inserted if present # ======================================================================== # PASS # https://tools.ietf.org/html/rfc2812#section-3.1.1 # PASS <password> # ---------- # PASS secretpasswordhere if command == "PASS": return "PASS " + f("password", kwargs) # NICK # https://tools.ietf.org/html/rfc2812#section-3.1.2 # NICK <nick> # ---------- # NICK Wiz elif command == "NICK": return "NICK " + f("nick", kwargs) # USER # https://tools.ietf.org/html/rfc2812#section-3.1.3 # USER <user> [<mode>] :<realname> # ---------- # USER guest 8 :Ronnie Reagan # USER guest :Ronnie Reagan elif command == "USER": return "USER {} {} * :{}".format( f("user", kwargs), f("mode", kwargs, 0), f("realname", kwargs)) # OPER # https://tools.ietf.org/html/rfc2812#section-3.1.4 # OPER <user> <password> # ---------- # OPER AzureDiamond hunter2 elif command == "OPER": return "OPER {} {}".format(f("user", kwargs), f("password", kwargs)) # USERMODE (renamed from MODE) # https://tools.ietf.org/html/rfc2812#section-3.1.5 # MODE <nick> [<modes>] # ---------- # MODE WiZ -w # MODE Angel +i # MODE elif command == "USERMODE": return "MODE {} {}".format(f("nick", kwargs), f("modes", kwargs, '')) # SERVICE # https://tools.ietf.org/html/rfc2812#section-3.1.6 # SERVICE <nick> <distribution> <type> :<info> # ---------- # SERVICE dict *.fr 0 :French elif command == "SERVICE": return "SERVICE {} * {} {} 0 :{}".format( f("nick", kwargs), f("distribution", kwargs), f("type", kwargs), f("info", kwargs)) # QUIT # https://tools.ietf.org/html/rfc2812#section-3.1.7 # QUIT :[<message>] # ---------- # QUIT :Gone to lunch # QUIT elif command == "QUIT": if "message" in kwargs: return "QUIT :" + f("message", kwargs) return "QUIT" # SQUIT # https://tools.ietf.org/html/rfc2812#section-3.1.8 # SQUIT <server> [<message>] # ---------- # SQUIT tolsun.oulu.fi :Bad Link # SQUIT tolsun.oulu.fi elif command == "SQUIT": base = "SQUIT " + f("server", kwargs) if "message" in kwargs: return base + " :" + f("message", kwargs) return base # JOIN # https://tools.ietf.org/html/rfc2812#section-3.2.1 # JOIN <channel> [<key>] # ---------- # JOIN #foo fookey # JOIN #foo # JOIN 0 elif command == "JOIN": return "JOIN {} 
{}".format(pack("channel", kwargs), pack("key", kwargs, '')) # PART # https://tools.ietf.org/html/rfc2812#section-3.2.2 # PART <channel> :[<message>] # ---------- # PART #foo :I lost # PART #foo elif command == "PART": base = "PART " + pack("channel", kwargs) if "message" in kwargs: return base + " :" + f("message", kwargs) return base # CHANNELMODE (renamed from MODE) # https://tools.ietf.org/html/rfc2812#section-3.2.3 # MODE <channel> <modes> [<params>] # ---------- # MODE #Finnish +imI *!*@*.fi # MODE #en-ops +v WiZ # MODE #Fins -s elif command == "CHANNELMODE": return "MODE {} {} {}".format(f("channel", kwargs), f("modes", kwargs), f("params", kwargs, '')) # TOPIC # https://tools.ietf.org/html/rfc2812#section-3.2.4 # TOPIC <channel> :[<message>] # ---------- # TOPIC #test :New topic # TOPIC #test : # TOPIC #test elif command == "TOPIC": base = "TOPIC " + f("channel", kwargs) if "message" in kwargs: return base + " :" + f("message", kwargs) return base # NAMES # https://tools.ietf.org/html/rfc2812#section-3.2.5 # NAMES [<channel>] [<target>] # ---------- # NAMES #twilight_zone remote.*.edu # NAMES #twilight_zone # NAMES elif command == "NAMES": if "channel" in kwargs: return "NAMES {} {}".format(pack("channel", kwargs), f("target", kwargs, '')) return "NAMES" # LIST # https://tools.ietf.org/html/rfc2812#section-3.2.6 # LIST [<channel>] [<target>] # ---------- # LIST #twilight_zone remote.*.edu # LIST #twilight_zone # LIST elif command == "LIST": if "channel" in kwargs: return "LIST {} {}".format(pack("channel", kwargs), f("target", kwargs, '')) return "LIST" # INVITE # https://tools.ietf.org/html/rfc2812#section-3.2.7 # INVITE <nick> <channel> # ---------- # INVITE Wiz #Twilight_Zone elif command == "INVITE": return "INVITE {} {}".format(f("nick", kwargs), f("channel", kwargs)) # KICK # https://tools.ietf.org/html/rfc2812#section-3.2.8 # KICK <channel> <nick> :[<message>] # ---------- # KICK #Finnish WiZ :Speaking English # KICK #Finnish WiZ,Wiz-Bot :Both speaking English # KICK #Finnish,#English WiZ,ZiW :Speaking wrong language elif command == "KICK": base = "KICK {} {}".format(pack("channel", kwargs), pack("nick", kwargs)) if "message" in kwargs: return base + " :" + pack("message", kwargs) return base # PRIVMSG # https://tools.ietf.org/html/rfc2812#section-3.3.1 # PRIVMSG <target> :<message> # ---------- # PRIVMSG Angel :yes I'm receiving it ! # PRIVMSG $*.fi :Server tolsun.oulu.fi rebooting. # PRIVMSG #Finnish :This message is in english elif command == "PRIVMSG": return "PRIVMSG {} :{}".format(f("target", kwargs), f("message", kwargs)) # NOTICE # https://tools.ietf.org/html/rfc2812#section-3.3.2 # NOTICE <target> :<message> # ---------- # NOTICE Angel :yes I'm receiving it ! # NOTICE $*.fi :Server tolsun.oulu.fi rebooting. 
# NOTICE #Finnish :This message is in english elif command == "NOTICE": return "NOTICE {} :{}".format(f("target", kwargs), f("message", kwargs)) # MOTD # https://tools.ietf.org/html/rfc2812#section-3.4.1 # MOTD [<target>] # ---------- # MOTD remote.*.edu # MOTD elif command == "MOTD": return "MOTD " + f("target", kwargs, '') # LUSERS # https://tools.ietf.org/html/rfc2812#section-3.4.2 # LUSERS [<mask>] [<target>] # ---------- # LUSERS *.edu remote.*.edu # LUSERS *.edu # LUSERS elif command == "LUSERS": if "mask" in kwargs: return "LUSERS {} {}".format(f("mask", kwargs), f("target", kwargs, '')) return "LUSERS" # VERSION # https://tools.ietf.org/html/rfc2812#section-3.4.3 # VERSION [<target>] # ---------- # VERSION remote.*.edu # VERSION elif command == "VERSION": return "VERSION " + f("target", kwargs, '') # STATS # https://tools.ietf.org/html/rfc2812#section-3.4.4 # STATS [<query>] [<target>] # ---------- # STATS m remote.*.edu # STATS m # STATS elif command == "STATS": if "query" in kwargs: return "STATS {} {}".format(f("query", kwargs), f("target", kwargs, '')) return "STATS" # LINKS # https://tools.ietf.org/html/rfc2812#section-3.4.5 # LINKS [<remote>] [<mask>] # ---------- # LINKS *.edu *.bu.edu # LINKS *.au # LINKS elif command == "LINKS": if "remote" in kwargs: return "LINKS {} {}".format(f("remote", kwargs), f("mask", kwargs)) elif "mask" in kwargs: return "LINKS " + f("mask", kwargs) return "LINKS" # TIME # https://tools.ietf.org/html/rfc2812#section-3.4.6 # TIME [<target>] # ---------- # TIME remote.*.edu # TIME elif command == "TIME": return "TIME " + f("target", kwargs, '') # CONNECT # https://tools.ietf.org/html/rfc2812#section-3.4.7 # CONNECT <target> <port> [<remote>] # ---------- # CONNECT tolsun.oulu.fi 6667 *.edu # CONNECT tolsun.oulu.fi 6667 elif command == "CONNECT": return "CONNECT {} {} {}".format(f("target", kwargs), f("port", kwargs), f("remote", kwargs, '')) # TRACE # https://tools.ietf.org/html/rfc2812#section-3.4.8 # TRACE [<target>] # ---------- # TRACE elif command == "TRACE": return "TRACE " + f("target", kwargs, '') # ADMIN # https://tools.ietf.org/html/rfc2812#section-3.4.9 # ADMIN [<target>] # ---------- # ADMIN elif command == "ADMIN": return "ADMIN " + f("target", kwargs, '') # INFO # https://tools.ietf.org/html/rfc2812#section-3.4.10 # INFO [<target>] # ---------- # INFO elif command == "INFO": return "INFO " + f("target", kwargs, '') # SERVLIST # https://tools.ietf.org/html/rfc2812#section-3.5.1 # SERVLIST [<mask>] [<type>] # ---------- # SERVLIST *SERV 3 # SERVLIST *SERV # SERVLIST elif command == "SERVLIST": return "SERVLIST {} {}".format(f("mask", kwargs, ''), f("type", kwargs, '')) # SQUERY # https://tools.ietf.org/html/rfc2812#section-3.5.2 # SQUERY <target> :<message> # ---------- # SQUERY irchelp :HELP privmsg elif command == "SQUERY": return "SQUERY {} :{}".format(f("target", kwargs), f("message", kwargs)) # WHO # https://tools.ietf.org/html/rfc2812#section-3.6.1 # WHO [<mask>] ["o"] # ---------- # WHO jto* o # WHO *.fi # WHO elif command == "WHO": return "WHO {} {}".format(f("mask", kwargs, ''), b("o", kwargs)) # WHOIS # https://tools.ietf.org/html/rfc2812#section-3.6.2 # WHOIS <mask> [<target>] # ---------- # WHOIS jto* o remote.*.edu # WHOIS jto* o # WHOIS *.fi elif command == "WHOIS": return "WHOIS {} {}".format(pack("mask", kwargs), f("target", kwargs, '')) # WHOWAS # https://tools.ietf.org/html/rfc2812#section-3.6.3 # WHOWAS <nick> [<count>] [<target>] # ---------- # WHOWAS Wiz 9 remote.*.edu # WHOWAS Wiz 9 # WHOWAS Mermaid elif command == 
"WHOWAS": if "count" in kwargs: return "WHOWAS {} {} {}".format(pack("nick", kwargs), f("count", kwargs), f("target", kwargs, '')) return "WHOWAS " + pack("nick", kwargs) # KILL # https://tools.ietf.org/html/rfc2812#section-3.7.1 # KILL <nick> :<message> # ---------- # KILL WiZ :Spamming joins elif command == "KILL": return "KILL {} :{}".format(f("nick", kwargs), f("message", kwargs)) # PING # https://tools.ietf.org/html/rfc2812#section-3.7.2 # PING :[<message>] # ---------- # PING :I'm still here # PING elif command == "PING": if "message" in kwargs: return "PING :{}".format(f("message", kwargs)) else: return "PING" # PONG # https://tools.ietf.org/html/rfc2812#section-3.7.3 # PONG :[<message>] # ---------- # PONG :I'm still here # PONG elif command == "PONG": if "message" in kwargs: return "PONG :{}".format(f("message", kwargs)) else: return "PONG" # AWAY # https://tools.ietf.org/html/rfc2812#section-4.1 # AWAY :[<message>] # ---------- # AWAY :Gone to lunch. # AWAY elif command == "AWAY": if "message" in kwargs: return "AWAY :" + f("message", kwargs) return "AWAY" # REHASH # https://tools.ietf.org/html/rfc2812#section-4.2 # REHASH # ---------- # REHASH elif command == "REHASH": return "REHASH" # DIE # https://tools.ietf.org/html/rfc2812#section-4.3 # DIE # ---------- # DIE elif command == "DIE": return "DIE" # RESTART # https://tools.ietf.org/html/rfc2812#section-4.4 # RESTART # ---------- # RESTART elif command == "RESTART": return "RESTART" # SUMMON # https://tools.ietf.org/html/rfc2812#section-4.5 # SUMMON <nick> [<target>] [<channel>] # ---------- # SUMMON Wiz remote.*.edu #Finnish # SUMMON Wiz remote.*.edu # SUMMON Wiz elif command == "SUMMON": if "target" in kwargs: return "SUMMON {} {} {}".format(f("nick", kwargs), f("target", kwargs), f("channel", kwargs, '')) return "SUMMON " + f("nick", kwargs) # USERS # https://tools.ietf.org/html/rfc2812#section-4.6 # USERS [<target>] # ---------- # USERS remote.*.edu # USERS elif command == "USERS": return "USERS " + f("target", kwargs, '') # WALLOPS # https://tools.ietf.org/html/rfc2812#section-4.7 # WALLOPS :<message> # ---------- # WALLOPS :Maintenance in 5 minutes elif command == "WALLOPS": return "WALLOPS :" + f("message", kwargs) # USERHOST # https://tools.ietf.org/html/rfc2812#section-4.8 # USERHOST <nick> # ---------- # USERHOST Wiz Michael syrk # USERHOST syrk elif command == "USERHOST": return "USERHOST " + pack("nick", kwargs, sep=" ") # ISON # https://tools.ietf.org/html/rfc2812#section-4.9 # ISON <nick> # ---------- # ISON Wiz Michael syrk # ISON syrk elif command == "ISON": return "ISON " + pack("nick", kwargs, sep=" ") else: raise ValueError("Unknown command '{}'".format(command))
[ "def", "pack_command", "(", "command", ":", "str", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "str", ":", "if", "not", "command", ":", "raise", "ValueError", "(", "\"Must provide a command\"", ")", "if", "not", "isinstance", "(", "command", ",", "str", ")", ":", "raise", "ValueError", "(", "\"Command must be a string\"", ")", "command", "=", "command", ".", "upper", "(", ")", "# ========================================================================", "# For each command, provide:", "# 1. a link to the definition in rfc2812", "# 2. the normalized grammar, which may not equate to the rfc grammar", "# the normalized grammar will use the keys expected in kwargs,", "# which usually do NOT line up with rfc2812. They may also make", "# optional fields which are required in rfc2812, by providing", "# the most common or reasonable defaults.", "# 3. exhaustive examples, preferring normalized form of", "# the rfc2812 examples", "# ========================================================================", "# ========================================================================", "# Normalized grammar:", "# : should not be provided; it denotes the beginning of the last", "# field, which may contain spaces", "# [] indicates an optional field", "# <> denote the key that the field will be filled with", "# because fields are filled from a dict, required fields may follow", "# optional fields - see USER command, where mode is optional", "# (and defaults to 0)", "# \"\" indicates a literal value that is inserted if present", "# ========================================================================", "# PASS", "# https://tools.ietf.org/html/rfc2812#section-3.1.1", "# PASS <password>", "# ----------", "# PASS secretpasswordhere", "if", "command", "==", "\"PASS\"", ":", "return", "\"PASS \"", "+", "f", "(", "\"password\"", ",", "kwargs", ")", "# NICK", "# https://tools.ietf.org/html/rfc2812#section-3.1.2", "# NICK <nick>", "# ----------", "# NICK Wiz", "elif", "command", "==", "\"NICK\"", ":", "return", "\"NICK \"", "+", "f", "(", "\"nick\"", ",", "kwargs", ")", "# USER", "# https://tools.ietf.org/html/rfc2812#section-3.1.3", "# USER <user> [<mode>] :<realname>", "# ----------", "# USER guest 8 :Ronnie Reagan", "# USER guest :Ronnie Reagan", "elif", "command", "==", "\"USER\"", ":", "return", "\"USER {} {} * :{}\"", ".", "format", "(", "f", "(", "\"user\"", ",", "kwargs", ")", ",", "f", "(", "\"mode\"", ",", "kwargs", ",", "0", ")", ",", "f", "(", "\"realname\"", ",", "kwargs", ")", ")", "# OPER", "# https://tools.ietf.org/html/rfc2812#section-3.1.4", "# OPER <user> <password>", "# ----------", "# OPER AzureDiamond hunter2", "elif", "command", "==", "\"OPER\"", ":", "return", "\"OPER {} {}\"", ".", "format", "(", "f", "(", "\"user\"", ",", "kwargs", ")", ",", "f", "(", "\"password\"", ",", "kwargs", ")", ")", "# USERMODE (renamed from MODE)", "# https://tools.ietf.org/html/rfc2812#section-3.1.5", "# MODE <nick> [<modes>]", "# ----------", "# MODE WiZ -w", "# MODE Angel +i", "# MODE", "elif", "command", "==", "\"USERMODE\"", ":", "return", "\"MODE {} {}\"", ".", "format", "(", "f", "(", "\"nick\"", ",", "kwargs", ")", ",", "f", "(", "\"modes\"", ",", "kwargs", ",", "''", ")", ")", "# SERVICE", "# https://tools.ietf.org/html/rfc2812#section-3.1.6", "# SERVICE <nick> <distribution> <type> :<info>", "# ----------", "# SERVICE dict *.fr 0 :French", "elif", "command", "==", "\"SERVICE\"", ":", "return", "\"SERVICE {} * {} {} 0 :{}\"", ".", "format", "(", "f", "(", "\"nick\"", ",", "kwargs", ")", ",", 
"f", "(", "\"distribution\"", ",", "kwargs", ")", ",", "f", "(", "\"type\"", ",", "kwargs", ")", ",", "f", "(", "\"info\"", ",", "kwargs", ")", ")", "# QUIT", "# https://tools.ietf.org/html/rfc2812#section-3.1.7", "# QUIT :[<message>]", "# ----------", "# QUIT :Gone to lunch", "# QUIT", "elif", "command", "==", "\"QUIT\"", ":", "if", "\"message\"", "in", "kwargs", ":", "return", "\"QUIT :\"", "+", "f", "(", "\"message\"", ",", "kwargs", ")", "return", "\"QUIT\"", "# SQUIT", "# https://tools.ietf.org/html/rfc2812#section-3.1.8", "# SQUIT <server> [<message>]", "# ----------", "# SQUIT tolsun.oulu.fi :Bad Link", "# SQUIT tolsun.oulu.fi", "elif", "command", "==", "\"SQUIT\"", ":", "base", "=", "\"SQUIT \"", "+", "f", "(", "\"server\"", ",", "kwargs", ")", "if", "\"message\"", "in", "kwargs", ":", "return", "base", "+", "\" :\"", "+", "f", "(", "\"message\"", ",", "kwargs", ")", "return", "base", "# JOIN", "# https://tools.ietf.org/html/rfc2812#section-3.2.1", "# JOIN <channel> [<key>]", "# ----------", "# JOIN #foo fookey", "# JOIN #foo", "# JOIN 0", "elif", "command", "==", "\"JOIN\"", ":", "return", "\"JOIN {} {}\"", ".", "format", "(", "pack", "(", "\"channel\"", ",", "kwargs", ")", ",", "pack", "(", "\"key\"", ",", "kwargs", ",", "''", ")", ")", "# PART", "# https://tools.ietf.org/html/rfc2812#section-3.2.2", "# PART <channel> :[<message>]", "# ----------", "# PART #foo :I lost", "# PART #foo", "elif", "command", "==", "\"PART\"", ":", "base", "=", "\"PART \"", "+", "pack", "(", "\"channel\"", ",", "kwargs", ")", "if", "\"message\"", "in", "kwargs", ":", "return", "base", "+", "\" :\"", "+", "f", "(", "\"message\"", ",", "kwargs", ")", "return", "base", "# CHANNELMODE (renamed from MODE)", "# https://tools.ietf.org/html/rfc2812#section-3.2.3", "# MODE <channel> <modes> [<params>]", "# ----------", "# MODE #Finnish +imI *!*@*.fi", "# MODE #en-ops +v WiZ", "# MODE #Fins -s", "elif", "command", "==", "\"CHANNELMODE\"", ":", "return", "\"MODE {} {} {}\"", ".", "format", "(", "f", "(", "\"channel\"", ",", "kwargs", ")", ",", "f", "(", "\"modes\"", ",", "kwargs", ")", ",", "f", "(", "\"params\"", ",", "kwargs", ",", "''", ")", ")", "# TOPIC", "# https://tools.ietf.org/html/rfc2812#section-3.2.4", "# TOPIC <channel> :[<message>]", "# ----------", "# TOPIC #test :New topic", "# TOPIC #test :", "# TOPIC #test", "elif", "command", "==", "\"TOPIC\"", ":", "base", "=", "\"TOPIC \"", "+", "f", "(", "\"channel\"", ",", "kwargs", ")", "if", "\"message\"", "in", "kwargs", ":", "return", "base", "+", "\" :\"", "+", "f", "(", "\"message\"", ",", "kwargs", ")", "return", "base", "# NAMES", "# https://tools.ietf.org/html/rfc2812#section-3.2.5", "# NAMES [<channel>] [<target>]", "# ----------", "# NAMES #twilight_zone remote.*.edu", "# NAMES #twilight_zone", "# NAMES", "elif", "command", "==", "\"NAMES\"", ":", "if", "\"channel\"", "in", "kwargs", ":", "return", "\"NAMES {} {}\"", ".", "format", "(", "pack", "(", "\"channel\"", ",", "kwargs", ")", ",", "f", "(", "\"target\"", ",", "kwargs", ",", "''", ")", ")", "return", "\"NAMES\"", "# LIST", "# https://tools.ietf.org/html/rfc2812#section-3.2.6", "# LIST [<channel>] [<target>]", "# ----------", "# LIST #twilight_zone remote.*.edu", "# LIST #twilight_zone", "# LIST", "elif", "command", "==", "\"LIST\"", ":", "if", "\"channel\"", "in", "kwargs", ":", "return", "\"LIST {} {}\"", ".", "format", "(", "pack", "(", "\"channel\"", ",", "kwargs", ")", ",", "f", "(", "\"target\"", ",", "kwargs", ",", "''", ")", ")", "return", "\"LIST\"", "# INVITE", "# 
https://tools.ietf.org/html/rfc2812#section-3.2.7", "# INVITE <nick> <channel>", "# ----------", "# INVITE Wiz #Twilight_Zone", "elif", "command", "==", "\"INVITE\"", ":", "return", "\"INVITE {} {}\"", ".", "format", "(", "f", "(", "\"nick\"", ",", "kwargs", ")", ",", "f", "(", "\"channel\"", ",", "kwargs", ")", ")", "# KICK", "# https://tools.ietf.org/html/rfc2812#section-3.2.8", "# KICK <channel> <nick> :[<message>]", "# ----------", "# KICK #Finnish WiZ :Speaking English", "# KICK #Finnish WiZ,Wiz-Bot :Both speaking English", "# KICK #Finnish,#English WiZ,ZiW :Speaking wrong language", "elif", "command", "==", "\"KICK\"", ":", "base", "=", "\"KICK {} {}\"", ".", "format", "(", "pack", "(", "\"channel\"", ",", "kwargs", ")", ",", "pack", "(", "\"nick\"", ",", "kwargs", ")", ")", "if", "\"message\"", "in", "kwargs", ":", "return", "base", "+", "\" :\"", "+", "pack", "(", "\"message\"", ",", "kwargs", ")", "return", "base", "# PRIVMSG", "# https://tools.ietf.org/html/rfc2812#section-3.3.1", "# PRIVMSG <target> :<message>", "# ----------", "# PRIVMSG Angel :yes I'm receiving it !", "# PRIVMSG $*.fi :Server tolsun.oulu.fi rebooting.", "# PRIVMSG #Finnish :This message is in english", "elif", "command", "==", "\"PRIVMSG\"", ":", "return", "\"PRIVMSG {} :{}\"", ".", "format", "(", "f", "(", "\"target\"", ",", "kwargs", ")", ",", "f", "(", "\"message\"", ",", "kwargs", ")", ")", "# NOTICE", "# https://tools.ietf.org/html/rfc2812#section-3.3.2", "# NOTICE <target> :<message>", "# ----------", "# NOTICE Angel :yes I'm receiving it !", "# NOTICE $*.fi :Server tolsun.oulu.fi rebooting.", "# NOTICE #Finnish :This message is in english", "elif", "command", "==", "\"NOTICE\"", ":", "return", "\"NOTICE {} :{}\"", ".", "format", "(", "f", "(", "\"target\"", ",", "kwargs", ")", ",", "f", "(", "\"message\"", ",", "kwargs", ")", ")", "# MOTD", "# https://tools.ietf.org/html/rfc2812#section-3.4.1", "# MOTD [<target>]", "# ----------", "# MOTD remote.*.edu", "# MOTD", "elif", "command", "==", "\"MOTD\"", ":", "return", "\"MOTD \"", "+", "f", "(", "\"target\"", ",", "kwargs", ",", "''", ")", "# LUSERS", "# https://tools.ietf.org/html/rfc2812#section-3.4.2", "# LUSERS [<mask>] [<target>]", "# ----------", "# LUSERS *.edu remote.*.edu", "# LUSERS *.edu", "# LUSERS", "elif", "command", "==", "\"LUSERS\"", ":", "if", "\"mask\"", "in", "kwargs", ":", "return", "\"LUSERS {} {}\"", ".", "format", "(", "f", "(", "\"mask\"", ",", "kwargs", ")", ",", "f", "(", "\"target\"", ",", "kwargs", ",", "''", ")", ")", "return", "\"LUSERS\"", "# VERSION", "# https://tools.ietf.org/html/rfc2812#section-3.4.3", "# VERSION [<target>]", "# ----------", "# VERSION remote.*.edu", "# VERSION", "elif", "command", "==", "\"VERSION\"", ":", "return", "\"VERSION \"", "+", "f", "(", "\"target\"", ",", "kwargs", ",", "''", ")", "# STATS", "# https://tools.ietf.org/html/rfc2812#section-3.4.4", "# STATS [<query>] [<target>]", "# ----------", "# STATS m remote.*.edu", "# STATS m", "# STATS", "elif", "command", "==", "\"STATS\"", ":", "if", "\"query\"", "in", "kwargs", ":", "return", "\"STATS {} {}\"", ".", "format", "(", "f", "(", "\"query\"", ",", "kwargs", ")", ",", "f", "(", "\"target\"", ",", "kwargs", ",", "''", ")", ")", "return", "\"STATS\"", "# LINKS", "# https://tools.ietf.org/html/rfc2812#section-3.4.5", "# LINKS [<remote>] [<mask>]", "# ----------", "# LINKS *.edu *.bu.edu", "# LINKS *.au", "# LINKS", "elif", "command", "==", "\"LINKS\"", ":", "if", "\"remote\"", "in", "kwargs", ":", "return", "\"LINKS {} {}\"", ".", "format", "(", 
"f", "(", "\"remote\"", ",", "kwargs", ")", ",", "f", "(", "\"mask\"", ",", "kwargs", ")", ")", "elif", "\"mask\"", "in", "kwargs", ":", "return", "\"LINKS \"", "+", "f", "(", "\"mask\"", ",", "kwargs", ")", "return", "\"LINKS\"", "# TIME", "# https://tools.ietf.org/html/rfc2812#section-3.4.6", "# TIME [<target>]", "# ----------", "# TIME remote.*.edu", "# TIME", "elif", "command", "==", "\"TIME\"", ":", "return", "\"TIME \"", "+", "f", "(", "\"target\"", ",", "kwargs", ",", "''", ")", "# CONNECT", "# https://tools.ietf.org/html/rfc2812#section-3.4.7", "# CONNECT <target> <port> [<remote>]", "# ----------", "# CONNECT tolsun.oulu.fi 6667 *.edu", "# CONNECT tolsun.oulu.fi 6667", "elif", "command", "==", "\"CONNECT\"", ":", "return", "\"CONNECT {} {} {}\"", ".", "format", "(", "f", "(", "\"target\"", ",", "kwargs", ")", ",", "f", "(", "\"port\"", ",", "kwargs", ")", ",", "f", "(", "\"remote\"", ",", "kwargs", ",", "''", ")", ")", "# TRACE", "# https://tools.ietf.org/html/rfc2812#section-3.4.8", "# TRACE [<target>]", "# ----------", "# TRACE", "elif", "command", "==", "\"TRACE\"", ":", "return", "\"TRACE \"", "+", "f", "(", "\"target\"", ",", "kwargs", ",", "''", ")", "# ADMIN", "# https://tools.ietf.org/html/rfc2812#section-3.4.9", "# ADMIN [<target>]", "# ----------", "# ADMIN", "elif", "command", "==", "\"ADMIN\"", ":", "return", "\"ADMIN \"", "+", "f", "(", "\"target\"", ",", "kwargs", ",", "''", ")", "# INFO", "# https://tools.ietf.org/html/rfc2812#section-3.4.10", "# INFO [<target>]", "# ----------", "# INFO", "elif", "command", "==", "\"INFO\"", ":", "return", "\"INFO \"", "+", "f", "(", "\"target\"", ",", "kwargs", ",", "''", ")", "# SERVLIST", "# https://tools.ietf.org/html/rfc2812#section-3.5.1", "# SERVLIST [<mask>] [<type>]", "# ----------", "# SERVLIST *SERV 3", "# SERVLIST *SERV", "# SERVLIST", "elif", "command", "==", "\"SERVLIST\"", ":", "return", "\"SERVLIST {} {}\"", ".", "format", "(", "f", "(", "\"mask\"", ",", "kwargs", ",", "''", ")", ",", "f", "(", "\"type\"", ",", "kwargs", ",", "''", ")", ")", "# SQUERY", "# https://tools.ietf.org/html/rfc2812#section-3.5.2", "# SQUERY <target> :<message>", "# ----------", "# SQUERY irchelp :HELP privmsg", "elif", "command", "==", "\"SQUERY\"", ":", "return", "\"SQUERY {} :{}\"", ".", "format", "(", "f", "(", "\"target\"", ",", "kwargs", ")", ",", "f", "(", "\"message\"", ",", "kwargs", ")", ")", "# WHO", "# https://tools.ietf.org/html/rfc2812#section-3.6.1", "# WHO [<mask>] [\"o\"]", "# ----------", "# WHO jto* o", "# WHO *.fi", "# WHO", "elif", "command", "==", "\"WHO\"", ":", "return", "\"WHO {} {}\"", ".", "format", "(", "f", "(", "\"mask\"", ",", "kwargs", ",", "''", ")", ",", "b", "(", "\"o\"", ",", "kwargs", ")", ")", "# WHOIS", "# https://tools.ietf.org/html/rfc2812#section-3.6.2", "# WHOIS <mask> [<target>]", "# ----------", "# WHOIS jto* o remote.*.edu", "# WHOIS jto* o", "# WHOIS *.fi", "elif", "command", "==", "\"WHOIS\"", ":", "return", "\"WHOIS {} {}\"", ".", "format", "(", "pack", "(", "\"mask\"", ",", "kwargs", ")", ",", "f", "(", "\"target\"", ",", "kwargs", ",", "''", ")", ")", "# WHOWAS", "# https://tools.ietf.org/html/rfc2812#section-3.6.3", "# WHOWAS <nick> [<count>] [<target>]", "# ----------", "# WHOWAS Wiz 9 remote.*.edu", "# WHOWAS Wiz 9", "# WHOWAS Mermaid", "elif", "command", "==", "\"WHOWAS\"", ":", "if", "\"count\"", "in", "kwargs", ":", "return", "\"WHOWAS {} {} {}\"", ".", "format", "(", "pack", "(", "\"nick\"", ",", "kwargs", ")", ",", "f", "(", "\"count\"", ",", "kwargs", ")", ",", "f", "(", 
"\"target\"", ",", "kwargs", ",", "''", ")", ")", "return", "\"WHOWAS \"", "+", "pack", "(", "\"nick\"", ",", "kwargs", ")", "# KILL", "# https://tools.ietf.org/html/rfc2812#section-3.7.1", "# KILL <nick> :<message>", "# ----------", "# KILL WiZ :Spamming joins", "elif", "command", "==", "\"KILL\"", ":", "return", "\"KILL {} :{}\"", ".", "format", "(", "f", "(", "\"nick\"", ",", "kwargs", ")", ",", "f", "(", "\"message\"", ",", "kwargs", ")", ")", "# PING", "# https://tools.ietf.org/html/rfc2812#section-3.7.2", "# PING :[<message>]", "# ----------", "# PING :I'm still here", "# PING", "elif", "command", "==", "\"PING\"", ":", "if", "\"message\"", "in", "kwargs", ":", "return", "\"PING :{}\"", ".", "format", "(", "f", "(", "\"message\"", ",", "kwargs", ")", ")", "else", ":", "return", "\"PING\"", "# PONG", "# https://tools.ietf.org/html/rfc2812#section-3.7.3", "# PONG :[<message>]", "# ----------", "# PONG :I'm still here", "# PONG", "elif", "command", "==", "\"PONG\"", ":", "if", "\"message\"", "in", "kwargs", ":", "return", "\"PONG :{}\"", ".", "format", "(", "f", "(", "\"message\"", ",", "kwargs", ")", ")", "else", ":", "return", "\"PONG\"", "# AWAY", "# https://tools.ietf.org/html/rfc2812#section-4.1", "# AWAY :[<message>]", "# ----------", "# AWAY :Gone to lunch.", "# AWAY", "elif", "command", "==", "\"AWAY\"", ":", "if", "\"message\"", "in", "kwargs", ":", "return", "\"AWAY :\"", "+", "f", "(", "\"message\"", ",", "kwargs", ")", "return", "\"AWAY\"", "# REHASH", "# https://tools.ietf.org/html/rfc2812#section-4.2", "# REHASH", "# ----------", "# REHASH", "elif", "command", "==", "\"REHASH\"", ":", "return", "\"REHASH\"", "# DIE", "# https://tools.ietf.org/html/rfc2812#section-4.3", "# DIE", "# ----------", "# DIE", "elif", "command", "==", "\"DIE\"", ":", "return", "\"DIE\"", "# RESTART", "# https://tools.ietf.org/html/rfc2812#section-4.4", "# RESTART", "# ----------", "# RESTART", "elif", "command", "==", "\"RESTART\"", ":", "return", "\"RESTART\"", "# SUMMON", "# https://tools.ietf.org/html/rfc2812#section-4.5", "# SUMMON <nick> [<target>] [<channel>]", "# ----------", "# SUMMON Wiz remote.*.edu #Finnish", "# SUMMON Wiz remote.*.edu", "# SUMMON Wiz", "elif", "command", "==", "\"SUMMON\"", ":", "if", "\"target\"", "in", "kwargs", ":", "return", "\"SUMMON {} {} {}\"", ".", "format", "(", "f", "(", "\"nick\"", ",", "kwargs", ")", ",", "f", "(", "\"target\"", ",", "kwargs", ")", ",", "f", "(", "\"channel\"", ",", "kwargs", ",", "''", ")", ")", "return", "\"SUMMON \"", "+", "f", "(", "\"nick\"", ",", "kwargs", ")", "# USERS", "# https://tools.ietf.org/html/rfc2812#section-4.6", "# USERS [<target>]", "# ----------", "# USERS remote.*.edu", "# USERS", "elif", "command", "==", "\"USERS\"", ":", "return", "\"USERS \"", "+", "f", "(", "\"target\"", ",", "kwargs", ",", "''", ")", "# WALLOPS", "# https://tools.ietf.org/html/rfc2812#section-4.7", "# WALLOPS :<message>", "# ----------", "# WALLOPS :Maintenance in 5 minutes", "elif", "command", "==", "\"WALLOPS\"", ":", "return", "\"WALLOPS :\"", "+", "f", "(", "\"message\"", ",", "kwargs", ")", "# USERHOST", "# https://tools.ietf.org/html/rfc2812#section-4.8", "# USERHOST <nick>", "# ----------", "# USERHOST Wiz Michael syrk", "# USERHOST syrk", "elif", "command", "==", "\"USERHOST\"", ":", "return", "\"USERHOST \"", "+", "pack", "(", "\"nick\"", ",", "kwargs", ",", "sep", "=", "\" \"", ")", "# ISON", "# https://tools.ietf.org/html/rfc2812#section-4.9", "# ISON <nick>", "# ----------", "# ISON Wiz Michael syrk", "# ISON syrk", "elif", "command", 
"==", "\"ISON\"", ":", "return", "\"ISON \"", "+", "pack", "(", "\"nick\"", ",", "kwargs", ",", "sep", "=", "\" \"", ")", "else", ":", "raise", "ValueError", "(", "\"Unknown command '{}'\"", ".", "format", "(", "command", ")", ")" ]
Pack a command to send to an IRC server
[ "Pack", "a", "command", "to", "send", "to", "an", "IRC", "server" ]
train
https://github.com/numberoverzero/bottom/blob/9ba5f8e22d4990071e3606256e9bc1f64ec989fe/bottom/pack.py#L41-L547
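A brief usage sketch for the pack_command record above. The import path bottom.pack is inferred from the file path in the record and is an assumption; the expected outputs in the comments follow directly from the branches of the function shown.

# Usage sketch (assumed import path, based on bottom/pack.py in the record).
from bottom.pack import pack_command

# Command names are case-insensitive; pack_command upper-cases them.
line = pack_command("privmsg", target="#python", message="hello, world")
print(line)  # PRIVMSG #python :hello, world

print(pack_command("nick", nick="weatherbot"))  # NICK weatherbot
print(pack_command("quit"))                     # QUIT

# Unknown commands raise ValueError with the upper-cased name.
try:
    pack_command("frobnicate")
except ValueError as exc:
    print(exc)  # Unknown command 'FROBNICATE'

Note that some optional fields fall back to empty strings, which can leave trailing whitespace; the Client.send record later in this section strips the packed line before sending it.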
numberoverzero/bottom
bottom/client.py
RawClient.connect
async def connect(self) -> None: """Open a connection to the defined server.""" def protocol_factory() -> Protocol: return Protocol(client=self) _, protocol = await self.loop.create_connection( protocol_factory, host=self.host, port=self.port, ssl=self.ssl ) # type: Tuple[Any, Any] if self.protocol: self.protocol.close() self.protocol = protocol # TODO: Delete the following code line. It is currently kept in order # to not break the current existing codebase. Removing it requires a # heavy change in the test codebase. protocol.client = self self.trigger("client_connect")
python
async def connect(self) -> None: """Open a connection to the defined server.""" def protocol_factory() -> Protocol: return Protocol(client=self) _, protocol = await self.loop.create_connection( protocol_factory, host=self.host, port=self.port, ssl=self.ssl ) # type: Tuple[Any, Any] if self.protocol: self.protocol.close() self.protocol = protocol # TODO: Delete the following code line. It is currently kept in order # to not break the current existing codebase. Removing it requires a # heavy change in the test codebase. protocol.client = self self.trigger("client_connect")
[ "async", "def", "connect", "(", "self", ")", "->", "None", ":", "def", "protocol_factory", "(", ")", "->", "Protocol", ":", "return", "Protocol", "(", "client", "=", "self", ")", "_", ",", "protocol", "=", "await", "self", ".", "loop", ".", "create_connection", "(", "protocol_factory", ",", "host", "=", "self", ".", "host", ",", "port", "=", "self", ".", "port", ",", "ssl", "=", "self", ".", "ssl", ")", "# type: Tuple[Any, Any]", "if", "self", ".", "protocol", ":", "self", ".", "protocol", ".", "close", "(", ")", "self", ".", "protocol", "=", "protocol", "# TODO: Delete the following code line. It is currently kept in order", "# to not break the current existing codebase. Removing it requires a", "# heavy change in the test codebase.", "protocol", ".", "client", "=", "self", "self", ".", "trigger", "(", "\"client_connect\"", ")" ]
Open a connection to the defined server.
[ "Open", "a", "connection", "to", "the", "defined", "server", "." ]
train
https://github.com/numberoverzero/bottom/blob/9ba5f8e22d4990071e3606256e9bc1f64ec989fe/bottom/client.py#L69-L87
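A hedged sketch of driving RawClient.connect from the record above. The bottom.Client constructor and its host/port/ssl keyword arguments are assumptions (the constructor is not part of these records); the client_connect event, client.loop, on() and send() all come from code shown in this section.

import bottom

# host/port/ssl keyword arguments are assumed; the constructor is not shown here.
client = bottom.Client(host="irc.example.org", port=6697, ssl=True)

@client.on("client_connect")
def register(**kwargs):
    # connect() triggers "client_connect" once the transport is up,
    # so this is the usual place to send NICK/USER.
    client.send("nick", nick="weatherbot")
    client.send("user", user="weatherbot", realname="weatherbot")

# Schedule the connect coroutine, then run until interrupted.
client.loop.create_task(client.connect())
client.loop.run_forever()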
numberoverzero/bottom
bottom/client.py
RawClient.trigger
def trigger(self, event: str, **kwargs: Any) -> None: """Trigger all handlers for an event to (asynchronously) execute""" event = event.upper() for func in self._event_handlers[event]: self.loop.create_task(func(**kwargs)) # This will unblock anyone that is awaiting on the next loop update, # while still ensuring the next `await client.wait(event)` doesn't # immediately fire. async_event = self._events[event] async_event.set() async_event.clear()
python
def trigger(self, event: str, **kwargs: Any) -> None: """Trigger all handlers for an event to (asynchronously) execute""" event = event.upper() for func in self._event_handlers[event]: self.loop.create_task(func(**kwargs)) # This will unblock anyone that is awaiting on the next loop update, # while still ensuring the next `await client.wait(event)` doesn't # immediately fire. async_event = self._events[event] async_event.set() async_event.clear()
[ "def", "trigger", "(", "self", ",", "event", ":", "str", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "None", ":", "event", "=", "event", ".", "upper", "(", ")", "for", "func", "in", "self", ".", "_event_handlers", "[", "event", "]", ":", "self", ".", "loop", ".", "create_task", "(", "func", "(", "*", "*", "kwargs", ")", ")", "# This will unblock anyone that is awaiting on the next loop update,", "# while still ensuring the next `await client.wait(event)` doesn't", "# immediately fire.", "async_event", "=", "self", ".", "_events", "[", "event", "]", "async_event", ".", "set", "(", ")", "async_event", ".", "clear", "(", ")" ]
Trigger all handlers for an event to (asynchronously) execute
[ "Trigger", "all", "handlers", "for", "an", "event", "to", "(", "asynchronously", ")", "execute" ]
train
https://github.com/numberoverzero/bottom/blob/9ba5f8e22d4990071e3606256e9bc1f64ec989fe/bottom/client.py#L94-L104
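A small sketch of trigger() from the record above. Custom event names work because handlers are looked up by upper-cased name; the stop()/run_forever() pattern mirrors the on() docstring later in this section. The Client constructor arguments are again an assumption.

import bottom

client = bottom.Client(host="irc.example.org", port=6697, ssl=True)  # assumed ctor

@client.on("my_event")
async def handler(payload, **kwargs):
    print("handled", payload)

# Event names are upper-cased internally, so the case used here is irrelevant.
client.trigger("my_event", payload=42)

# Handlers are scheduled as tasks, not run inline; drive the loop once so
# the queued task actually executes.
client.loop.stop()
client.loop.run_forever()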
numberoverzero/bottom
bottom/client.py
RawClient.on
def on(self, event: str, func: Optional[Callable] = None) -> Callable: """ Decorate a function to be invoked when the given event occurs. The function may be a coroutine. Your function should accept **kwargs in case an event is triggered with unexpected kwargs. Example ------- import asyncio import bottom client = bottom.Client(...) @client.on("test") async def func(one, two, **kwargs): print(one) print(two) print(kwargs) events.trigger("test", **{"one": 1, "two": 2, "extra": "foo"}) loop = asyncio.get_event_loop() # Run all queued events loop.stop() loop.run_forever() """ if func is None: return functools.partial(self.on, event) # type: ignore wrapped = func if not asyncio.iscoroutinefunction(wrapped): wrapped = asyncio.coroutine(wrapped) self._event_handlers[event.upper()].append(wrapped) # Always return original return func
python
def on(self, event: str, func: Optional[Callable] = None) -> Callable: """ Decorate a function to be invoked when the given event occurs. The function may be a coroutine. Your function should accept **kwargs in case an event is triggered with unexpected kwargs. Example ------- import asyncio import bottom client = bottom.Client(...) @client.on("test") async def func(one, two, **kwargs): print(one) print(two) print(kwargs) events.trigger("test", **{"one": 1, "two": 2, "extra": "foo"}) loop = asyncio.get_event_loop() # Run all queued events loop.stop() loop.run_forever() """ if func is None: return functools.partial(self.on, event) # type: ignore wrapped = func if not asyncio.iscoroutinefunction(wrapped): wrapped = asyncio.coroutine(wrapped) self._event_handlers[event.upper()].append(wrapped) # Always return original return func
[ "def", "on", "(", "self", ",", "event", ":", "str", ",", "func", ":", "Optional", "[", "Callable", "]", "=", "None", ")", "->", "Callable", ":", "if", "func", "is", "None", ":", "return", "functools", ".", "partial", "(", "self", ".", "on", ",", "event", ")", "# type: ignore", "wrapped", "=", "func", "if", "not", "asyncio", ".", "iscoroutinefunction", "(", "wrapped", ")", ":", "wrapped", "=", "asyncio", ".", "coroutine", "(", "wrapped", ")", "self", ".", "_event_handlers", "[", "event", ".", "upper", "(", ")", "]", ".", "append", "(", "wrapped", ")", "# Always return original", "return", "func" ]
Decorate a function to be invoked when the given event occurs. The function may be a coroutine. Your function should accept **kwargs in case an event is triggered with unexpected kwargs. Example ------- import asyncio import bottom client = bottom.Client(...) @client.on("test") async def func(one, two, **kwargs): print(one) print(two) print(kwargs) events.trigger("test", **{"one": 1, "two": 2, "extra": "foo"}) loop = asyncio.get_event_loop() # Run all queued events loop.stop() loop.run_forever()
[ "Decorate", "a", "function", "to", "be", "invoked", "when", "the", "given", "event", "occurs", "." ]
train
https://github.com/numberoverzero/bottom/blob/9ba5f8e22d4990071e3606256e9bc1f64ec989fe/bottom/client.py#L110-L142
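The docstring above already shows the decorator form; this sketch covers the direct-call form of on(), which returns the original callable unchanged and wraps plain (non-async) functions into coroutines. The constructor arguments remain an assumption.

import bottom

client = bottom.Client(host="irc.example.org", port=6697, ssl=True)  # assumed ctor

def log_event(**kwargs):
    # Plain functions are wrapped with asyncio.coroutine by on().
    print("event fired:", kwargs)

# Direct registration, no decorator; the original callable comes back.
assert client.on("my_event", log_event) is log_event

client.trigger("my_event", source="direct-registration")
client.loop.stop()
client.loop.run_forever()  # run the queued handler task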
numberoverzero/bottom
bottom/client.py
Client.send
def send(self, command: str, **kwargs: Any) -> None: """ Send a message to the server. .. code-block:: python client.send("nick", nick="weatherbot") client.send("privmsg", target="#python", message="Hello, World!") """ packed_command = pack_command(command, **kwargs).strip() self.send_raw(packed_command)
python
def send(self, command: str, **kwargs: Any) -> None: """ Send a message to the server. .. code-block:: python client.send("nick", nick="weatherbot") client.send("privmsg", target="#python", message="Hello, World!") """ packed_command = pack_command(command, **kwargs).strip() self.send_raw(packed_command)
[ "def", "send", "(", "self", ",", "command", ":", "str", ",", "*", "*", "kwargs", ":", "Any", ")", "->", "None", ":", "packed_command", "=", "pack_command", "(", "command", ",", "*", "*", "kwargs", ")", ".", "strip", "(", ")", "self", ".", "send_raw", "(", "packed_command", ")" ]
Send a message to the server. .. code-block:: python client.send("nick", nick="weatherbot") client.send("privmsg", target="#python", message="Hello, World!")
[ "Send", "a", "message", "to", "the", "server", "." ]
train
https://github.com/numberoverzero/bottom/blob/9ba5f8e22d4990071e3606256e9bc1f64ec989fe/bottom/client.py#L170-L181
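A sketch of what Client.send produces for a few commands, derived from the pack_command branches earlier in this section. It assumes an already connected client, since send_raw itself is not shown in these records, and the constructor arguments are again placeholders.

import bottom

client = bottom.Client(host="irc.example.org", port=6697, ssl=True)  # assumed ctor

# Each call packs one IRC line via pack_command and strips it, e.g.:
#   JOIN #python
#   PRIVMSG #python :Hello, World!
#   TOPIC #python :bots welcome
client.send("join", channel="#python")
client.send("privmsg", target="#python", message="Hello, World!")
client.send("topic", channel="#python", message="bots welcome")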
numberoverzero/bottom
examples/regex.py
Router._handle
def _handle(self, nick, target, message, **kwargs): """ client callback entrance """ for regex, (func, pattern) in self.routes.items(): match = regex.match(message) if match: self.client.loop.create_task( func(nick, target, message, match, **kwargs))
python
def _handle(self, nick, target, message, **kwargs): """ client callback entrance """ for regex, (func, pattern) in self.routes.items(): match = regex.match(message) if match: self.client.loop.create_task( func(nick, target, message, match, **kwargs))
[ "def", "_handle", "(", "self", ",", "nick", ",", "target", ",", "message", ",", "*", "*", "kwargs", ")", ":", "for", "regex", ",", "(", "func", ",", "pattern", ")", "in", "self", ".", "routes", ".", "items", "(", ")", ":", "match", "=", "regex", ".", "match", "(", "message", ")", "if", "match", ":", "self", ".", "client", ".", "loop", ".", "create_task", "(", "func", "(", "nick", ",", "target", ",", "message", ",", "match", ",", "*", "*", "kwargs", ")", ")" ]
client callback entrance
[ "client", "callback", "entrance" ]
train
https://github.com/numberoverzero/bottom/blob/9ba5f8e22d4990071e3606256e9bc1f64ec989fe/examples/regex.py#L12-L18
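A standalone sketch of the dispatch rule in Router._handle above: each compiled pattern in routes is tried against the message, and matching handlers receive the match object. The Router's registration API is not shown in the record, so the route table here is built by hand, and the handler is awaited directly instead of being scheduled with loop.create_task.

import asyncio
import re

async def weather(nick, target, message, match, **kwargs):
    print("{} asked about the weather in {}".format(nick, match.group(1)))

# routes maps a compiled regex to (handler, original_pattern), matching the
# fields _handle iterates over.
routes = {re.compile(r"^weather (\w+)$"): (weather, r"^weather (\w+)$")}

async def handle(nick, target, message, **kwargs):
    for regex, (func, pattern) in routes.items():
        match = regex.match(message)
        if match:
            # the real _handle schedules this with self.client.loop.create_task
            await func(nick, target, message, match, **kwargs)

asyncio.run(handle("n8", "#python", "weather Seattle"))
# -> n8 asked about the weather in Seattle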
miccoli/pyownet
examples/owget.py
main
def main(): """parse commandline arguments and print result""" fcodes = collections.OrderedDict(( ('f.i', protocol.FLG_FORMAT_FDI), ('fi', protocol.FLG_FORMAT_FI), ('f.i.c', protocol.FLG_FORMAT_FDIDC), ('f.ic', protocol.FLG_FORMAT_FDIC), ('fi.c', protocol.FLG_FORMAT_FIDC), ('fic', protocol.FLG_FORMAT_FIC), )) def make_parser(): # command line parsing parser = argparse.ArgumentParser() # positional args parser.add_argument('uri', metavar='URI', nargs='?', default='/', help='[owserver:]//hostname:port/path') # optional args for temperature scale parser.set_defaults(t_flags=protocol.FLG_TEMP_C) tempg = parser.add_mutually_exclusive_group() tempg.add_argument('-C', '--Celsius', const=protocol.FLG_TEMP_C, help='Celsius(default) temperature scale', dest='t_flags', action='store_const', ) tempg.add_argument('-F', '--Fahrenheit', const=protocol.FLG_TEMP_F, help='Fahrenheit temperature scale', dest='t_flags', action='store_const', ) tempg.add_argument('-K', '--Kelvin', const=protocol.FLG_TEMP_K, help='Kelvin temperature scale', dest='t_flags', action='store_const', ) tempg.add_argument('-R', '--Rankine', const=protocol.FLG_TEMP_R, help='Rankine temperature scale', dest='t_flags', action='store_const', ) # optional arg for address format parser.set_defaults(format='f.i') parser.add_argument('-f', '--format', choices=fcodes, help='format for 1-wire unique serial IDs display') # optional arg for output format tempg = parser.add_mutually_exclusive_group() tempg.add_argument('--hex', action='store_true', help='write data in hex format') tempg.add_argument('-b', '--binary', action='store_true', help='output binary data') # debug output parser.add_argument('-d', '--debug', action='store_true', help='debug output') return parser def print_data(data): # format and print data if args.binary: if sys.version_info < (3, ): sys.stdout.write(data) else: sys.stdout.buffer.write(data) else: if args.hex: data = hexlify(data) else: try: data = data.decode('ascii') except UnicodeDecodeError: data = repr(data) print(data) # # main program starts here # # # parse command line arguments # parser = make_parser() args = parser.parse_args() # # parse args.uri and substitute defaults # urlc = urlsplit(args.uri, scheme='owserver', allow_fragments=False) assert urlc.fragment == '' if urlc.scheme != 'owserver': parser.error("Invalid URI scheme '{0}:'".format(urlc.scheme)) if urlc.query: parser.error("Invalid URI, query component '?{0}' not allowed" .format(urlc.query)) try: host = urlc.hostname or 'localhost' port = urlc.port or 4304 except ValueError as error: parser.error("Invalid URI: invalid net location '//{0}/'" .format(urlc.netloc)) # # create owserver proxy object # try: owproxy = protocol.proxy( host, port, flags=args.t_flags | fcodes[args.format], verbose=args.debug, ) except protocol.ConnError as error: print("Unable to open connection to '{0}:{1}'\nSystem error: {2}" .format(host, port, error), file=sys.stderr) sys.exit(1) except protocol.ProtocolError as error: print("'{0}:{1}' not an owserver?\nProtocol error: {2}" .format(host, port, error), file=sys.stderr) sys.exit(1) # # query owserver and print results # try: if urlc.path.endswith('/'): for path in owproxy.dir(urlc.path, bus=True): print(path) else: data = owproxy.read(urlc.path) print_data(data) except protocol.OwnetError as error: print("Remote server error: {2}" .format(host, port, error), file=sys.stderr) sys.exit(1) except protocol.ProtocolError as error: print("'{0}:{1}' buggy?\nProtocol error: {2}" .format(host, port, error), file=sys.stderr) 
sys.exit(1)
python
def main(): """parse commandline arguments and print result""" fcodes = collections.OrderedDict(( ('f.i', protocol.FLG_FORMAT_FDI), ('fi', protocol.FLG_FORMAT_FI), ('f.i.c', protocol.FLG_FORMAT_FDIDC), ('f.ic', protocol.FLG_FORMAT_FDIC), ('fi.c', protocol.FLG_FORMAT_FIDC), ('fic', protocol.FLG_FORMAT_FIC), )) def make_parser(): # command line parsing parser = argparse.ArgumentParser() # positional args parser.add_argument('uri', metavar='URI', nargs='?', default='/', help='[owserver:]//hostname:port/path') # optional args for temperature scale parser.set_defaults(t_flags=protocol.FLG_TEMP_C) tempg = parser.add_mutually_exclusive_group() tempg.add_argument('-C', '--Celsius', const=protocol.FLG_TEMP_C, help='Celsius(default) temperature scale', dest='t_flags', action='store_const', ) tempg.add_argument('-F', '--Fahrenheit', const=protocol.FLG_TEMP_F, help='Fahrenheit temperature scale', dest='t_flags', action='store_const', ) tempg.add_argument('-K', '--Kelvin', const=protocol.FLG_TEMP_K, help='Kelvin temperature scale', dest='t_flags', action='store_const', ) tempg.add_argument('-R', '--Rankine', const=protocol.FLG_TEMP_R, help='Rankine temperature scale', dest='t_flags', action='store_const', ) # optional arg for address format parser.set_defaults(format='f.i') parser.add_argument('-f', '--format', choices=fcodes, help='format for 1-wire unique serial IDs display') # optional arg for output format tempg = parser.add_mutually_exclusive_group() tempg.add_argument('--hex', action='store_true', help='write data in hex format') tempg.add_argument('-b', '--binary', action='store_true', help='output binary data') # debug output parser.add_argument('-d', '--debug', action='store_true', help='debug output') return parser def print_data(data): # format and print data if args.binary: if sys.version_info < (3, ): sys.stdout.write(data) else: sys.stdout.buffer.write(data) else: if args.hex: data = hexlify(data) else: try: data = data.decode('ascii') except UnicodeDecodeError: data = repr(data) print(data) # # main program starts here # # # parse command line arguments # parser = make_parser() args = parser.parse_args() # # parse args.uri and substitute defaults # urlc = urlsplit(args.uri, scheme='owserver', allow_fragments=False) assert urlc.fragment == '' if urlc.scheme != 'owserver': parser.error("Invalid URI scheme '{0}:'".format(urlc.scheme)) if urlc.query: parser.error("Invalid URI, query component '?{0}' not allowed" .format(urlc.query)) try: host = urlc.hostname or 'localhost' port = urlc.port or 4304 except ValueError as error: parser.error("Invalid URI: invalid net location '//{0}/'" .format(urlc.netloc)) # # create owserver proxy object # try: owproxy = protocol.proxy( host, port, flags=args.t_flags | fcodes[args.format], verbose=args.debug, ) except protocol.ConnError as error: print("Unable to open connection to '{0}:{1}'\nSystem error: {2}" .format(host, port, error), file=sys.stderr) sys.exit(1) except protocol.ProtocolError as error: print("'{0}:{1}' not an owserver?\nProtocol error: {2}" .format(host, port, error), file=sys.stderr) sys.exit(1) # # query owserver and print results # try: if urlc.path.endswith('/'): for path in owproxy.dir(urlc.path, bus=True): print(path) else: data = owproxy.read(urlc.path) print_data(data) except protocol.OwnetError as error: print("Remote server error: {2}" .format(host, port, error), file=sys.stderr) sys.exit(1) except protocol.ProtocolError as error: print("'{0}:{1}' buggy?\nProtocol error: {2}" .format(host, port, error), file=sys.stderr) 
sys.exit(1)
[ "def", "main", "(", ")", ":", "fcodes", "=", "collections", ".", "OrderedDict", "(", "(", "(", "'f.i'", ",", "protocol", ".", "FLG_FORMAT_FDI", ")", ",", "(", "'fi'", ",", "protocol", ".", "FLG_FORMAT_FI", ")", ",", "(", "'f.i.c'", ",", "protocol", ".", "FLG_FORMAT_FDIDC", ")", ",", "(", "'f.ic'", ",", "protocol", ".", "FLG_FORMAT_FDIC", ")", ",", "(", "'fi.c'", ",", "protocol", ".", "FLG_FORMAT_FIDC", ")", ",", "(", "'fic'", ",", "protocol", ".", "FLG_FORMAT_FIC", ")", ",", ")", ")", "def", "make_parser", "(", ")", ":", "# command line parsing", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "# positional args", "parser", ".", "add_argument", "(", "'uri'", ",", "metavar", "=", "'URI'", ",", "nargs", "=", "'?'", ",", "default", "=", "'/'", ",", "help", "=", "'[owserver:]//hostname:port/path'", ")", "# optional args for temperature scale", "parser", ".", "set_defaults", "(", "t_flags", "=", "protocol", ".", "FLG_TEMP_C", ")", "tempg", "=", "parser", ".", "add_mutually_exclusive_group", "(", ")", "tempg", ".", "add_argument", "(", "'-C'", ",", "'--Celsius'", ",", "const", "=", "protocol", ".", "FLG_TEMP_C", ",", "help", "=", "'Celsius(default) temperature scale'", ",", "dest", "=", "'t_flags'", ",", "action", "=", "'store_const'", ",", ")", "tempg", ".", "add_argument", "(", "'-F'", ",", "'--Fahrenheit'", ",", "const", "=", "protocol", ".", "FLG_TEMP_F", ",", "help", "=", "'Fahrenheit temperature scale'", ",", "dest", "=", "'t_flags'", ",", "action", "=", "'store_const'", ",", ")", "tempg", ".", "add_argument", "(", "'-K'", ",", "'--Kelvin'", ",", "const", "=", "protocol", ".", "FLG_TEMP_K", ",", "help", "=", "'Kelvin temperature scale'", ",", "dest", "=", "'t_flags'", ",", "action", "=", "'store_const'", ",", ")", "tempg", ".", "add_argument", "(", "'-R'", ",", "'--Rankine'", ",", "const", "=", "protocol", ".", "FLG_TEMP_R", ",", "help", "=", "'Rankine temperature scale'", ",", "dest", "=", "'t_flags'", ",", "action", "=", "'store_const'", ",", ")", "# optional arg for address format", "parser", ".", "set_defaults", "(", "format", "=", "'f.i'", ")", "parser", ".", "add_argument", "(", "'-f'", ",", "'--format'", ",", "choices", "=", "fcodes", ",", "help", "=", "'format for 1-wire unique serial IDs display'", ")", "# optional arg for output format", "tempg", "=", "parser", ".", "add_mutually_exclusive_group", "(", ")", "tempg", ".", "add_argument", "(", "'--hex'", ",", "action", "=", "'store_true'", ",", "help", "=", "'write data in hex format'", ")", "tempg", ".", "add_argument", "(", "'-b'", ",", "'--binary'", ",", "action", "=", "'store_true'", ",", "help", "=", "'output binary data'", ")", "# debug output", "parser", ".", "add_argument", "(", "'-d'", ",", "'--debug'", ",", "action", "=", "'store_true'", ",", "help", "=", "'debug output'", ")", "return", "parser", "def", "print_data", "(", "data", ")", ":", "# format and print data", "if", "args", ".", "binary", ":", "if", "sys", ".", "version_info", "<", "(", "3", ",", ")", ":", "sys", ".", "stdout", ".", "write", "(", "data", ")", "else", ":", "sys", ".", "stdout", ".", "buffer", ".", "write", "(", "data", ")", "else", ":", "if", "args", ".", "hex", ":", "data", "=", "hexlify", "(", "data", ")", "else", ":", "try", ":", "data", "=", "data", ".", "decode", "(", "'ascii'", ")", "except", "UnicodeDecodeError", ":", "data", "=", "repr", "(", "data", ")", "print", "(", "data", ")", "#", "# main program starts here", "#", "#", "# parse command line arguments", "#", "parser", "=", "make_parser", "(", ")", "args", "=", "parser", 
".", "parse_args", "(", ")", "#", "# parse args.uri and substitute defaults", "#", "urlc", "=", "urlsplit", "(", "args", ".", "uri", ",", "scheme", "=", "'owserver'", ",", "allow_fragments", "=", "False", ")", "assert", "urlc", ".", "fragment", "==", "''", "if", "urlc", ".", "scheme", "!=", "'owserver'", ":", "parser", ".", "error", "(", "\"Invalid URI scheme '{0}:'\"", ".", "format", "(", "urlc", ".", "scheme", ")", ")", "if", "urlc", ".", "query", ":", "parser", ".", "error", "(", "\"Invalid URI, query component '?{0}' not allowed\"", ".", "format", "(", "urlc", ".", "query", ")", ")", "try", ":", "host", "=", "urlc", ".", "hostname", "or", "'localhost'", "port", "=", "urlc", ".", "port", "or", "4304", "except", "ValueError", "as", "error", ":", "parser", ".", "error", "(", "\"Invalid URI: invalid net location '//{0}/'\"", ".", "format", "(", "urlc", ".", "netloc", ")", ")", "#", "# create owserver proxy object", "#", "try", ":", "owproxy", "=", "protocol", ".", "proxy", "(", "host", ",", "port", ",", "flags", "=", "args", ".", "t_flags", "|", "fcodes", "[", "args", ".", "format", "]", ",", "verbose", "=", "args", ".", "debug", ",", ")", "except", "protocol", ".", "ConnError", "as", "error", ":", "print", "(", "\"Unable to open connection to '{0}:{1}'\\nSystem error: {2}\"", ".", "format", "(", "host", ",", "port", ",", "error", ")", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "except", "protocol", ".", "ProtocolError", "as", "error", ":", "print", "(", "\"'{0}:{1}' not an owserver?\\nProtocol error: {2}\"", ".", "format", "(", "host", ",", "port", ",", "error", ")", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "#", "# query owserver and print results", "#", "try", ":", "if", "urlc", ".", "path", ".", "endswith", "(", "'/'", ")", ":", "for", "path", "in", "owproxy", ".", "dir", "(", "urlc", ".", "path", ",", "bus", "=", "True", ")", ":", "print", "(", "path", ")", "else", ":", "data", "=", "owproxy", ".", "read", "(", "urlc", ".", "path", ")", "print_data", "(", "data", ")", "except", "protocol", ".", "OwnetError", "as", "error", ":", "print", "(", "\"Remote server error: {2}\"", ".", "format", "(", "host", ",", "port", ",", "error", ")", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "except", "protocol", ".", "ProtocolError", "as", "error", ":", "print", "(", "\"'{0}:{1}' buggy?\\nProtocol error: {2}\"", ".", "format", "(", "host", ",", "port", ",", "error", ")", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")" ]
parse commandline arguments and print result
[ "parse", "commandline", "arguments", "and", "print", "result" ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/examples/owget.py#L38-L170
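The same owserver calls owget issues, made directly against pyownet's protocol module. Host, port and the sensor path are placeholders and require a reachable owserver; the flag constants and the proxy/dir/read signatures come from the record above.

from pyownet import protocol

# Placeholder host/port; flags mirror owget's defaults (Celsius, 'f.i' format).
owproxy = protocol.proxy(
    "localhost", 4304,
    flags=protocol.FLG_TEMP_C | protocol.FLG_FORMAT_FDI,
)

# A path ending in '/' is treated as a directory listing in owget.
for path in owproxy.dir("/", bus=True):
    print(path)

# A single attribute read returns bytes; the placeholder sensor ID is made up.
print(owproxy.read("/10.67C6697351FF/temperature").decode())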
miccoli/pyownet
diags/stress_t.py
main
def main(): """parse commandline arguments and print result""" # # setup command line parsing a la argpase # parser = argparse.ArgumentParser() # positional args parser.add_argument('uri', metavar='URI', nargs='?', default='/', help='[owserver:]//hostname:port/path') # # parse command line args # args = parser.parse_args() # # parse args.uri and substitute defaults # urlc = urlsplit(args.uri, scheme='owserver', allow_fragments=False) assert urlc.fragment == '' if urlc.scheme != 'owserver': parser.error("Invalid URI scheme '{0}:'".format(urlc.scheme)) if urlc.query: parser.error("Invalid URI, query component '?{0}' not allowed" .format(urlc.query)) try: host = urlc.hostname or 'localhost' port = urlc.port or 4304 except ValueError as error: parser.error("Invalid URI: invalid net location '//{0}/'" .format(urlc.netloc)) # # create owserver proxy object # try: owproxy = protocol.proxy(host, port, persistent=True) except protocol.ConnError as error: print("Unable to open connection to '{0}:{1}'\n{2}" .format(host, port, error), file=sys.stderr) sys.exit(1) except protocol.ProtocolError as error: print("Protocol error, '{0}:{1}' not an owserver?\n{2}" .format(host, port, error), file=sys.stderr) sys.exit(1) stress(owproxy, urlc.path)
python
def main(): """parse commandline arguments and print result""" # # setup command line parsing a la argpase # parser = argparse.ArgumentParser() # positional args parser.add_argument('uri', metavar='URI', nargs='?', default='/', help='[owserver:]//hostname:port/path') # # parse command line args # args = parser.parse_args() # # parse args.uri and substitute defaults # urlc = urlsplit(args.uri, scheme='owserver', allow_fragments=False) assert urlc.fragment == '' if urlc.scheme != 'owserver': parser.error("Invalid URI scheme '{0}:'".format(urlc.scheme)) if urlc.query: parser.error("Invalid URI, query component '?{0}' not allowed" .format(urlc.query)) try: host = urlc.hostname or 'localhost' port = urlc.port or 4304 except ValueError as error: parser.error("Invalid URI: invalid net location '//{0}/'" .format(urlc.netloc)) # # create owserver proxy object # try: owproxy = protocol.proxy(host, port, persistent=True) except protocol.ConnError as error: print("Unable to open connection to '{0}:{1}'\n{2}" .format(host, port, error), file=sys.stderr) sys.exit(1) except protocol.ProtocolError as error: print("Protocol error, '{0}:{1}' not an owserver?\n{2}" .format(host, port, error), file=sys.stderr) sys.exit(1) stress(owproxy, urlc.path)
[ "def", "main", "(", ")", ":", "#", "# setup command line parsing a la argpase", "#", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "# positional args", "parser", ".", "add_argument", "(", "'uri'", ",", "metavar", "=", "'URI'", ",", "nargs", "=", "'?'", ",", "default", "=", "'/'", ",", "help", "=", "'[owserver:]//hostname:port/path'", ")", "#", "# parse command line args", "#", "args", "=", "parser", ".", "parse_args", "(", ")", "#", "# parse args.uri and substitute defaults", "#", "urlc", "=", "urlsplit", "(", "args", ".", "uri", ",", "scheme", "=", "'owserver'", ",", "allow_fragments", "=", "False", ")", "assert", "urlc", ".", "fragment", "==", "''", "if", "urlc", ".", "scheme", "!=", "'owserver'", ":", "parser", ".", "error", "(", "\"Invalid URI scheme '{0}:'\"", ".", "format", "(", "urlc", ".", "scheme", ")", ")", "if", "urlc", ".", "query", ":", "parser", ".", "error", "(", "\"Invalid URI, query component '?{0}' not allowed\"", ".", "format", "(", "urlc", ".", "query", ")", ")", "try", ":", "host", "=", "urlc", ".", "hostname", "or", "'localhost'", "port", "=", "urlc", ".", "port", "or", "4304", "except", "ValueError", "as", "error", ":", "parser", ".", "error", "(", "\"Invalid URI: invalid net location '//{0}/'\"", ".", "format", "(", "urlc", ".", "netloc", ")", ")", "#", "# create owserver proxy object", "#", "try", ":", "owproxy", "=", "protocol", ".", "proxy", "(", "host", ",", "port", ",", "persistent", "=", "True", ")", "except", "protocol", ".", "ConnError", "as", "error", ":", "print", "(", "\"Unable to open connection to '{0}:{1}'\\n{2}\"", ".", "format", "(", "host", ",", "port", ",", "error", ")", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "except", "protocol", ".", "ProtocolError", "as", "error", ":", "print", "(", "\"Protocol error, '{0}:{1}' not an owserver?\\n{2}\"", ".", "format", "(", "host", ",", "port", ",", "error", ")", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "1", ")", "stress", "(", "owproxy", ",", "urlc", ".", "path", ")" ]
parse commandline arguments and print result
[ "parse", "commandline", "arguments", "and", "print", "result" ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/diags/stress_t.py#L64-L112
SeanOC/sharpy
sharpy/client.py
Client.build_url
def build_url(self, path, params=None): ''' Constructs the url for a cheddar API resource ''' url = u'%s/%s/productCode/%s' % ( self.endpoint, path, self.product_code, ) if params: for key, value in params.items(): url = u'%s/%s/%s' % (url, key, value) return url
python
def build_url(self, path, params=None): ''' Constructs the url for a cheddar API resource ''' url = u'%s/%s/productCode/%s' % ( self.endpoint, path, self.product_code, ) if params: for key, value in params.items(): url = u'%s/%s/%s' % (url, key, value) return url
[ "def", "build_url", "(", "self", ",", "path", ",", "params", "=", "None", ")", ":", "url", "=", "u'%s/%s/productCode/%s'", "%", "(", "self", ".", "endpoint", ",", "path", ",", "self", ".", "product_code", ",", ")", "if", "params", ":", "for", "key", ",", "value", "in", "params", ".", "items", "(", ")", ":", "url", "=", "u'%s/%s/%s'", "%", "(", "url", ",", "key", ",", "value", ")", "return", "url" ]
Constructs the url for a cheddar API resource
[ "Constructs", "the", "url", "for", "a", "cheddar", "API", "resource" ]
train
https://github.com/SeanOC/sharpy/blob/935943ca86034255f0a93c1a84734814be176ed4/sharpy/client.py#L33-L46
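A minimal sketch of how build_url() composes a resource URL. `client` is assumed to be an already-configured sharpy Client bound to product code 'MY_PRODUCT'; the endpoint value in the comment is an assumption, not taken from this record.
# `client` is assumed to be an already-configured sharpy Client instance
url = client.build_url('customers/get', params={'code': 'customer_1'})
# with an endpoint of 'https://cheddargetter.com/xml' this yields something like:
# u'https://cheddargetter.com/xml/customers/get/productCode/MY_PRODUCT/code/customer_1'
print(url)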
SeanOC/sharpy
sharpy/client.py
Client.make_request
def make_request(self, path, params=None, data=None, method=None): ''' Makes a request to the cheddar api using the authentication and configuration settings available. ''' # Setup values url = self.build_url(path, params) client_log.debug('Requesting: %s' % url) method = method or 'GET' body = None headers = {} if data: method = 'POST' body = urlencode(data) headers = { 'content-type': 'application/x-www-form-urlencoded; charset=UTF-8', } client_log.debug('Request Method: %s' % method) client_log.debug('Request Body(Data): %s' % data) client_log.debug('Request Body(Raw): %s' % body) # Setup http client h = httplib2.Http(cache=self.cache, timeout=self.timeout) #h.add_credentials(self.username, self.password) # Skip the normal http client behavior and send auth headers immediately # to save an http request. headers['Authorization'] = "Basic %s" % base64.standard_b64encode(self.username + ':' + self.password).strip() # Make request response, content = h.request(url, method, body=body, headers=headers) status = response.status client_log.debug('Response Status: %d' % status) client_log.debug('Response Content: %s' % content) if status != 200 and status != 302: exception_class = CheddarError if status == 401: exception_class = AccessDenied elif status == 400: exception_class = BadRequest elif status == 404: exception_class = NotFound elif status == 412: exception_class = PreconditionFailed elif status == 500: exception_class = CheddarFailure elif status == 502: exception_class = NaughtyGateway elif status == 422: exception_class = UnprocessableEntity raise exception_class(response, content) response.content = content return response
python
def make_request(self, path, params=None, data=None, method=None): ''' Makes a request to the cheddar api using the authentication and configuration settings available. ''' # Setup values url = self.build_url(path, params) client_log.debug('Requesting: %s' % url) method = method or 'GET' body = None headers = {} if data: method = 'POST' body = urlencode(data) headers = { 'content-type': 'application/x-www-form-urlencoded; charset=UTF-8', } client_log.debug('Request Method: %s' % method) client_log.debug('Request Body(Data): %s' % data) client_log.debug('Request Body(Raw): %s' % body) # Setup http client h = httplib2.Http(cache=self.cache, timeout=self.timeout) #h.add_credentials(self.username, self.password) # Skip the normal http client behavior and send auth headers immediately # to save an http request. headers['Authorization'] = "Basic %s" % base64.standard_b64encode(self.username + ':' + self.password).strip() # Make request response, content = h.request(url, method, body=body, headers=headers) status = response.status client_log.debug('Response Status: %d' % status) client_log.debug('Response Content: %s' % content) if status != 200 and status != 302: exception_class = CheddarError if status == 401: exception_class = AccessDenied elif status == 400: exception_class = BadRequest elif status == 404: exception_class = NotFound elif status == 412: exception_class = PreconditionFailed elif status == 500: exception_class = CheddarFailure elif status == 502: exception_class = NaughtyGateway elif status == 422: exception_class = UnprocessableEntity raise exception_class(response, content) response.content = content return response
[ "def", "make_request", "(", "self", ",", "path", ",", "params", "=", "None", ",", "data", "=", "None", ",", "method", "=", "None", ")", ":", "# Setup values", "url", "=", "self", ".", "build_url", "(", "path", ",", "params", ")", "client_log", ".", "debug", "(", "'Requesting: %s'", "%", "url", ")", "method", "=", "method", "or", "'GET'", "body", "=", "None", "headers", "=", "{", "}", "if", "data", ":", "method", "=", "'POST'", "body", "=", "urlencode", "(", "data", ")", "headers", "=", "{", "'content-type'", ":", "'application/x-www-form-urlencoded; charset=UTF-8'", ",", "}", "client_log", ".", "debug", "(", "'Request Method: %s'", "%", "method", ")", "client_log", ".", "debug", "(", "'Request Body(Data): %s'", "%", "data", ")", "client_log", ".", "debug", "(", "'Request Body(Raw): %s'", "%", "body", ")", "# Setup http client", "h", "=", "httplib2", ".", "Http", "(", "cache", "=", "self", ".", "cache", ",", "timeout", "=", "self", ".", "timeout", ")", "#h.add_credentials(self.username, self.password)", "# Skip the normal http client behavior and send auth headers immediately", "# to save an http request.", "headers", "[", "'Authorization'", "]", "=", "\"Basic %s\"", "%", "base64", ".", "standard_b64encode", "(", "self", ".", "username", "+", "':'", "+", "self", ".", "password", ")", ".", "strip", "(", ")", "# Make request", "response", ",", "content", "=", "h", ".", "request", "(", "url", ",", "method", ",", "body", "=", "body", ",", "headers", "=", "headers", ")", "status", "=", "response", ".", "status", "client_log", ".", "debug", "(", "'Response Status: %d'", "%", "status", ")", "client_log", ".", "debug", "(", "'Response Content: %s'", "%", "content", ")", "if", "status", "!=", "200", "and", "status", "!=", "302", ":", "exception_class", "=", "CheddarError", "if", "status", "==", "401", ":", "exception_class", "=", "AccessDenied", "elif", "status", "==", "400", ":", "exception_class", "=", "BadRequest", "elif", "status", "==", "404", ":", "exception_class", "=", "NotFound", "elif", "status", "==", "412", ":", "exception_class", "=", "PreconditionFailed", "elif", "status", "==", "500", ":", "exception_class", "=", "CheddarFailure", "elif", "status", "==", "502", ":", "exception_class", "=", "NaughtyGateway", "elif", "status", "==", "422", ":", "exception_class", "=", "UnprocessableEntity", "raise", "exception_class", "(", "response", ",", "content", ")", "response", ".", "content", "=", "content", "return", "response" ]
Makes a request to the cheddar api using the authentication and configuration settings available.
[ "Makes", "a", "request", "to", "the", "cheddar", "api", "using", "the", "authentication", "and", "configuration", "settings", "available", "." ]
train
https://github.com/SeanOC/sharpy/blob/935943ca86034255f0a93c1a84734814be176ed4/sharpy/client.py#L70-L125
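A hedged sketch of calling make_request() and handling the status-code-to-exception mapping shown above; the `client` object, the request path, and the exception import path are assumptions.
from sharpy.exceptions import CheddarError, NotFound  # module path assumed

# `client` is assumed to be an already-configured sharpy Client instance
try:
    response = client.make_request(path='plans/get')   # path is illustrative
except NotFound:
    response = None            # 404 from the mapping shown above
except CheddarError as error:
    print('Cheddar returned an error:', error)
    raise
else:
    print(response.status, len(response.content))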
miccoli/pyownet
examples/walk.py
main
def main(): """parse commandline arguments and print result""" # # setup command line parsing a la argpase # parser = argparse.ArgumentParser() # positional args parser.add_argument('uri', metavar='URI', nargs='?', default='/', help='[owserver:]//server:port/entity') # optional args for temperature scale parser.set_defaults(t_flags=protocol.FLG_TEMP_C) tempg = parser.add_mutually_exclusive_group() tempg.add_argument('-C', '--Celsius', const=protocol.FLG_TEMP_C, help='Celsius(default) temperature scale', dest='t_flags', action='store_const', ) tempg.add_argument('-F', '--Fahrenheit', const=protocol.FLG_TEMP_F, help='Fahrenheit temperature scale', dest='t_flags', action='store_const', ) tempg.add_argument('-K', '--Kelvin', const=protocol.FLG_TEMP_K, help='Kelvin temperature scale', dest='t_flags', action='store_const', ) tempg.add_argument('-R', '--Rankine', const=protocol.FLG_TEMP_R, help='Rankine temperature scale', dest='t_flags', action='store_const', ) # optional arg for address format fcodes = collections.OrderedDict(( ('f.i', protocol.FLG_FORMAT_FDI), ('fi', protocol.FLG_FORMAT_FI), ('f.i.c', protocol.FLG_FORMAT_FDIDC), ('f.ic', protocol.FLG_FORMAT_FDIC), ('fi.c', protocol.FLG_FORMAT_FIDC), ('fic', protocol.FLG_FORMAT_FIC), )) parser.set_defaults(format='f.i') parser.add_argument('-f', '--format', choices=fcodes, help='format for 1-wire unique serial IDs display') parser.add_argument('--nosys', '--only-sensors', action='store_false', dest='bus', help='do not descend system directories') # # parse command line args # args = parser.parse_args() # # parse args.uri and substitute defaults # urlc = urlsplit(args.uri, scheme='owserver', allow_fragments=False) if urlc.scheme != 'owserver': parser.error("Invalid URI scheme '{}:'".format(urlc.scheme)) assert not urlc.fragment if urlc.query: parser.error( "Invalid URI '{}', no query component allowed".format(args.uri)) host = urlc.hostname or 'localhost' port = urlc.port or 4304 # # create owserver proxy object # try: proxy = protocol.proxy( host, port, flags=args.t_flags | fcodes[args.format], persistent=True) except (protocol.ConnError, protocol.ProtocolError) as error: parser.exit(status=1, message=str(error) + '\n') def walk(path): try: if not path.endswith('/'): val = proxy.read(path) print("{:40} {!r}".format(path, val)) else: for entity in proxy.dir(path, bus=args.bus): walk(entity) except protocol.OwnetError as error: print('Unable to walk {}: server says {}'.format(path, error), file=sys.stderr) except protocol.ConnError as error: print('Unable to walk {}: {}'.format(path, error), file=sys.stderr) with proxy: walk(urlc.path)
python
def main(): """parse commandline arguments and print result""" # # setup command line parsing a la argpase # parser = argparse.ArgumentParser() # positional args parser.add_argument('uri', metavar='URI', nargs='?', default='/', help='[owserver:]//server:port/entity') # optional args for temperature scale parser.set_defaults(t_flags=protocol.FLG_TEMP_C) tempg = parser.add_mutually_exclusive_group() tempg.add_argument('-C', '--Celsius', const=protocol.FLG_TEMP_C, help='Celsius(default) temperature scale', dest='t_flags', action='store_const', ) tempg.add_argument('-F', '--Fahrenheit', const=protocol.FLG_TEMP_F, help='Fahrenheit temperature scale', dest='t_flags', action='store_const', ) tempg.add_argument('-K', '--Kelvin', const=protocol.FLG_TEMP_K, help='Kelvin temperature scale', dest='t_flags', action='store_const', ) tempg.add_argument('-R', '--Rankine', const=protocol.FLG_TEMP_R, help='Rankine temperature scale', dest='t_flags', action='store_const', ) # optional arg for address format fcodes = collections.OrderedDict(( ('f.i', protocol.FLG_FORMAT_FDI), ('fi', protocol.FLG_FORMAT_FI), ('f.i.c', protocol.FLG_FORMAT_FDIDC), ('f.ic', protocol.FLG_FORMAT_FDIC), ('fi.c', protocol.FLG_FORMAT_FIDC), ('fic', protocol.FLG_FORMAT_FIC), )) parser.set_defaults(format='f.i') parser.add_argument('-f', '--format', choices=fcodes, help='format for 1-wire unique serial IDs display') parser.add_argument('--nosys', '--only-sensors', action='store_false', dest='bus', help='do not descend system directories') # # parse command line args # args = parser.parse_args() # # parse args.uri and substitute defaults # urlc = urlsplit(args.uri, scheme='owserver', allow_fragments=False) if urlc.scheme != 'owserver': parser.error("Invalid URI scheme '{}:'".format(urlc.scheme)) assert not urlc.fragment if urlc.query: parser.error( "Invalid URI '{}', no query component allowed".format(args.uri)) host = urlc.hostname or 'localhost' port = urlc.port or 4304 # # create owserver proxy object # try: proxy = protocol.proxy( host, port, flags=args.t_flags | fcodes[args.format], persistent=True) except (protocol.ConnError, protocol.ProtocolError) as error: parser.exit(status=1, message=str(error) + '\n') def walk(path): try: if not path.endswith('/'): val = proxy.read(path) print("{:40} {!r}".format(path, val)) else: for entity in proxy.dir(path, bus=args.bus): walk(entity) except protocol.OwnetError as error: print('Unable to walk {}: server says {}'.format(path, error), file=sys.stderr) except protocol.ConnError as error: print('Unable to walk {}: {}'.format(path, error), file=sys.stderr) with proxy: walk(urlc.path)
[ "def", "main", "(", ")", ":", "#", "# setup command line parsing a la argpase", "#", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "# positional args", "parser", ".", "add_argument", "(", "'uri'", ",", "metavar", "=", "'URI'", ",", "nargs", "=", "'?'", ",", "default", "=", "'/'", ",", "help", "=", "'[owserver:]//server:port/entity'", ")", "# optional args for temperature scale", "parser", ".", "set_defaults", "(", "t_flags", "=", "protocol", ".", "FLG_TEMP_C", ")", "tempg", "=", "parser", ".", "add_mutually_exclusive_group", "(", ")", "tempg", ".", "add_argument", "(", "'-C'", ",", "'--Celsius'", ",", "const", "=", "protocol", ".", "FLG_TEMP_C", ",", "help", "=", "'Celsius(default) temperature scale'", ",", "dest", "=", "'t_flags'", ",", "action", "=", "'store_const'", ",", ")", "tempg", ".", "add_argument", "(", "'-F'", ",", "'--Fahrenheit'", ",", "const", "=", "protocol", ".", "FLG_TEMP_F", ",", "help", "=", "'Fahrenheit temperature scale'", ",", "dest", "=", "'t_flags'", ",", "action", "=", "'store_const'", ",", ")", "tempg", ".", "add_argument", "(", "'-K'", ",", "'--Kelvin'", ",", "const", "=", "protocol", ".", "FLG_TEMP_K", ",", "help", "=", "'Kelvin temperature scale'", ",", "dest", "=", "'t_flags'", ",", "action", "=", "'store_const'", ",", ")", "tempg", ".", "add_argument", "(", "'-R'", ",", "'--Rankine'", ",", "const", "=", "protocol", ".", "FLG_TEMP_R", ",", "help", "=", "'Rankine temperature scale'", ",", "dest", "=", "'t_flags'", ",", "action", "=", "'store_const'", ",", ")", "# optional arg for address format", "fcodes", "=", "collections", ".", "OrderedDict", "(", "(", "(", "'f.i'", ",", "protocol", ".", "FLG_FORMAT_FDI", ")", ",", "(", "'fi'", ",", "protocol", ".", "FLG_FORMAT_FI", ")", ",", "(", "'f.i.c'", ",", "protocol", ".", "FLG_FORMAT_FDIDC", ")", ",", "(", "'f.ic'", ",", "protocol", ".", "FLG_FORMAT_FDIC", ")", ",", "(", "'fi.c'", ",", "protocol", ".", "FLG_FORMAT_FIDC", ")", ",", "(", "'fic'", ",", "protocol", ".", "FLG_FORMAT_FIC", ")", ",", ")", ")", "parser", ".", "set_defaults", "(", "format", "=", "'f.i'", ")", "parser", ".", "add_argument", "(", "'-f'", ",", "'--format'", ",", "choices", "=", "fcodes", ",", "help", "=", "'format for 1-wire unique serial IDs display'", ")", "parser", ".", "add_argument", "(", "'--nosys'", ",", "'--only-sensors'", ",", "action", "=", "'store_false'", ",", "dest", "=", "'bus'", ",", "help", "=", "'do not descend system directories'", ")", "#", "# parse command line args", "#", "args", "=", "parser", ".", "parse_args", "(", ")", "#", "# parse args.uri and substitute defaults", "#", "urlc", "=", "urlsplit", "(", "args", ".", "uri", ",", "scheme", "=", "'owserver'", ",", "allow_fragments", "=", "False", ")", "if", "urlc", ".", "scheme", "!=", "'owserver'", ":", "parser", ".", "error", "(", "\"Invalid URI scheme '{}:'\"", ".", "format", "(", "urlc", ".", "scheme", ")", ")", "assert", "not", "urlc", ".", "fragment", "if", "urlc", ".", "query", ":", "parser", ".", "error", "(", "\"Invalid URI '{}', no query component allowed\"", ".", "format", "(", "args", ".", "uri", ")", ")", "host", "=", "urlc", ".", "hostname", "or", "'localhost'", "port", "=", "urlc", ".", "port", "or", "4304", "#", "# create owserver proxy object", "#", "try", ":", "proxy", "=", "protocol", ".", "proxy", "(", "host", ",", "port", ",", "flags", "=", "args", ".", "t_flags", "|", "fcodes", "[", "args", ".", "format", "]", ",", "persistent", "=", "True", ")", "except", "(", "protocol", ".", "ConnError", ",", "protocol", ".", "ProtocolError", ")", 
"as", "error", ":", "parser", ".", "exit", "(", "status", "=", "1", ",", "message", "=", "str", "(", "error", ")", "+", "'\\n'", ")", "def", "walk", "(", "path", ")", ":", "try", ":", "if", "not", "path", ".", "endswith", "(", "'/'", ")", ":", "val", "=", "proxy", ".", "read", "(", "path", ")", "print", "(", "\"{:40} {!r}\"", ".", "format", "(", "path", ",", "val", ")", ")", "else", ":", "for", "entity", "in", "proxy", ".", "dir", "(", "path", ",", "bus", "=", "args", ".", "bus", ")", ":", "walk", "(", "entity", ")", "except", "protocol", ".", "OwnetError", "as", "error", ":", "print", "(", "'Unable to walk {}: server says {}'", ".", "format", "(", "path", ",", "error", ")", ",", "file", "=", "sys", ".", "stderr", ")", "except", "protocol", ".", "ConnError", "as", "error", ":", "print", "(", "'Unable to walk {}: {}'", ".", "format", "(", "path", ",", "error", ")", ",", "file", "=", "sys", ".", "stderr", ")", "with", "proxy", ":", "walk", "(", "urlc", ".", "path", ")" ]
parse commandline arguments and print result
[ "parse", "commandline", "arguments", "and", "print", "result" ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/examples/walk.py#L38-L124
miccoli/pyownet
src/pyownet/protocol.py
proxy
def proxy(host='localhost', port=4304, flags=0, persistent=False, verbose=False, ): """factory function that returns a proxy object for an owserver at host, port. """ # resolve host name/port try: gai = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP) except socket.gaierror as err: raise ConnError(*err.args) # gai is a (non empty) list of tuples, search for the first working one assert gai for (family, _type, _proto, _, sockaddr) in gai: assert _type is socket.SOCK_STREAM and _proto is socket.IPPROTO_TCP owp = _Proxy(family, sockaddr, flags, verbose) try: # check if there is an owserver listening owp.ping() except ConnError as err: # no connection, go over to next sockaddr lasterr = err.args continue else: # ok, live owserver found, stop searching break else: # no server listening on (family, sockaddr) found: raise ConnError(*lasterr) # init errno to errmessage mapping # FIXME: should this be only optional? owp._init_errcodes() if persistent: owp = clone(owp, persistent=True) # here we should have all connections closed assert not isinstance(owp, _PersistentProxy) or owp.conn is None return owp
python
def proxy(host='localhost', port=4304, flags=0, persistent=False, verbose=False, ): """factory function that returns a proxy object for an owserver at host, port. """ # resolve host name/port try: gai = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP) except socket.gaierror as err: raise ConnError(*err.args) # gai is a (non empty) list of tuples, search for the first working one assert gai for (family, _type, _proto, _, sockaddr) in gai: assert _type is socket.SOCK_STREAM and _proto is socket.IPPROTO_TCP owp = _Proxy(family, sockaddr, flags, verbose) try: # check if there is an owserver listening owp.ping() except ConnError as err: # no connection, go over to next sockaddr lasterr = err.args continue else: # ok, live owserver found, stop searching break else: # no server listening on (family, sockaddr) found: raise ConnError(*lasterr) # init errno to errmessage mapping # FIXME: should this be only optional? owp._init_errcodes() if persistent: owp = clone(owp, persistent=True) # here we should have all connections closed assert not isinstance(owp, _PersistentProxy) or owp.conn is None return owp
[ "def", "proxy", "(", "host", "=", "'localhost'", ",", "port", "=", "4304", ",", "flags", "=", "0", ",", "persistent", "=", "False", ",", "verbose", "=", "False", ",", ")", ":", "# resolve host name/port", "try", ":", "gai", "=", "socket", ".", "getaddrinfo", "(", "host", ",", "port", ",", "0", ",", "socket", ".", "SOCK_STREAM", ",", "socket", ".", "IPPROTO_TCP", ")", "except", "socket", ".", "gaierror", "as", "err", ":", "raise", "ConnError", "(", "*", "err", ".", "args", ")", "# gai is a (non empty) list of tuples, search for the first working one", "assert", "gai", "for", "(", "family", ",", "_type", ",", "_proto", ",", "_", ",", "sockaddr", ")", "in", "gai", ":", "assert", "_type", "is", "socket", ".", "SOCK_STREAM", "and", "_proto", "is", "socket", ".", "IPPROTO_TCP", "owp", "=", "_Proxy", "(", "family", ",", "sockaddr", ",", "flags", ",", "verbose", ")", "try", ":", "# check if there is an owserver listening", "owp", ".", "ping", "(", ")", "except", "ConnError", "as", "err", ":", "# no connection, go over to next sockaddr", "lasterr", "=", "err", ".", "args", "continue", "else", ":", "# ok, live owserver found, stop searching", "break", "else", ":", "# no server listening on (family, sockaddr) found:", "raise", "ConnError", "(", "*", "lasterr", ")", "# init errno to errmessage mapping", "# FIXME: should this be only optional?", "owp", ".", "_init_errcodes", "(", ")", "if", "persistent", ":", "owp", "=", "clone", "(", "owp", ",", "persistent", "=", "True", ")", "# here we should have all connections closed", "assert", "not", "isinstance", "(", "owp", ",", "_PersistentProxy", ")", "or", "owp", ".", "conn", "is", "None", "return", "owp" ]
factory function that returns a proxy object for an owserver at host, port.
[ "factory", "function", "that", "returns", "a", "proxy", "object", "for", "an", "owserver", "at", "host", "port", "." ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/src/pyownet/protocol.py#L708-L750
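A minimal usage sketch for the proxy() factory documented above, assuming a local owserver on the default port; the sensor ID in the read path is hypothetical.
from pyownet import protocol

# raises protocol.ConnError if no owserver is listening on localhost:4304
owproxy = protocol.proxy('localhost', 4304, persistent=True)
print(owproxy.dir('/'))                               # top-level entities
print(owproxy.read('/10.67C6697351FF/temperature'))   # hypothetical DS18S20 sensor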
miccoli/pyownet
src/pyownet/protocol.py
clone
def clone(proxy, persistent=True): """factory function for cloning a proxy object""" if not isinstance(proxy, _Proxy): raise TypeError('argument is not a Proxy object') if persistent: pclass = _PersistentProxy else: pclass = _Proxy return pclass(proxy._family, proxy._sockaddr, proxy.flags & ~FLG_PERSISTENCE, proxy.verbose, proxy.errmess)
python
def clone(proxy, persistent=True): """factory function for cloning a proxy object""" if not isinstance(proxy, _Proxy): raise TypeError('argument is not a Proxy object') if persistent: pclass = _PersistentProxy else: pclass = _Proxy return pclass(proxy._family, proxy._sockaddr, proxy.flags & ~FLG_PERSISTENCE, proxy.verbose, proxy.errmess)
[ "def", "clone", "(", "proxy", ",", "persistent", "=", "True", ")", ":", "if", "not", "isinstance", "(", "proxy", ",", "_Proxy", ")", ":", "raise", "TypeError", "(", "'argument is not a Proxy object'", ")", "if", "persistent", ":", "pclass", "=", "_PersistentProxy", "else", ":", "pclass", "=", "_Proxy", "return", "pclass", "(", "proxy", ".", "_family", ",", "proxy", ".", "_sockaddr", ",", "proxy", ".", "flags", "&", "~", "FLG_PERSISTENCE", ",", "proxy", ".", "verbose", ",", "proxy", ".", "errmess", ")" ]
factory function for cloning a proxy object
[ "factory", "function", "for", "cloning", "a", "proxy", "object" ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/src/pyownet/protocol.py#L753-L765
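clone() is handy for giving each worker its own persistent connection from a shared template proxy; a hedged sketch (the per-worker use case is an assumption, not prescribed by the record).
from pyownet import protocol

base = protocol.proxy('localhost')                     # shared, non-persistent template
with protocol.clone(base, persistent=True) as pproxy:  # e.g. one clone per worker thread
    for entity in pproxy.dir('/'):
        print(entity)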
miccoli/pyownet
src/pyownet/protocol.py
_OwnetConnection.shutdown
def shutdown(self): """shutdown connection""" if self.verbose: print(self.socket.getsockname(), 'xx', self.peername) try: self.socket.shutdown(socket.SHUT_RDWR) except IOError as err: assert err.errno is _ENOTCONN, "unexpected IOError: %s" % err # remote peer has already closed the connection, # just ignore the exceeption pass
python
def shutdown(self): """shutdown connection""" if self.verbose: print(self.socket.getsockname(), 'xx', self.peername) try: self.socket.shutdown(socket.SHUT_RDWR) except IOError as err: assert err.errno is _ENOTCONN, "unexpected IOError: %s" % err # remote peer has already closed the connection, # just ignore the exceeption pass
[ "def", "shutdown", "(", "self", ")", ":", "if", "self", ".", "verbose", ":", "print", "(", "self", ".", "socket", ".", "getsockname", "(", ")", ",", "'xx'", ",", "self", ".", "peername", ")", "try", ":", "self", ".", "socket", ".", "shutdown", "(", "socket", ".", "SHUT_RDWR", ")", "except", "IOError", "as", "err", ":", "assert", "err", ".", "errno", "is", "_ENOTCONN", ",", "\"unexpected IOError: %s\"", "%", "err", "# remote peer has already closed the connection,", "# just ignore the exceeption", "pass" ]
shutdown connection
[ "shutdown", "connection" ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/src/pyownet/protocol.py#L391-L403
miccoli/pyownet
src/pyownet/protocol.py
_OwnetConnection.req
def req(self, msgtype, payload, flags, size=0, offset=0, timeout=0): """send message to server and return response""" if timeout < 0: raise ValueError("timeout cannot be negative!") tohead = _ToServerHeader(payload=len(payload), type=msgtype, flags=flags, size=size, offset=offset) tstartcom = monotonic() # set timer when communication begins self._send_msg(tohead, payload) while True: fromhead, data = self._read_msg() if fromhead.payload >= 0: # we received a valid answer and return the result return fromhead.ret, fromhead.flags, data assert msgtype != MSG_NOP # we did not exit the loop because payload is negative # Server said PING to keep connection alive during lenghty op # check if timeout has expired if timeout: tcom = monotonic() - tstartcom if tcom > timeout: raise OwnetTimeout(tcom, timeout)
python
def req(self, msgtype, payload, flags, size=0, offset=0, timeout=0): """send message to server and return response""" if timeout < 0: raise ValueError("timeout cannot be negative!") tohead = _ToServerHeader(payload=len(payload), type=msgtype, flags=flags, size=size, offset=offset) tstartcom = monotonic() # set timer when communication begins self._send_msg(tohead, payload) while True: fromhead, data = self._read_msg() if fromhead.payload >= 0: # we received a valid answer and return the result return fromhead.ret, fromhead.flags, data assert msgtype != MSG_NOP # we did not exit the loop because payload is negative # Server said PING to keep connection alive during lenghty op # check if timeout has expired if timeout: tcom = monotonic() - tstartcom if tcom > timeout: raise OwnetTimeout(tcom, timeout)
[ "def", "req", "(", "self", ",", "msgtype", ",", "payload", ",", "flags", ",", "size", "=", "0", ",", "offset", "=", "0", ",", "timeout", "=", "0", ")", ":", "if", "timeout", "<", "0", ":", "raise", "ValueError", "(", "\"timeout cannot be negative!\"", ")", "tohead", "=", "_ToServerHeader", "(", "payload", "=", "len", "(", "payload", ")", ",", "type", "=", "msgtype", ",", "flags", "=", "flags", ",", "size", "=", "size", ",", "offset", "=", "offset", ")", "tstartcom", "=", "monotonic", "(", ")", "# set timer when communication begins", "self", ".", "_send_msg", "(", "tohead", ",", "payload", ")", "while", "True", ":", "fromhead", ",", "data", "=", "self", ".", "_read_msg", "(", ")", "if", "fromhead", ".", "payload", ">=", "0", ":", "# we received a valid answer and return the result", "return", "fromhead", ".", "ret", ",", "fromhead", ".", "flags", ",", "data", "assert", "msgtype", "!=", "MSG_NOP", "# we did not exit the loop because payload is negative", "# Server said PING to keep connection alive during lenghty op", "# check if timeout has expired", "if", "timeout", ":", "tcom", "=", "monotonic", "(", ")", "-", "tstartcom", "if", "tcom", ">", "timeout", ":", "raise", "OwnetTimeout", "(", "tcom", ",", "timeout", ")" ]
send message to server and return response
[ "send", "message", "to", "server", "and", "return", "response" ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/src/pyownet/protocol.py#L408-L436
miccoli/pyownet
src/pyownet/protocol.py
_OwnetConnection._send_msg
def _send_msg(self, header, payload): """send message to server""" if self.verbose: print('->', repr(header)) print('..', repr(payload)) assert header.payload == len(payload) try: sent = self.socket.send(header + payload) except IOError as err: raise ConnError(*err.args) # FIXME FIXME FIXME: # investigate under which situations socket.send should be retried # instead of aborted. # FIXME FIXME FIXME if sent < len(header + payload): raise ShortWrite(sent, len(header + payload)) assert sent == len(header + payload), sent
python
def _send_msg(self, header, payload): """send message to server""" if self.verbose: print('->', repr(header)) print('..', repr(payload)) assert header.payload == len(payload) try: sent = self.socket.send(header + payload) except IOError as err: raise ConnError(*err.args) # FIXME FIXME FIXME: # investigate under which situations socket.send should be retried # instead of aborted. # FIXME FIXME FIXME if sent < len(header + payload): raise ShortWrite(sent, len(header + payload)) assert sent == len(header + payload), sent
[ "def", "_send_msg", "(", "self", ",", "header", ",", "payload", ")", ":", "if", "self", ".", "verbose", ":", "print", "(", "'->'", ",", "repr", "(", "header", ")", ")", "print", "(", "'..'", ",", "repr", "(", "payload", ")", ")", "assert", "header", ".", "payload", "==", "len", "(", "payload", ")", "try", ":", "sent", "=", "self", ".", "socket", ".", "send", "(", "header", "+", "payload", ")", "except", "IOError", "as", "err", ":", "raise", "ConnError", "(", "*", "err", ".", "args", ")", "# FIXME FIXME FIXME:", "# investigate under which situations socket.send should be retried", "# instead of aborted.", "# FIXME FIXME FIXME", "if", "sent", "<", "len", "(", "header", "+", "payload", ")", ":", "raise", "ShortWrite", "(", "sent", ",", "len", "(", "header", "+", "payload", ")", ")", "assert", "sent", "==", "len", "(", "header", "+", "payload", ")", ",", "sent" ]
send message to server
[ "send", "message", "to", "server" ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/src/pyownet/protocol.py#L438-L456
miccoli/pyownet
src/pyownet/protocol.py
_OwnetConnection._read_msg
def _read_msg(self): """read message from server""" # # NOTE: # '_recv_socket(nbytes)' was implemented as # 'socket.recv(nbytes, socket.MSG_WAITALL)' # but socket.MSG_WAITALL proved not reliable # def _recv_socket(nbytes): """read nbytes bytes from self.socket""" # # code below is written under the assumption that # 'nbytes' is smallish so that the 'while len(buf) < nbytes' loop # is entered rarerly # try: buf = self.socket.recv(nbytes) except IOError as err: raise ConnError(*err.args) if not buf: raise ShortRead(0, nbytes) while len(buf) < nbytes: try: tmp = self.socket.recv(nbytes - len(buf)) except IOError as err: raise ConnError(*err.args) if not tmp: if self.verbose: print('ee', repr(buf)) raise ShortRead(len(buf), nbytes) buf += tmp assert len(buf) == nbytes, (buf, len(buf), nbytes) return buf data = _recv_socket(_FromServerHeader.header_size) header = _FromServerHeader(data) if self.verbose: print('<-', repr(header)) # error conditions if header.version != 0: raise MalformedHeader('bad version', header) if header.payload > MAX_PAYLOAD: raise MalformedHeader('huge payload, unwilling to read', header) if header.payload > 0: payload = _recv_socket(header.payload) if self.verbose: print('..', repr(payload)) assert header.size <= header.payload payload = payload[:header.size] else: payload = bytes() return header, payload
python
def _read_msg(self): """read message from server""" # # NOTE: # '_recv_socket(nbytes)' was implemented as # 'socket.recv(nbytes, socket.MSG_WAITALL)' # but socket.MSG_WAITALL proved not reliable # def _recv_socket(nbytes): """read nbytes bytes from self.socket""" # # code below is written under the assumption that # 'nbytes' is smallish so that the 'while len(buf) < nbytes' loop # is entered rarerly # try: buf = self.socket.recv(nbytes) except IOError as err: raise ConnError(*err.args) if not buf: raise ShortRead(0, nbytes) while len(buf) < nbytes: try: tmp = self.socket.recv(nbytes - len(buf)) except IOError as err: raise ConnError(*err.args) if not tmp: if self.verbose: print('ee', repr(buf)) raise ShortRead(len(buf), nbytes) buf += tmp assert len(buf) == nbytes, (buf, len(buf), nbytes) return buf data = _recv_socket(_FromServerHeader.header_size) header = _FromServerHeader(data) if self.verbose: print('<-', repr(header)) # error conditions if header.version != 0: raise MalformedHeader('bad version', header) if header.payload > MAX_PAYLOAD: raise MalformedHeader('huge payload, unwilling to read', header) if header.payload > 0: payload = _recv_socket(header.payload) if self.verbose: print('..', repr(payload)) assert header.size <= header.payload payload = payload[:header.size] else: payload = bytes() return header, payload
[ "def", "_read_msg", "(", "self", ")", ":", "#", "# NOTE:", "# '_recv_socket(nbytes)' was implemented as", "# 'socket.recv(nbytes, socket.MSG_WAITALL)'", "# but socket.MSG_WAITALL proved not reliable", "#", "def", "_recv_socket", "(", "nbytes", ")", ":", "\"\"\"read nbytes bytes from self.socket\"\"\"", "#", "# code below is written under the assumption that", "# 'nbytes' is smallish so that the 'while len(buf) < nbytes' loop", "# is entered rarerly", "#", "try", ":", "buf", "=", "self", ".", "socket", ".", "recv", "(", "nbytes", ")", "except", "IOError", "as", "err", ":", "raise", "ConnError", "(", "*", "err", ".", "args", ")", "if", "not", "buf", ":", "raise", "ShortRead", "(", "0", ",", "nbytes", ")", "while", "len", "(", "buf", ")", "<", "nbytes", ":", "try", ":", "tmp", "=", "self", ".", "socket", ".", "recv", "(", "nbytes", "-", "len", "(", "buf", ")", ")", "except", "IOError", "as", "err", ":", "raise", "ConnError", "(", "*", "err", ".", "args", ")", "if", "not", "tmp", ":", "if", "self", ".", "verbose", ":", "print", "(", "'ee'", ",", "repr", "(", "buf", ")", ")", "raise", "ShortRead", "(", "len", "(", "buf", ")", ",", "nbytes", ")", "buf", "+=", "tmp", "assert", "len", "(", "buf", ")", "==", "nbytes", ",", "(", "buf", ",", "len", "(", "buf", ")", ",", "nbytes", ")", "return", "buf", "data", "=", "_recv_socket", "(", "_FromServerHeader", ".", "header_size", ")", "header", "=", "_FromServerHeader", "(", "data", ")", "if", "self", ".", "verbose", ":", "print", "(", "'<-'", ",", "repr", "(", "header", ")", ")", "# error conditions", "if", "header", ".", "version", "!=", "0", ":", "raise", "MalformedHeader", "(", "'bad version'", ",", "header", ")", "if", "header", ".", "payload", ">", "MAX_PAYLOAD", ":", "raise", "MalformedHeader", "(", "'huge payload, unwilling to read'", ",", "header", ")", "if", "header", ".", "payload", ">", "0", ":", "payload", "=", "_recv_socket", "(", "header", ".", "payload", ")", "if", "self", ".", "verbose", ":", "print", "(", "'..'", ",", "repr", "(", "payload", ")", ")", "assert", "header", ".", "size", "<=", "header", ".", "payload", "payload", "=", "payload", "[", ":", "header", ".", "size", "]", "else", ":", "payload", "=", "bytes", "(", ")", "return", "header", ",", "payload" ]
read message from server
[ "read", "message", "from", "server" ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/src/pyownet/protocol.py#L458-L519
miccoli/pyownet
src/pyownet/protocol.py
_Proxy.sendmess
def sendmess(self, msgtype, payload, flags=0, size=0, offset=0, timeout=0): """ retcode, data = sendmess(msgtype, payload) send generic message and returns retcode, data """ flags |= self.flags assert not (flags & FLG_PERSISTENCE) with self._new_connection() as conn: ret, _, data = conn.req( msgtype, payload, flags, size, offset, timeout) return ret, data
python
def sendmess(self, msgtype, payload, flags=0, size=0, offset=0, timeout=0): """ retcode, data = sendmess(msgtype, payload) send generic message and returns retcode, data """ flags |= self.flags assert not (flags & FLG_PERSISTENCE) with self._new_connection() as conn: ret, _, data = conn.req( msgtype, payload, flags, size, offset, timeout) return ret, data
[ "def", "sendmess", "(", "self", ",", "msgtype", ",", "payload", ",", "flags", "=", "0", ",", "size", "=", "0", ",", "offset", "=", "0", ",", "timeout", "=", "0", ")", ":", "flags", "|=", "self", ".", "flags", "assert", "not", "(", "flags", "&", "FLG_PERSISTENCE", ")", "with", "self", ".", "_new_connection", "(", ")", "as", "conn", ":", "ret", ",", "_", ",", "data", "=", "conn", ".", "req", "(", "msgtype", ",", "payload", ",", "flags", ",", "size", ",", "offset", ",", "timeout", ")", "return", "ret", ",", "data" ]
retcode, data = sendmess(msgtype, payload) send generic message and returns retcode, data
[ "retcode", "data", "=", "sendmess", "(", "msgtype", "payload", ")", "send", "generic", "message", "and", "returns", "retcode", "data" ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/src/pyownet/protocol.py#L563-L575
miccoli/pyownet
src/pyownet/protocol.py
_Proxy.ping
def ping(self): """sends a NOP packet and waits response; returns None""" ret, data = self.sendmess(MSG_NOP, bytes()) if data or ret > 0: raise ProtocolError('invalid reply to ping message') if ret < 0: raise OwnetError(-ret, self.errmess[-ret])
python
def ping(self): """sends a NOP packet and waits response; returns None""" ret, data = self.sendmess(MSG_NOP, bytes()) if data or ret > 0: raise ProtocolError('invalid reply to ping message') if ret < 0: raise OwnetError(-ret, self.errmess[-ret])
[ "def", "ping", "(", "self", ")", ":", "ret", ",", "data", "=", "self", ".", "sendmess", "(", "MSG_NOP", ",", "bytes", "(", ")", ")", "if", "data", "or", "ret", ">", "0", ":", "raise", "ProtocolError", "(", "'invalid reply to ping message'", ")", "if", "ret", "<", "0", ":", "raise", "OwnetError", "(", "-", "ret", ",", "self", ".", "errmess", "[", "-", "ret", "]", ")" ]
sends a NOP packet and waits response; returns None
[ "sends", "a", "NOP", "packet", "and", "waits", "response", ";", "returns", "None" ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/src/pyownet/protocol.py#L577-L584
miccoli/pyownet
src/pyownet/protocol.py
_Proxy.present
def present(self, path, timeout=0): """returns True if there is an entity at path""" ret, data = self.sendmess(MSG_PRESENCE, str2bytez(path), timeout=timeout) assert ret <= 0 and not data, (ret, data) if ret < 0: return False else: return True
python
def present(self, path, timeout=0): """returns True if there is an entity at path""" ret, data = self.sendmess(MSG_PRESENCE, str2bytez(path), timeout=timeout) assert ret <= 0 and not data, (ret, data) if ret < 0: return False else: return True
[ "def", "present", "(", "self", ",", "path", ",", "timeout", "=", "0", ")", ":", "ret", ",", "data", "=", "self", ".", "sendmess", "(", "MSG_PRESENCE", ",", "str2bytez", "(", "path", ")", ",", "timeout", "=", "timeout", ")", "assert", "ret", "<=", "0", "and", "not", "data", ",", "(", "ret", ",", "data", ")", "if", "ret", "<", "0", ":", "return", "False", "else", ":", "return", "True" ]
returns True if there is an entity at path
[ "returns", "True", "if", "there", "is", "an", "entity", "at", "path" ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/src/pyownet/protocol.py#L586-L595
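present() allows a cheap existence check before reading; a short sketch with a hypothetical sensor ID.
from pyownet import protocol

owproxy = protocol.proxy('localhost')
path = '/10.67C6697351FF/temperature'   # hypothetical device
if owproxy.present(path):
    print('temperature:', float(owproxy.read(path)))
else:
    print('sensor not found on the bus')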
miccoli/pyownet
src/pyownet/protocol.py
_Proxy.dir
def dir(self, path='/', slash=True, bus=False, timeout=0): """list entities at path""" if slash: msg = MSG_DIRALLSLASH else: msg = MSG_DIRALL if bus: flags = self.flags | FLG_BUS_RET else: flags = self.flags & ~FLG_BUS_RET ret, data = self.sendmess(msg, str2bytez(path), flags, timeout=timeout) if ret < 0: raise OwnetError(-ret, self.errmess[-ret], path) if data: return bytes2str(data).split(',') else: return []
python
def dir(self, path='/', slash=True, bus=False, timeout=0): """list entities at path""" if slash: msg = MSG_DIRALLSLASH else: msg = MSG_DIRALL if bus: flags = self.flags | FLG_BUS_RET else: flags = self.flags & ~FLG_BUS_RET ret, data = self.sendmess(msg, str2bytez(path), flags, timeout=timeout) if ret < 0: raise OwnetError(-ret, self.errmess[-ret], path) if data: return bytes2str(data).split(',') else: return []
[ "def", "dir", "(", "self", ",", "path", "=", "'/'", ",", "slash", "=", "True", ",", "bus", "=", "False", ",", "timeout", "=", "0", ")", ":", "if", "slash", ":", "msg", "=", "MSG_DIRALLSLASH", "else", ":", "msg", "=", "MSG_DIRALL", "if", "bus", ":", "flags", "=", "self", ".", "flags", "|", "FLG_BUS_RET", "else", ":", "flags", "=", "self", ".", "flags", "&", "~", "FLG_BUS_RET", "ret", ",", "data", "=", "self", ".", "sendmess", "(", "msg", ",", "str2bytez", "(", "path", ")", ",", "flags", ",", "timeout", "=", "timeout", ")", "if", "ret", "<", "0", ":", "raise", "OwnetError", "(", "-", "ret", ",", "self", ".", "errmess", "[", "-", "ret", "]", ",", "path", ")", "if", "data", ":", "return", "bytes2str", "(", "data", ")", ".", "split", "(", "','", ")", "else", ":", "return", "[", "]" ]
list entities at path
[ "list", "entities", "at", "path" ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/src/pyownet/protocol.py#L597-L615
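A sketch of dir() with the flags documented above: slash=True returns directory entries with a trailing '/', bus=False hides the system and settings branches.
from pyownet import protocol

owproxy = protocol.proxy('localhost')
for entity in owproxy.dir('/', slash=True, bus=False):
    print(entity)   # e.g. '/10.67C6697351FF/' for a sensor (ID hypothetical)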
miccoli/pyownet
src/pyownet/protocol.py
_Proxy.read
def read(self, path, size=MAX_PAYLOAD, offset=0, timeout=0): """read data at path""" if size > MAX_PAYLOAD: raise ValueError("size cannot exceed %d" % MAX_PAYLOAD) ret, data = self.sendmess(MSG_READ, str2bytez(path), size=size, offset=offset, timeout=timeout) if ret < 0: raise OwnetError(-ret, self.errmess[-ret], path) return data
python
def read(self, path, size=MAX_PAYLOAD, offset=0, timeout=0): """read data at path""" if size > MAX_PAYLOAD: raise ValueError("size cannot exceed %d" % MAX_PAYLOAD) ret, data = self.sendmess(MSG_READ, str2bytez(path), size=size, offset=offset, timeout=timeout) if ret < 0: raise OwnetError(-ret, self.errmess[-ret], path) return data
[ "def", "read", "(", "self", ",", "path", ",", "size", "=", "MAX_PAYLOAD", ",", "offset", "=", "0", ",", "timeout", "=", "0", ")", ":", "if", "size", ">", "MAX_PAYLOAD", ":", "raise", "ValueError", "(", "\"size cannot exceed %d\"", "%", "MAX_PAYLOAD", ")", "ret", ",", "data", "=", "self", ".", "sendmess", "(", "MSG_READ", ",", "str2bytez", "(", "path", ")", ",", "size", "=", "size", ",", "offset", "=", "offset", ",", "timeout", "=", "timeout", ")", "if", "ret", "<", "0", ":", "raise", "OwnetError", "(", "-", "ret", ",", "self", ".", "errmess", "[", "-", "ret", "]", ",", "path", ")", "return", "data" ]
read data at path
[ "read", "data", "at", "path" ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/src/pyownet/protocol.py#L617-L627
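read() returns raw bytes, so callers decode or convert as needed; a sketch with a hypothetical DS18B20 ID and an explicit timeout.
from pyownet import protocol

owproxy = protocol.proxy('localhost')
raw = owproxy.read('/28.FF4C60021503/temperature', timeout=5)  # bytes, e.g. b'    22.375'
print(float(raw))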
miccoli/pyownet
src/pyownet/protocol.py
_Proxy.write
def write(self, path, data, offset=0, timeout=0): """write data at path path is a string, data binary; it is responsability of the caller ensure proper encoding. """ # fixme: check of path type delayed to str2bytez if not isinstance(data, (bytes, bytearray, )): raise TypeError("'data' argument must be binary") ret, rdata = self.sendmess(MSG_WRITE, str2bytez(path) + data, size=len(data), offset=offset, timeout=timeout) assert not rdata, (ret, rdata) if ret < 0: raise OwnetError(-ret, self.errmess[-ret], path)
python
def write(self, path, data, offset=0, timeout=0): """write data at path path is a string, data binary; it is responsability of the caller ensure proper encoding. """ # fixme: check of path type delayed to str2bytez if not isinstance(data, (bytes, bytearray, )): raise TypeError("'data' argument must be binary") ret, rdata = self.sendmess(MSG_WRITE, str2bytez(path) + data, size=len(data), offset=offset, timeout=timeout) assert not rdata, (ret, rdata) if ret < 0: raise OwnetError(-ret, self.errmess[-ret], path)
[ "def", "write", "(", "self", ",", "path", ",", "data", ",", "offset", "=", "0", ",", "timeout", "=", "0", ")", ":", "# fixme: check of path type delayed to str2bytez", "if", "not", "isinstance", "(", "data", ",", "(", "bytes", ",", "bytearray", ",", ")", ")", ":", "raise", "TypeError", "(", "\"'data' argument must be binary\"", ")", "ret", ",", "rdata", "=", "self", ".", "sendmess", "(", "MSG_WRITE", ",", "str2bytez", "(", "path", ")", "+", "data", ",", "size", "=", "len", "(", "data", ")", ",", "offset", "=", "offset", ",", "timeout", "=", "timeout", ")", "assert", "not", "rdata", ",", "(", "ret", ",", "rdata", ")", "if", "ret", "<", "0", ":", "raise", "OwnetError", "(", "-", "ret", ",", "self", ".", "errmess", "[", "-", "ret", "]", ",", "path", ")" ]
write data at path path is a string, data binary; it is responsability of the caller ensure proper encoding.
[ "write", "data", "at", "path" ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/src/pyownet/protocol.py#L629-L645
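write() insists on binary payloads; a sketch toggling a PIO pin on a hypothetical switch device (the device ID and property path are assumptions).
from pyownet import protocol

owproxy = protocol.proxy('localhost')
# str payloads must be encoded by the caller, e.g. '1'.encode() or b'1'
owproxy.write('/05.4AEC29CDBAAB/PIO', b'1')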
miccoli/pyownet
src/pyownet/protocol.py
_PersistentProxy.sendmess
def sendmess(self, msgtype, payload, flags=0, size=0, offset=0, timeout=0): """ retcode, data = sendmess(msgtype, payload) send generic message and returns retcode, data """ # reuse last valid connection or create new conn = self.conn or self._new_connection() # invalidate last connection self.conn = None flags |= self.flags assert (flags & FLG_PERSISTENCE) ret, rflags, data = conn.req( msgtype, payload, flags, size, offset, timeout) if rflags & FLG_PERSISTENCE: # persistence granted, save connection object for reuse self.conn = conn else: # discard connection object conn.shutdown() return ret, data
python
def sendmess(self, msgtype, payload, flags=0, size=0, offset=0, timeout=0): """ retcode, data = sendmess(msgtype, payload) send generic message and returns retcode, data """ # reuse last valid connection or create new conn = self.conn or self._new_connection() # invalidate last connection self.conn = None flags |= self.flags assert (flags & FLG_PERSISTENCE) ret, rflags, data = conn.req( msgtype, payload, flags, size, offset, timeout) if rflags & FLG_PERSISTENCE: # persistence granted, save connection object for reuse self.conn = conn else: # discard connection object conn.shutdown() return ret, data
[ "def", "sendmess", "(", "self", ",", "msgtype", ",", "payload", ",", "flags", "=", "0", ",", "size", "=", "0", ",", "offset", "=", "0", ",", "timeout", "=", "0", ")", ":", "# reuse last valid connection or create new", "conn", "=", "self", ".", "conn", "or", "self", ".", "_new_connection", "(", ")", "# invalidate last connection", "self", ".", "conn", "=", "None", "flags", "|=", "self", ".", "flags", "assert", "(", "flags", "&", "FLG_PERSISTENCE", ")", "ret", ",", "rflags", ",", "data", "=", "conn", ".", "req", "(", "msgtype", ",", "payload", ",", "flags", ",", "size", ",", "offset", ",", "timeout", ")", "if", "rflags", "&", "FLG_PERSISTENCE", ":", "# persistence granted, save connection object for reuse", "self", ".", "conn", "=", "conn", "else", ":", "# discard connection object", "conn", ".", "shutdown", "(", ")", "return", "ret", ",", "data" ]
retcode, data = sendmess(msgtype, payload) send generic message and returns retcode, data
[ "retcode", "data", "=", "sendmess", "(", "msgtype", "payload", ")", "send", "generic", "message", "and", "returns", "retcode", "data" ]
train
https://github.com/miccoli/pyownet/blob/190afea6a72705772b942d7929bc0aa6561043e0/src/pyownet/protocol.py#L679-L701
SeanOC/sharpy
sharpy/product.py
CheddarProduct.get_customers
def get_customers(self, filter_data=None): ''' Returns all customers. Sometimes they are too much and cause internal server errors on CG. API call permits post parameters for filtering which tends to fix this https://cheddargetter.com/developers#all-customers filter_data Will be processed by urlencode and can be used for filtering Example value: [ ("subscriptionStatus": "activeOnly"), ("planCode[]": "100GB"), ("planCode[]": "200GB") ] ''' customers = [] try: response = self.client.make_request(path='customers/get', data=filter_data) except NotFound: response = None if response: customer_parser = CustomersParser() customers_data = customer_parser.parse_xml(response.content) for customer_data in customers_data: customers.append(Customer(product=self, **customer_data)) return customers
python
def get_customers(self, filter_data=None): ''' Returns all customers. Sometimes they are too much and cause internal server errors on CG. API call permits post parameters for filtering which tends to fix this https://cheddargetter.com/developers#all-customers filter_data Will be processed by urlencode and can be used for filtering Example value: [ ("subscriptionStatus": "activeOnly"), ("planCode[]": "100GB"), ("planCode[]": "200GB") ] ''' customers = [] try: response = self.client.make_request(path='customers/get', data=filter_data) except NotFound: response = None if response: customer_parser = CustomersParser() customers_data = customer_parser.parse_xml(response.content) for customer_data in customers_data: customers.append(Customer(product=self, **customer_data)) return customers
[ "def", "get_customers", "(", "self", ",", "filter_data", "=", "None", ")", ":", "customers", "=", "[", "]", "try", ":", "response", "=", "self", ".", "client", ".", "make_request", "(", "path", "=", "'customers/get'", ",", "data", "=", "filter_data", ")", "except", "NotFound", ":", "response", "=", "None", "if", "response", ":", "customer_parser", "=", "CustomersParser", "(", ")", "customers_data", "=", "customer_parser", ".", "parse_xml", "(", "response", ".", "content", ")", "for", "customer_data", "in", "customers_data", ":", "customers", ".", "append", "(", "Customer", "(", "product", "=", "self", ",", "*", "*", "customer_data", ")", ")", "return", "customers" ]
Returns all customers. Sometimes they are too much and cause internal server errors on CG. API call permits post parameters for filtering which tends to fix this https://cheddargetter.com/developers#all-customers filter_data Will be processed by urlencode and can be used for filtering Example value: [ ("subscriptionStatus": "activeOnly"), ("planCode[]": "100GB"), ("planCode[]": "200GB") ]
[ "Returns", "all", "customers", ".", "Sometimes", "they", "are", "too", "much", "and", "cause", "internal", "server", "errors", "on", "CG", ".", "API", "call", "permits", "post", "parameters", "for", "filtering", "which", "tends", "to", "fix", "this", "https", ":", "//", "cheddargetter", ".", "com", "/", "developers#all", "-", "customers" ]
train
https://github.com/SeanOC/sharpy/blob/935943ca86034255f0a93c1a84734814be176ed4/sharpy/product.py#L214-L241
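A sketch of the filter_data format described in the docstring above; `product` is assumed to be an already-configured CheddarProduct and the plan codes are illustrative.
# filter_data is passed through urlencode, so repeated keys use the 'planCode[]' form
filter_data = [
    ('subscriptionStatus', 'activeOnly'),
    ('planCode[]', '100GB'),
    ('planCode[]', '200GB'),
]
for customer in product.get_customers(filter_data=filter_data):
    print(customer.code)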
SeanOC/sharpy
sharpy/product.py
CheddarProduct.delete_all_customers
def delete_all_customers(self): ''' This method does exactly what you think it does. Calling this method deletes all customer data in your cheddar product and the configured gateway. This action cannot be undone. DO NOT RUN THIS UNLESS YOU REALLY, REALLY, REALLY MEAN TO! ''' response = self.client.make_request( path='customers/delete-all/confirm/%d' % int(time()), method='POST' )
python
def delete_all_customers(self): ''' This method does exactly what you think it does. Calling this method deletes all customer data in your cheddar product and the configured gateway. This action cannot be undone. DO NOT RUN THIS UNLESS YOU REALLY, REALLY, REALLY MEAN TO! ''' response = self.client.make_request( path='customers/delete-all/confirm/%d' % int(time()), method='POST' )
[ "def", "delete_all_customers", "(", "self", ")", ":", "response", "=", "self", ".", "client", ".", "make_request", "(", "path", "=", "'customers/delete-all/confirm/%d'", "%", "int", "(", "time", "(", ")", ")", ",", "method", "=", "'POST'", ")" ]
This method does exactly what you think it does. Calling this method deletes all customer data in your cheddar product and the configured gateway. This action cannot be undone. DO NOT RUN THIS UNLESS YOU REALLY, REALLY, REALLY MEAN TO!
[ "This", "method", "does", "exactly", "what", "you", "think", "it", "does", ".", "Calling", "this", "method", "deletes", "all", "customer", "data", "in", "your", "cheddar", "product", "and", "the", "configured", "gateway", ".", "This", "action", "cannot", "be", "undone", ".", "DO", "NOT", "RUN", "THIS", "UNLESS", "YOU", "REALLY", "REALLY", "REALLY", "MEAN", "TO!" ]
train
https://github.com/SeanOC/sharpy/blob/935943ca86034255f0a93c1a84734814be176ed4/sharpy/product.py#L254-L265
SeanOC/sharpy
sharpy/product.py
PricingPlan.initial_bill_date
def initial_bill_date(self): ''' An estimated initial bill date for an account created today, based on available plan info. ''' time_to_start = None if self.initial_bill_count_unit == 'months': time_to_start = relativedelta(months=self.initial_bill_count) else: time_to_start = relativedelta(days=self.initial_bill_count) initial_bill_date = datetime.utcnow().date() + time_to_start return initial_bill_date
python
def initial_bill_date(self): ''' An estimated initial bill date for an account created today, based on available plan info. ''' time_to_start = None if self.initial_bill_count_unit == 'months': time_to_start = relativedelta(months=self.initial_bill_count) else: time_to_start = relativedelta(days=self.initial_bill_count) initial_bill_date = datetime.utcnow().date() + time_to_start return initial_bill_date
[ "def", "initial_bill_date", "(", "self", ")", ":", "time_to_start", "=", "None", "if", "self", ".", "initial_bill_count_unit", "==", "'months'", ":", "time_to_start", "=", "relativedelta", "(", "months", "=", "self", ".", "initial_bill_count", ")", "else", ":", "time_to_start", "=", "relativedelta", "(", "days", "=", "self", ".", "initial_bill_count", ")", "initial_bill_date", "=", "datetime", ".", "utcnow", "(", ")", ".", "date", "(", ")", "+", "time_to_start", "return", "initial_bill_date" ]
An estimated initial bill date for an account created today, based on available plan info.
[ "An", "estimated", "initial", "bill", "date", "for", "an", "account", "created", "today", "based", "on", "available", "plan", "info", "." ]
train
https://github.com/SeanOC/sharpy/blob/935943ca86034255f0a93c1a84734814be176ed4/sharpy/product.py#L331-L345
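The estimate above is plain dateutil arithmetic; a sketch reproducing it for a plan whose first bill falls one month after signup (the count and unit values are assumptions).
from datetime import datetime
from dateutil.relativedelta import relativedelta

initial_bill_count, unit = 1, 'months'
if unit == 'months':
    delta = relativedelta(months=initial_bill_count)
else:
    delta = relativedelta(days=initial_bill_count)
print(datetime.utcnow().date() + delta)   # estimated initial bill date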
SeanOC/sharpy
sharpy/product.py
Customer.charge
def charge(self, code, each_amount, quantity=1, description=None): ''' Add an arbitrary charge or credit to a customer's account. A positive number will create a charge. A negative number will create a credit. each_amount is normalized to a Decimal with a precision of 2 as that is the level of precision which the cheddar API supports. ''' each_amount = Decimal(each_amount) each_amount = each_amount.quantize(Decimal('.01')) data = { 'chargeCode': code, 'eachAmount': '%.2f' % each_amount, 'quantity': quantity, } if description: data['description'] = description response = self.product.client.make_request( path='customers/add-charge', params={'code': self.code}, data=data, ) return self.load_data_from_xml(response.content)
python
def charge(self, code, each_amount, quantity=1, description=None): ''' Add an arbitrary charge or credit to a customer's account. A positive number will create a charge. A negative number will create a credit. each_amount is normalized to a Decimal with a precision of 2 as that is the level of precision which the cheddar API supports. ''' each_amount = Decimal(each_amount) each_amount = each_amount.quantize(Decimal('.01')) data = { 'chargeCode': code, 'eachAmount': '%.2f' % each_amount, 'quantity': quantity, } if description: data['description'] = description response = self.product.client.make_request( path='customers/add-charge', params={'code': self.code}, data=data, ) return self.load_data_from_xml(response.content)
[ "def", "charge", "(", "self", ",", "code", ",", "each_amount", ",", "quantity", "=", "1", ",", "description", "=", "None", ")", ":", "each_amount", "=", "Decimal", "(", "each_amount", ")", "each_amount", "=", "each_amount", ".", "quantize", "(", "Decimal", "(", "'.01'", ")", ")", "data", "=", "{", "'chargeCode'", ":", "code", ",", "'eachAmount'", ":", "'%.2f'", "%", "each_amount", ",", "'quantity'", ":", "quantity", ",", "}", "if", "description", ":", "data", "[", "'description'", "]", "=", "description", "response", "=", "self", ".", "product", ".", "client", ".", "make_request", "(", "path", "=", "'customers/add-charge'", ",", "params", "=", "{", "'code'", ":", "self", ".", "code", "}", ",", "data", "=", "data", ",", ")", "return", "self", ".", "load_data_from_xml", "(", "response", ".", "content", ")" ]
Add an arbitrary charge or credit to a customer's account. A positive number will create a charge. A negative number will create a credit. each_amount is normalized to a Decimal with a precision of 2 as that is the level of precision which the cheddar API supports.
[ "Add", "an", "arbitrary", "charge", "or", "credit", "to", "a", "customer", "s", "account", ".", "A", "positive", "number", "will", "create", "a", "charge", ".", "A", "negative", "number", "will", "create", "a", "credit", ".", "each_amount", "is", "normalized", "to", "a", "Decimal", "with", "a", "precision", "of", "2", "as", "that", "is", "the", "level", "of", "precision", "which", "the", "cheddar", "API", "supports", "." ]
train
https://github.com/SeanOC/sharpy/blob/935943ca86034255f0a93c1a84734814be176ed4/sharpy/product.py#L482-L505
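A possible call pattern for the charge method above. The CheddarProduct constructor and the get_customer lookup are assumptions based on the library's layout rather than on this record, and every code and amount is invented:

    from decimal import Decimal
    from sharpy.product import CheddarProduct

    # Assumed setup: credentials, product code and customer code are placeholders.
    product = CheddarProduct('user@example.com', 'password', 'PRODUCT_CODE')
    customer = product.get_customer(code='CUSTOMER_CODE')

    # A one-off $4.99 debit, then a $2.00 credit against the same account.
    customer.charge(code='SETUP_FEE', each_amount=Decimal('4.99'))
    customer.charge(code='GOODWILL', each_amount='-2.00', description='Support credit')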
SeanOC/sharpy
sharpy/product.py
Customer.create_one_time_invoice
def create_one_time_invoice(self, charges): ''' Charges should be a list of charges to execute immediately. Each value in the charges diectionary should be a dictionary with the following keys: code Your code for this charge. This code will be displayed in the user's invoice and is limited to 36 characters. quantity A positive integer quantity. If not provided this value will default to 1. each_amount Positive or negative integer or decimal with two digit precision. A positive number will create a charge (debit). A negative number will create a credit. description An optional description for this charge which will be displayed on the user's invoice. ''' data = {} for n, charge in enumerate(charges): each_amount = Decimal(charge['each_amount']) each_amount = each_amount.quantize(Decimal('.01')) data['charges[%d][chargeCode]' % n ] = charge['code'] data['charges[%d][quantity]' % n] = charge.get('quantity', 1) data['charges[%d][eachAmount]' % n] = '%.2f' % each_amount if 'description' in charge.keys(): data['charges[%d][description]' % n] = charge['description'] response = self.product.client.make_request( path='invoices/new', params={'code': self.code}, data=data, ) return self.load_data_from_xml(response.content)
python
def create_one_time_invoice(self, charges): ''' Charges should be a list of charges to execute immediately. Each value in the charges diectionary should be a dictionary with the following keys: code Your code for this charge. This code will be displayed in the user's invoice and is limited to 36 characters. quantity A positive integer quantity. If not provided this value will default to 1. each_amount Positive or negative integer or decimal with two digit precision. A positive number will create a charge (debit). A negative number will create a credit. description An optional description for this charge which will be displayed on the user's invoice. ''' data = {} for n, charge in enumerate(charges): each_amount = Decimal(charge['each_amount']) each_amount = each_amount.quantize(Decimal('.01')) data['charges[%d][chargeCode]' % n ] = charge['code'] data['charges[%d][quantity]' % n] = charge.get('quantity', 1) data['charges[%d][eachAmount]' % n] = '%.2f' % each_amount if 'description' in charge.keys(): data['charges[%d][description]' % n] = charge['description'] response = self.product.client.make_request( path='invoices/new', params={'code': self.code}, data=data, ) return self.load_data_from_xml(response.content)
[ "def", "create_one_time_invoice", "(", "self", ",", "charges", ")", ":", "data", "=", "{", "}", "for", "n", ",", "charge", "in", "enumerate", "(", "charges", ")", ":", "each_amount", "=", "Decimal", "(", "charge", "[", "'each_amount'", "]", ")", "each_amount", "=", "each_amount", ".", "quantize", "(", "Decimal", "(", "'.01'", ")", ")", "data", "[", "'charges[%d][chargeCode]'", "%", "n", "]", "=", "charge", "[", "'code'", "]", "data", "[", "'charges[%d][quantity]'", "%", "n", "]", "=", "charge", ".", "get", "(", "'quantity'", ",", "1", ")", "data", "[", "'charges[%d][eachAmount]'", "%", "n", "]", "=", "'%.2f'", "%", "each_amount", "if", "'description'", "in", "charge", ".", "keys", "(", ")", ":", "data", "[", "'charges[%d][description]'", "%", "n", "]", "=", "charge", "[", "'description'", "]", "response", "=", "self", ".", "product", ".", "client", ".", "make_request", "(", "path", "=", "'invoices/new'", ",", "params", "=", "{", "'code'", ":", "self", ".", "code", "}", ",", "data", "=", "data", ",", ")", "return", "self", ".", "load_data_from_xml", "(", "response", ".", "content", ")" ]
Charges should be a list of charges to execute immediately. Each value in the charges dictionary should be a dictionary with the following keys: code Your code for this charge. This code will be displayed in the user's invoice and is limited to 36 characters. quantity A positive integer quantity. If not provided this value will default to 1. each_amount Positive or negative integer or decimal with two digit precision. A positive number will create a charge (debit). A negative number will create a credit. description An optional description for this charge which will be displayed on the user's invoice.
[ "Charges", "should", "be", "a", "list", "of", "charges", "to", "execute", "immediately", ".", "Each", "value", "in", "the", "charges", "dictionary", "should", "be", "a", "dictionary", "with", "the", "following", "keys", ":" ]
train
https://github.com/SeanOC/sharpy/blob/935943ca86034255f0a93c1a84734814be176ed4/sharpy/product.py#L507-L542
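Reusing the hypothetical customer from the previous sketch, a one-time invoice mixing a debit line and a credit line might look like this; the charge codes are invented and the dictionary keys are the ones listed in the docstring above:

    charges = [
        {'code': 'SETUP', 'each_amount': '49.00', 'description': 'One-time setup'},
        {'code': 'PROMO10', 'each_amount': '-10.00', 'quantity': 1},  # credit line
    ]
    customer.create_one_time_invoice(charges)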
SeanOC/sharpy
sharpy/product.py
Item.set
def set(self, quantity): ''' Set the item's quantity to the passed in amount. If nothing is passed in, a quantity of 1 is assumed. If a decimal value is passsed in, it is rounded to the 4th decimal place as that is the level of precision which the Cheddar API accepts. ''' data = {} data['quantity'] = self._normalize_quantity(quantity) response = self.subscription.customer.product.client.make_request( path = 'customers/set-item-quantity', params = { 'code': self.subscription.customer.code, 'itemCode': self.code, }, data = data, method = 'POST', ) return self.subscription.customer.load_data_from_xml(response.content)
python
def set(self, quantity): ''' Set the item's quantity to the passed in amount. If nothing is passed in, a quantity of 1 is assumed. If a decimal value is passsed in, it is rounded to the 4th decimal place as that is the level of precision which the Cheddar API accepts. ''' data = {} data['quantity'] = self._normalize_quantity(quantity) response = self.subscription.customer.product.client.make_request( path = 'customers/set-item-quantity', params = { 'code': self.subscription.customer.code, 'itemCode': self.code, }, data = data, method = 'POST', ) return self.subscription.customer.load_data_from_xml(response.content)
[ "def", "set", "(", "self", ",", "quantity", ")", ":", "data", "=", "{", "}", "data", "[", "'quantity'", "]", "=", "self", ".", "_normalize_quantity", "(", "quantity", ")", "response", "=", "self", ".", "subscription", ".", "customer", ".", "product", ".", "client", ".", "make_request", "(", "path", "=", "'customers/set-item-quantity'", ",", "params", "=", "{", "'code'", ":", "self", ".", "subscription", ".", "customer", ".", "code", ",", "'itemCode'", ":", "self", ".", "code", ",", "}", ",", "data", "=", "data", ",", "method", "=", "'POST'", ",", ")", "return", "self", ".", "subscription", ".", "customer", ".", "load_data_from_xml", "(", "response", ".", "content", ")" ]
Set the item's quantity to the passed in amount. If nothing is passed in, a quantity of 1 is assumed. If a decimal value is passed in, it is rounded to the 4th decimal place as that is the level of precision which the Cheddar API accepts.
[ "Set", "the", "item", "s", "quantity", "to", "the", "passed", "in", "amount", ".", "If", "nothing", "is", "passed", "in", "a", "quantity", "of", "1", "is", "assumed", ".", "If", "a", "decimal", "value", "is", "passed", "in", "it", "is", "rounded", "to", "the", "4th", "decimal", "place", "as", "that", "is", "the", "level", "of", "precision", "which", "the", "Cheddar", "API", "accepts", "." ]
train
https://github.com/SeanOC/sharpy/blob/935943ca86034255f0a93c1a84734814be176ed4/sharpy/product.py#L746-L766
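A hedged usage sketch for set. How an Item is looked up from a customer's subscription is not shown in this record, so the lookup below and the item code are purely illustrative:

    from decimal import Decimal

    # Hypothetical lookup and item code; only item.set() itself is documented above.
    item = customer.subscription.items['MONITORED_SITES']
    item.set(Decimal('3.14159'))  # quantity is normalized to 4 decimal places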
rliebz/whoswho
whoswho/model.py
Name.deep_compare
def deep_compare(self, other, settings): """ Compares each field of the name one at a time to see if they match. Each name field has context-specific comparison logic. :param Name other: other Name for comparison :return bool: whether the two names are compatible """ if not self._is_compatible_with(other): return False first, middle, last = self._compare_components(other, settings) return first and middle and last
python
def deep_compare(self, other, settings): """ Compares each field of the name one at a time to see if they match. Each name field has context-specific comparison logic. :param Name other: other Name for comparison :return bool: whether the two names are compatible """ if not self._is_compatible_with(other): return False first, middle, last = self._compare_components(other, settings) return first and middle and last
[ "def", "deep_compare", "(", "self", ",", "other", ",", "settings", ")", ":", "if", "not", "self", ".", "_is_compatible_with", "(", "other", ")", ":", "return", "False", "first", ",", "middle", ",", "last", "=", "self", ".", "_compare_components", "(", "other", ",", "settings", ")", "return", "first", "and", "middle", "and", "last" ]
Compares each field of the name one at a time to see if they match. Each name field has context-specific comparison logic. :param Name other: other Name for comparison :return bool: whether the two names are compatible
[ "Compares", "each", "field", "of", "the", "name", "one", "at", "a", "time", "to", "see", "if", "they", "match", ".", "Each", "name", "field", "has", "context", "-", "specific", "comparison", "logic", "." ]
train
https://github.com/rliebz/whoswho/blob/0c411e418c240fcec6ea0a23d15bd003056c65d0/whoswho/model.py#L25-L39
rliebz/whoswho
whoswho/model.py
Name.ratio_deep_compare
def ratio_deep_compare(self, other, settings): """ Compares each field of the name one at a time to see if they match. Each name field has context-specific comparison logic. :param Name other: other Name for comparison :return int: sequence ratio match (out of 100) """ if not self._is_compatible_with(other): return 0 first, middle, last = self._compare_components(other, settings, True) f_weight, m_weight, l_weight = self._determine_weights(other, settings) total_weight = f_weight + m_weight + l_weight result = ( first * f_weight + middle * m_weight + last * l_weight ) / total_weight return result
python
def ratio_deep_compare(self, other, settings): """ Compares each field of the name one at a time to see if they match. Each name field has context-specific comparison logic. :param Name other: other Name for comparison :return int: sequence ratio match (out of 100) """ if not self._is_compatible_with(other): return 0 first, middle, last = self._compare_components(other, settings, True) f_weight, m_weight, l_weight = self._determine_weights(other, settings) total_weight = f_weight + m_weight + l_weight result = ( first * f_weight + middle * m_weight + last * l_weight ) / total_weight return result
[ "def", "ratio_deep_compare", "(", "self", ",", "other", ",", "settings", ")", ":", "if", "not", "self", ".", "_is_compatible_with", "(", "other", ")", ":", "return", "0", "first", ",", "middle", ",", "last", "=", "self", ".", "_compare_components", "(", "other", ",", "settings", ",", "True", ")", "f_weight", ",", "m_weight", ",", "l_weight", "=", "self", ".", "_determine_weights", "(", "other", ",", "settings", ")", "total_weight", "=", "f_weight", "+", "m_weight", "+", "l_weight", "result", "=", "(", "first", "*", "f_weight", "+", "middle", "*", "m_weight", "+", "last", "*", "l_weight", ")", "/", "total_weight", "return", "result" ]
Compares each field of the name one at a time to see if they match. Each name field has context-specific comparison logic. :param Name other: other Name for comparison :return int: sequence ratio match (out of 100)
[ "Compares", "each", "field", "of", "the", "name", "one", "at", "a", "time", "to", "see", "if", "they", "match", ".", "Each", "name", "field", "has", "context", "-", "specific", "comparison", "logic", "." ]
train
https://github.com/rliebz/whoswho/blob/0c411e418c240fcec6ea0a23d15bd003056c65d0/whoswho/model.py#L41-L63
rliebz/whoswho
whoswho/model.py
Name._is_compatible_with
def _is_compatible_with(self, other): """ Return True if names are not incompatible. This checks that the gender of titles and compatibility of suffixes """ title = self._compare_title(other) suffix = self._compare_suffix(other) return title and suffix
python
def _is_compatible_with(self, other): """ Return True if names are not incompatible. This checks that the gender of titles and compatibility of suffixes """ title = self._compare_title(other) suffix = self._compare_suffix(other) return title and suffix
[ "def", "_is_compatible_with", "(", "self", ",", "other", ")", ":", "title", "=", "self", ".", "_compare_title", "(", "other", ")", "suffix", "=", "self", ".", "_compare_suffix", "(", "other", ")", "return", "title", "and", "suffix" ]
Return True if names are not incompatible. This checks the gender of titles and the compatibility of suffixes
[ "Return", "True", "if", "names", "are", "not", "incompatible", "." ]
train
https://github.com/rliebz/whoswho/blob/0c411e418c240fcec6ea0a23d15bd003056c65d0/whoswho/model.py#L65-L75
rliebz/whoswho
whoswho/model.py
Name._compare_title
def _compare_title(self, other): """Return False if titles have different gender associations""" # If title is omitted, assume a match if not self.title or not other.title: return True titles = set(self.title_list + other.title_list) return not (titles & MALE_TITLES and titles & FEMALE_TITLES)
python
def _compare_title(self, other): """Return False if titles have different gender associations""" # If title is omitted, assume a match if not self.title or not other.title: return True titles = set(self.title_list + other.title_list) return not (titles & MALE_TITLES and titles & FEMALE_TITLES)
[ "def", "_compare_title", "(", "self", ",", "other", ")", ":", "# If title is omitted, assume a match", "if", "not", "self", ".", "title", "or", "not", "other", ".", "title", ":", "return", "True", "titles", "=", "set", "(", "self", ".", "title_list", "+", "other", ".", "title_list", ")", "return", "not", "(", "titles", "&", "MALE_TITLES", "and", "titles", "&", "FEMALE_TITLES", ")" ]
Return False if titles have different gender associations
[ "Return", "False", "if", "titles", "have", "different", "gender", "associations" ]
train
https://github.com/rliebz/whoswho/blob/0c411e418c240fcec6ea0a23d15bd003056c65d0/whoswho/model.py#L77-L86
rliebz/whoswho
whoswho/model.py
Name._compare_suffix
def _compare_suffix(self, other): """Return false if suffixes are mutually exclusive""" # If suffix is omitted, assume a match if not self.suffix or not other.suffix: return True # Check if more than one unique suffix suffix_set = set(self.suffix_list + other.suffix_list) unique_suffixes = suffix_set & UNIQUE_SUFFIXES for key in EQUIVALENT_SUFFIXES: if key in unique_suffixes: unique_suffixes.remove(key) unique_suffixes.add(EQUIVALENT_SUFFIXES[key]) return len(unique_suffixes) < 2
python
def _compare_suffix(self, other): """Return false if suffixes are mutually exclusive""" # If suffix is omitted, assume a match if not self.suffix or not other.suffix: return True # Check if more than one unique suffix suffix_set = set(self.suffix_list + other.suffix_list) unique_suffixes = suffix_set & UNIQUE_SUFFIXES for key in EQUIVALENT_SUFFIXES: if key in unique_suffixes: unique_suffixes.remove(key) unique_suffixes.add(EQUIVALENT_SUFFIXES[key]) return len(unique_suffixes) < 2
[ "def", "_compare_suffix", "(", "self", ",", "other", ")", ":", "# If suffix is omitted, assume a match", "if", "not", "self", ".", "suffix", "or", "not", "other", ".", "suffix", ":", "return", "True", "# Check if more than one unique suffix", "suffix_set", "=", "set", "(", "self", ".", "suffix_list", "+", "other", ".", "suffix_list", ")", "unique_suffixes", "=", "suffix_set", "&", "UNIQUE_SUFFIXES", "for", "key", "in", "EQUIVALENT_SUFFIXES", ":", "if", "key", "in", "unique_suffixes", ":", "unique_suffixes", ".", "remove", "(", "key", ")", "unique_suffixes", ".", "add", "(", "EQUIVALENT_SUFFIXES", "[", "key", "]", ")", "return", "len", "(", "unique_suffixes", ")", "<", "2" ]
Return false if suffixes are mutually exclusive
[ "Return", "false", "if", "suffixes", "are", "mutually", "exclusive" ]
train
https://github.com/rliebz/whoswho/blob/0c411e418c240fcec6ea0a23d15bd003056c65d0/whoswho/model.py#L88-L103
rliebz/whoswho
whoswho/model.py
Name._compare_components
def _compare_components(self, other, settings, ratio=False): """Return comparison of first, middle, and last components""" first = compare_name_component( self.first_list, other.first_list, settings['first'], ratio, ) if settings['check_nickname']: if first is False: first = compare_name_component( self.nickname_list, other.first_list, settings['first'], ratio ) or compare_name_component( self.first_list, other.nickname_list, settings['first'], ratio ) elif ratio and first is not 100: first = max( compare_name_component( self.nickname_list, other.first_list, settings['first'], ratio ), compare_name_component( self.first_list, other.nickname_list, settings['first'], ratio ), first, ) middle = compare_name_component( self.middle_list, other.middle_list, settings['middle'], ratio, ) last = compare_name_component( self.last_list, other.last_list, settings['last'], ratio, ) return first, middle, last
python
def _compare_components(self, other, settings, ratio=False): """Return comparison of first, middle, and last components""" first = compare_name_component( self.first_list, other.first_list, settings['first'], ratio, ) if settings['check_nickname']: if first is False: first = compare_name_component( self.nickname_list, other.first_list, settings['first'], ratio ) or compare_name_component( self.first_list, other.nickname_list, settings['first'], ratio ) elif ratio and first is not 100: first = max( compare_name_component( self.nickname_list, other.first_list, settings['first'], ratio ), compare_name_component( self.first_list, other.nickname_list, settings['first'], ratio ), first, ) middle = compare_name_component( self.middle_list, other.middle_list, settings['middle'], ratio, ) last = compare_name_component( self.last_list, other.last_list, settings['last'], ratio, ) return first, middle, last
[ "def", "_compare_components", "(", "self", ",", "other", ",", "settings", ",", "ratio", "=", "False", ")", ":", "first", "=", "compare_name_component", "(", "self", ".", "first_list", ",", "other", ".", "first_list", ",", "settings", "[", "'first'", "]", ",", "ratio", ",", ")", "if", "settings", "[", "'check_nickname'", "]", ":", "if", "first", "is", "False", ":", "first", "=", "compare_name_component", "(", "self", ".", "nickname_list", ",", "other", ".", "first_list", ",", "settings", "[", "'first'", "]", ",", "ratio", ")", "or", "compare_name_component", "(", "self", ".", "first_list", ",", "other", ".", "nickname_list", ",", "settings", "[", "'first'", "]", ",", "ratio", ")", "elif", "ratio", "and", "first", "is", "not", "100", ":", "first", "=", "max", "(", "compare_name_component", "(", "self", ".", "nickname_list", ",", "other", ".", "first_list", ",", "settings", "[", "'first'", "]", ",", "ratio", ")", ",", "compare_name_component", "(", "self", ".", "first_list", ",", "other", ".", "nickname_list", ",", "settings", "[", "'first'", "]", ",", "ratio", ")", ",", "first", ",", ")", "middle", "=", "compare_name_component", "(", "self", ".", "middle_list", ",", "other", ".", "middle_list", ",", "settings", "[", "'middle'", "]", ",", "ratio", ",", ")", "last", "=", "compare_name_component", "(", "self", ".", "last_list", ",", "other", ".", "last_list", ",", "settings", "[", "'last'", "]", ",", "ratio", ",", ")", "return", "first", ",", "middle", ",", "last" ]
Return comparison of first, middle, and last components
[ "Return", "comparison", "of", "first", "middle", "and", "last", "components" ]
train
https://github.com/rliebz/whoswho/blob/0c411e418c240fcec6ea0a23d15bd003056c65d0/whoswho/model.py#L105-L159
rliebz/whoswho
whoswho/model.py
Name._determine_weights
def _determine_weights(self, other, settings): """ Return weights of name components based on whether or not they were omitted """ # TODO: Reduce weight for matches by prefix or initials first_is_used = settings['first']['required'] or \ self.first and other.first first_weight = settings['first']['weight'] if first_is_used else 0 middle_is_used = settings['middle']['required'] or \ self.middle and other.middle middle_weight = settings['middle']['weight'] if middle_is_used else 0 last_is_used = settings['last']['required'] or \ self.last and other.last last_weight = settings['last']['weight'] if last_is_used else 0 return first_weight, middle_weight, last_weight
python
def _determine_weights(self, other, settings): """ Return weights of name components based on whether or not they were omitted """ # TODO: Reduce weight for matches by prefix or initials first_is_used = settings['first']['required'] or \ self.first and other.first first_weight = settings['first']['weight'] if first_is_used else 0 middle_is_used = settings['middle']['required'] or \ self.middle and other.middle middle_weight = settings['middle']['weight'] if middle_is_used else 0 last_is_used = settings['last']['required'] or \ self.last and other.last last_weight = settings['last']['weight'] if last_is_used else 0 return first_weight, middle_weight, last_weight
[ "def", "_determine_weights", "(", "self", ",", "other", ",", "settings", ")", ":", "# TODO: Reduce weight for matches by prefix or initials", "first_is_used", "=", "settings", "[", "'first'", "]", "[", "'required'", "]", "or", "self", ".", "first", "and", "other", ".", "first", "first_weight", "=", "settings", "[", "'first'", "]", "[", "'weight'", "]", "if", "first_is_used", "else", "0", "middle_is_used", "=", "settings", "[", "'middle'", "]", "[", "'required'", "]", "or", "self", ".", "middle", "and", "other", ".", "middle", "middle_weight", "=", "settings", "[", "'middle'", "]", "[", "'weight'", "]", "if", "middle_is_used", "else", "0", "last_is_used", "=", "settings", "[", "'last'", "]", "[", "'required'", "]", "or", "self", ".", "last", "and", "other", ".", "last", "last_weight", "=", "settings", "[", "'last'", "]", "[", "'weight'", "]", "if", "last_is_used", "else", "0", "return", "first_weight", ",", "middle_weight", ",", "last_weight" ]
Return weights of name components based on whether or not they were omitted
[ "Return", "weights", "of", "name", "components", "based", "on", "whether", "or", "not", "they", "were", "omitted" ]
train
https://github.com/rliebz/whoswho/blob/0c411e418c240fcec6ea0a23d15bd003056c65d0/whoswho/model.py#L161-L181
bohea/sanic-limiter
sanic_limiter/extension.py
Limiter.init_app
def init_app(self, app): """ :param app: :class:`sanic.Sanic` instance to rate limit. """ self.enabled = app.config.setdefault(C.ENABLED, True) self._swallow_errors = app.config.setdefault( C.SWALLOW_ERRORS, self._swallow_errors ) self._storage_options.update( app.config.get(C.STORAGE_OPTIONS, {}) ) self._storage = storage_from_string( self._storage_uri or app.config.setdefault(C.STORAGE_URL, 'memory://'), **self._storage_options ) strategy = ( self._strategy or app.config.setdefault(C.STRATEGY, 'fixed-window') ) if strategy not in STRATEGIES: raise ConfigurationError("Invalid rate limiting strategy %s" % strategy) self._limiter = STRATEGIES[strategy](self._storage) conf_limits = app.config.get(C.GLOBAL_LIMITS, None) if not self._global_limits and conf_limits: self._global_limits = [ ExtLimit( limit, self._key_func, None, False, None, None, None ) for limit in parse_many(conf_limits) ] app.request_middleware.append(self.__check_request_limit)
python
def init_app(self, app): """ :param app: :class:`sanic.Sanic` instance to rate limit. """ self.enabled = app.config.setdefault(C.ENABLED, True) self._swallow_errors = app.config.setdefault( C.SWALLOW_ERRORS, self._swallow_errors ) self._storage_options.update( app.config.get(C.STORAGE_OPTIONS, {}) ) self._storage = storage_from_string( self._storage_uri or app.config.setdefault(C.STORAGE_URL, 'memory://'), **self._storage_options ) strategy = ( self._strategy or app.config.setdefault(C.STRATEGY, 'fixed-window') ) if strategy not in STRATEGIES: raise ConfigurationError("Invalid rate limiting strategy %s" % strategy) self._limiter = STRATEGIES[strategy](self._storage) conf_limits = app.config.get(C.GLOBAL_LIMITS, None) if not self._global_limits and conf_limits: self._global_limits = [ ExtLimit( limit, self._key_func, None, False, None, None, None ) for limit in parse_many(conf_limits) ] app.request_middleware.append(self.__check_request_limit)
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "self", ".", "enabled", "=", "app", ".", "config", ".", "setdefault", "(", "C", ".", "ENABLED", ",", "True", ")", "self", ".", "_swallow_errors", "=", "app", ".", "config", ".", "setdefault", "(", "C", ".", "SWALLOW_ERRORS", ",", "self", ".", "_swallow_errors", ")", "self", ".", "_storage_options", ".", "update", "(", "app", ".", "config", ".", "get", "(", "C", ".", "STORAGE_OPTIONS", ",", "{", "}", ")", ")", "self", ".", "_storage", "=", "storage_from_string", "(", "self", ".", "_storage_uri", "or", "app", ".", "config", ".", "setdefault", "(", "C", ".", "STORAGE_URL", ",", "'memory://'", ")", ",", "*", "*", "self", ".", "_storage_options", ")", "strategy", "=", "(", "self", ".", "_strategy", "or", "app", ".", "config", ".", "setdefault", "(", "C", ".", "STRATEGY", ",", "'fixed-window'", ")", ")", "if", "strategy", "not", "in", "STRATEGIES", ":", "raise", "ConfigurationError", "(", "\"Invalid rate limiting strategy %s\"", "%", "strategy", ")", "self", ".", "_limiter", "=", "STRATEGIES", "[", "strategy", "]", "(", "self", ".", "_storage", ")", "conf_limits", "=", "app", ".", "config", ".", "get", "(", "C", ".", "GLOBAL_LIMITS", ",", "None", ")", "if", "not", "self", ".", "_global_limits", "and", "conf_limits", ":", "self", ".", "_global_limits", "=", "[", "ExtLimit", "(", "limit", ",", "self", ".", "_key_func", ",", "None", ",", "False", ",", "None", ",", "None", ",", "None", ")", "for", "limit", "in", "parse_many", "(", "conf_limits", ")", "]", "app", ".", "request_middleware", ".", "append", "(", "self", ".", "__check_request_limit", ")" ]
:param app: :class:`sanic.Sanic` instance to rate limit.
[ ":", "param", "app", ":", ":", "class", ":", "sanic", ".", "Sanic", "instance", "to", "rate", "limit", "." ]
train
https://github.com/bohea/sanic-limiter/blob/54c9fc4a3a3f1a9bb69367262637d07701ae5694/sanic_limiter/extension.py#L116-L147
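A minimal setup sketch for the extension. Only init_app is shown in the record above, so the Limiter constructor arguments and the get_remote_address helper are assumed to mirror the flask-limiter API this package is modelled on:

    from sanic import Sanic
    from sanic_limiter import Limiter, get_remote_address

    app = Sanic(__name__)

    # Assumed constructor signature; storage, strategy and global limits can also
    # come from the app.config keys read inside init_app.
    limiter = Limiter(global_limits=['200 per day', '50 per hour'],
                      key_func=get_remote_address)
    limiter.init_app(app)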
bohea/sanic-limiter
sanic_limiter/extension.py
Limiter.limit
def limit(self, limit_value, key_func=None, per_method=False, methods=None, error_message=None, exempt_when=None): """ decorator to be used for rate limiting individual routes. :param limit_value: rate limit string or a callable that returns a string. :ref:`ratelimit-string` for more details. :param function key_func: function/lambda to extract the unique identifier for the rate limit. defaults to remote address of the request. :param bool per_method: whether the limit is sub categorized into the http method of the request. :param list methods: if specified, only the methods in this list will be rate limited (default: None). :param error_message: string (or callable that returns one) to override the error message used in the response. :return: """ return self.__limit_decorator(limit_value, key_func, per_method=per_method, methods=methods, error_message=error_message, exempt_when=exempt_when)
python
def limit(self, limit_value, key_func=None, per_method=False, methods=None, error_message=None, exempt_when=None): """ decorator to be used for rate limiting individual routes. :param limit_value: rate limit string or a callable that returns a string. :ref:`ratelimit-string` for more details. :param function key_func: function/lambda to extract the unique identifier for the rate limit. defaults to remote address of the request. :param bool per_method: whether the limit is sub categorized into the http method of the request. :param list methods: if specified, only the methods in this list will be rate limited (default: None). :param error_message: string (or callable that returns one) to override the error message used in the response. :return: """ return self.__limit_decorator(limit_value, key_func, per_method=per_method, methods=methods, error_message=error_message, exempt_when=exempt_when)
[ "def", "limit", "(", "self", ",", "limit_value", ",", "key_func", "=", "None", ",", "per_method", "=", "False", ",", "methods", "=", "None", ",", "error_message", "=", "None", ",", "exempt_when", "=", "None", ")", ":", "return", "self", ".", "__limit_decorator", "(", "limit_value", ",", "key_func", ",", "per_method", "=", "per_method", ",", "methods", "=", "methods", ",", "error_message", "=", "error_message", ",", "exempt_when", "=", "exempt_when", ")" ]
decorator to be used for rate limiting individual routes. :param limit_value: rate limit string or a callable that returns a string. :ref:`ratelimit-string` for more details. :param function key_func: function/lambda to extract the unique identifier for the rate limit. defaults to remote address of the request. :param bool per_method: whether the limit is sub categorized into the http method of the request. :param list methods: if specified, only the methods in this list will be rate limited (default: None). :param error_message: string (or callable that returns one) to override the error message used in the response. :return:
[ "decorator", "to", "be", "used", "for", "rate", "limiting", "individual", "routes", "." ]
train
https://github.com/bohea/sanic-limiter/blob/54c9fc4a3a3f1a9bb69367262637d07701ae5694/sanic_limiter/extension.py#L297-L316
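Reusing the app and limiter from the previous sketch, a per-route limit can be declared with the decorator above; '10/minute' follows the ratelimit-string syntax handled by parse_many:

    from sanic.response import text

    @app.route('/ping')
    @limiter.limit('10/minute')
    async def ping(request):
        return text('pong')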
bohea/sanic-limiter
sanic_limiter/extension.py
Limiter.shared_limit
def shared_limit(self, limit_value, scope, key_func=None, error_message=None, exempt_when=None): """ decorator to be applied to multiple routes sharing the same rate limit. :param limit_value: rate limit string or a callable that returns a string. :ref:`ratelimit-string` for more details. :param scope: a string or callable that returns a string for defining the rate limiting scope. :param function key_func: function/lambda to extract the unique identifier for the rate limit. defaults to remote address of the request. :param error_message: string (or callable that returns one) to override the error message used in the response. """ return self.__limit_decorator( limit_value, key_func, True, scope, error_message=error_message, exempt_when=exempt_when )
python
def shared_limit(self, limit_value, scope, key_func=None, error_message=None, exempt_when=None): """ decorator to be applied to multiple routes sharing the same rate limit. :param limit_value: rate limit string or a callable that returns a string. :ref:`ratelimit-string` for more details. :param scope: a string or callable that returns a string for defining the rate limiting scope. :param function key_func: function/lambda to extract the unique identifier for the rate limit. defaults to remote address of the request. :param error_message: string (or callable that returns one) to override the error message used in the response. """ return self.__limit_decorator( limit_value, key_func, True, scope, error_message=error_message, exempt_when=exempt_when )
[ "def", "shared_limit", "(", "self", ",", "limit_value", ",", "scope", ",", "key_func", "=", "None", ",", "error_message", "=", "None", ",", "exempt_when", "=", "None", ")", ":", "return", "self", ".", "__limit_decorator", "(", "limit_value", ",", "key_func", ",", "True", ",", "scope", ",", "error_message", "=", "error_message", ",", "exempt_when", "=", "exempt_when", ")" ]
decorator to be applied to multiple routes sharing the same rate limit. :param limit_value: rate limit string or a callable that returns a string. :ref:`ratelimit-string` for more details. :param scope: a string or callable that returns a string for defining the rate limiting scope. :param function key_func: function/lambda to extract the unique identifier for the rate limit. defaults to remote address of the request. :param error_message: string (or callable that returns one) to override the error message used in the response.
[ "decorator", "to", "be", "applied", "to", "multiple", "routes", "sharing", "the", "same", "rate", "limit", "." ]
train
https://github.com/bohea/sanic-limiter/blob/54c9fc4a3a3f1a9bb69367262637d07701ae5694/sanic_limiter/extension.py#L318-L335
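A sketch of one limit shared by several routes, building on the previous sketches and again assuming flask-limiter-style usage: shared_limit returns a decorator, and every route it wraps draws from the same bucket identified by the scope:

    report_limit = limiter.shared_limit('30/hour', scope='reports')

    @app.route('/report/daily')
    @report_limit
    async def daily(request):
        return text('daily report')

    @app.route('/report/weekly')
    @report_limit
    async def weekly(request):
        return text('weekly report')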
bohea/sanic-limiter
sanic_limiter/extension.py
Limiter.reset
def reset(self): """ resets the storage if it supports being reset """ try: self._storage.reset() self.logger.info("Storage has been reset and all limits cleared") except NotImplementedError: self.logger.warning("This storage type does not support being reset")
python
def reset(self): """ resets the storage if it supports being reset """ try: self._storage.reset() self.logger.info("Storage has been reset and all limits cleared") except NotImplementedError: self.logger.warning("This storage type does not support being reset")
[ "def", "reset", "(", "self", ")", ":", "try", ":", "self", ".", "_storage", ".", "reset", "(", ")", "self", ".", "logger", ".", "info", "(", "\"Storage has been reset and all limits cleared\"", ")", "except", "NotImplementedError", ":", "self", ".", "logger", ".", "warning", "(", "\"This storage type does not support being reset\"", ")" ]
resets the storage if it supports being reset
[ "resets", "the", "storage", "if", "it", "supports", "being", "reset" ]
train
https://github.com/bohea/sanic-limiter/blob/54c9fc4a3a3f1a9bb69367262637d07701ae5694/sanic_limiter/extension.py#L358-L366
karan/HackerNewsAPI
hn/utils.py
get_soup
def get_soup(page=''): """ Returns a bs4 object of the page requested """ content = requests.get('%s/%s' % (BASE_URL, page)).text return BeautifulSoup(content)
python
def get_soup(page=''): """ Returns a bs4 object of the page requested """ content = requests.get('%s/%s' % (BASE_URL, page)).text return BeautifulSoup(content)
[ "def", "get_soup", "(", "page", "=", "''", ")", ":", "content", "=", "requests", ".", "get", "(", "'%s/%s'", "%", "(", "BASE_URL", ",", "page", ")", ")", ".", "text", "return", "BeautifulSoup", "(", "content", ")" ]
Returns a bs4 object of the page requested
[ "Returns", "a", "bs4", "object", "of", "the", "page", "requested" ]
train
https://github.com/karan/HackerNewsAPI/blob/0e2df2e28f3a6090559eacdefdb99f4d6780ddf5/hn/utils.py#L9-L14
rliebz/whoswho
whoswho/who.py
match
def match(fullname1, fullname2, strictness='default', options=None): """ Takes two names and returns true if they describe the same person. :param string fullname1: first human name :param string fullname2: second human name :param string strictness: strictness settings to use :param dict options: custom strictness settings updates :return bool: the names match """ if options is not None: settings = deepcopy(SETTINGS[strictness]) deep_update_dict(settings, options) else: settings = SETTINGS[strictness] name1 = Name(fullname1) name2 = Name(fullname2) return name1.deep_compare(name2, settings)
python
def match(fullname1, fullname2, strictness='default', options=None): """ Takes two names and returns true if they describe the same person. :param string fullname1: first human name :param string fullname2: second human name :param string strictness: strictness settings to use :param dict options: custom strictness settings updates :return bool: the names match """ if options is not None: settings = deepcopy(SETTINGS[strictness]) deep_update_dict(settings, options) else: settings = SETTINGS[strictness] name1 = Name(fullname1) name2 = Name(fullname2) return name1.deep_compare(name2, settings)
[ "def", "match", "(", "fullname1", ",", "fullname2", ",", "strictness", "=", "'default'", ",", "options", "=", "None", ")", ":", "if", "options", "is", "not", "None", ":", "settings", "=", "deepcopy", "(", "SETTINGS", "[", "strictness", "]", ")", "deep_update_dict", "(", "settings", ",", "options", ")", "else", ":", "settings", "=", "SETTINGS", "[", "strictness", "]", "name1", "=", "Name", "(", "fullname1", ")", "name2", "=", "Name", "(", "fullname2", ")", "return", "name1", ".", "deep_compare", "(", "name2", ",", "settings", ")" ]
Takes two names and returns true if they describe the same person. :param string fullname1: first human name :param string fullname2: second human name :param string strictness: strictness settings to use :param dict options: custom strictness settings updates :return bool: the names match
[ "Takes", "two", "names", "and", "returns", "true", "if", "they", "describe", "the", "same", "person", "." ]
train
https://github.com/rliebz/whoswho/blob/0c411e418c240fcec6ea0a23d15bd003056c65d0/whoswho/who.py#L8-L28
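Assuming the package is importable as laid out in the repository (whoswho/who.py), typical calls look like the sketch below; the names compared are arbitrary examples:

    from whoswho import who  # import path assumed from the repository layout

    # Boolean comparison with the default strictness profile.
    who.match('E. Hemingway', 'Ernest Hemingway')

    # The same comparison with nickname checking switched off via an options override.
    who.match('E. Hemingway', 'Ernest Hemingway', options={'check_nickname': False})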
rliebz/whoswho
whoswho/who.py
ratio
def ratio(fullname1, fullname2, strictness='default', options=None): """ Takes two names and returns true if they describe the same person. Uses difflib's sequence matching on a per-field basis for names :param string fullname1: first human name :param string fullname2: second human name :param string strictness: strictness settings to use :param dict options: custom strictness settings updates :return int: sequence ratio match (out of 100) """ if options is not None: settings = deepcopy(SETTINGS[strictness]) deep_update_dict(settings, options) else: settings = SETTINGS[strictness] name1 = Name(fullname1) name2 = Name(fullname2) return name1.ratio_deep_compare(name2, settings)
python
def ratio(fullname1, fullname2, strictness='default', options=None): """ Takes two names and returns true if they describe the same person. Uses difflib's sequence matching on a per-field basis for names :param string fullname1: first human name :param string fullname2: second human name :param string strictness: strictness settings to use :param dict options: custom strictness settings updates :return int: sequence ratio match (out of 100) """ if options is not None: settings = deepcopy(SETTINGS[strictness]) deep_update_dict(settings, options) else: settings = SETTINGS[strictness] name1 = Name(fullname1) name2 = Name(fullname2) return name1.ratio_deep_compare(name2, settings)
[ "def", "ratio", "(", "fullname1", ",", "fullname2", ",", "strictness", "=", "'default'", ",", "options", "=", "None", ")", ":", "if", "options", "is", "not", "None", ":", "settings", "=", "deepcopy", "(", "SETTINGS", "[", "strictness", "]", ")", "deep_update_dict", "(", "settings", ",", "options", ")", "else", ":", "settings", "=", "SETTINGS", "[", "strictness", "]", "name1", "=", "Name", "(", "fullname1", ")", "name2", "=", "Name", "(", "fullname2", ")", "return", "name1", ".", "ratio_deep_compare", "(", "name2", ",", "settings", ")" ]
Takes two names and returns true if they describe the same person. Uses difflib's sequence matching on a per-field basis for names :param string fullname1: first human name :param string fullname2: second human name :param string strictness: strictness settings to use :param dict options: custom strictness settings updates :return int: sequence ratio match (out of 100)
[ "Takes", "two", "names", "and", "returns", "true", "if", "they", "describe", "the", "same", "person", ".", "Uses", "difflib", "s", "sequence", "matching", "on", "a", "per", "-", "field", "basis", "for", "names" ]
train
https://github.com/rliebz/whoswho/blob/0c411e418c240fcec6ea0a23d15bd003056c65d0/whoswho/who.py#L31-L52
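The scored counterpart, with the same import caveat as the match sketch above; the result is a similarity ratio out of 100 rather than a boolean:

    who.ratio('E. Hemingway', 'Ernest Hemingway')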
karan/HackerNewsAPI
hn/hn.py
HN._get_zipped_rows
def _get_zipped_rows(self, soup): """ Returns all 'tr' tag rows as a list of tuples. Each tuple is for a single story. """ # the table with all submissions table = soup.findChildren('table')[2] # get all rows but last 2 rows = table.findChildren(['tr'])[:-2] # remove the spacing rows # indices of spacing tr's spacing = range(2, len(rows), 3) rows = [row for (i, row) in enumerate(rows) if (i not in spacing)] # rank, title, domain info = [row for (i, row) in enumerate(rows) if (i % 2 == 0)] # points, submitter, comments detail = [row for (i, row) in enumerate(rows) if (i % 2 != 0)] # build a list of tuple for all post return zip(info, detail)
python
def _get_zipped_rows(self, soup): """ Returns all 'tr' tag rows as a list of tuples. Each tuple is for a single story. """ # the table with all submissions table = soup.findChildren('table')[2] # get all rows but last 2 rows = table.findChildren(['tr'])[:-2] # remove the spacing rows # indices of spacing tr's spacing = range(2, len(rows), 3) rows = [row for (i, row) in enumerate(rows) if (i not in spacing)] # rank, title, domain info = [row for (i, row) in enumerate(rows) if (i % 2 == 0)] # points, submitter, comments detail = [row for (i, row) in enumerate(rows) if (i % 2 != 0)] # build a list of tuple for all post return zip(info, detail)
[ "def", "_get_zipped_rows", "(", "self", ",", "soup", ")", ":", "# the table with all submissions\r", "table", "=", "soup", ".", "findChildren", "(", "'table'", ")", "[", "2", "]", "# get all rows but last 2\r", "rows", "=", "table", ".", "findChildren", "(", "[", "'tr'", "]", ")", "[", ":", "-", "2", "]", "# remove the spacing rows\r", "# indices of spacing tr's\r", "spacing", "=", "range", "(", "2", ",", "len", "(", "rows", ")", ",", "3", ")", "rows", "=", "[", "row", "for", "(", "i", ",", "row", ")", "in", "enumerate", "(", "rows", ")", "if", "(", "i", "not", "in", "spacing", ")", "]", "# rank, title, domain\r", "info", "=", "[", "row", "for", "(", "i", ",", "row", ")", "in", "enumerate", "(", "rows", ")", "if", "(", "i", "%", "2", "==", "0", ")", "]", "# points, submitter, comments\r", "detail", "=", "[", "row", "for", "(", "i", ",", "row", ")", "in", "enumerate", "(", "rows", ")", "if", "(", "i", "%", "2", "!=", "0", ")", "]", "# build a list of tuple for all post\r", "return", "zip", "(", "info", ",", "detail", ")" ]
Returns all 'tr' tag rows as a list of tuples. Each tuple is for a single story.
[ "Returns", "all", "tr", "tag", "rows", "as", "a", "list", "of", "tuples", ".", "Each", "tuple", "is", "for", "a", "single", "story", "." ]
train
https://github.com/karan/HackerNewsAPI/blob/0e2df2e28f3a6090559eacdefdb99f4d6780ddf5/hn/hn.py#L36-L55
karan/HackerNewsAPI
hn/hn.py
HN._build_story
def _build_story(self, all_rows): """ Builds and returns a list of stories (dicts) from the passed source. """ # list to hold all stories all_stories = [] for (info, detail) in all_rows: #-- Get the into about a story --# # split in 3 cells info_cells = info.findAll('td') rank = int(info_cells[0].string[:-1]) title = '%s' % info_cells[2].find('a').string link = info_cells[2].find('a').get('href') # by default all stories are linking posts is_self = False # the link doesn't contains "http" meaning an internal link if link.find('item?id=') is -1: # slice " (abc.com) " domain = info_cells[2].findAll('span')[1].string[2:-1] else: link = '%s/%s' % (BASE_URL, link) domain = BASE_URL is_self = True #-- Get the into about a story --# #-- Get the detail about a story --# # split in 2 cells, we need only second detail_cell = detail.findAll('td')[1] # list of details we need, 5 count detail_concern = detail_cell.contents num_comments = -1 if re.match(r'^(\d+)\spoint.*', detail_concern[0].string) is not \ None: # can be a link or self post points = int(re.match(r'^(\d+)\spoint.*', detail_concern[ 0].string).groups()[0]) submitter = '%s' % detail_concern[2].string submitter_profile = '%s/%s' % (BASE_URL, detail_concern[ 2].get('href')) published_time = ' '.join(detail_concern[3].strip().split()[ :3]) comment_tag = detail_concern[4] story_id = int(re.match(r'.*=(\d+)', comment_tag.get( 'href')).groups()[0]) comments_link = '%s/item?id=%d' % (BASE_URL, story_id) comment_count = re.match(r'(\d+)\s.*', comment_tag.string) try: # regex matched, cast to int num_comments = int(comment_count.groups()[0]) except AttributeError: # did not match, assign 0 num_comments = 0 else: # this is a job post points = 0 submitter = '' submitter_profile = '' published_time = '%s' % detail_concern[0] comment_tag = '' try: story_id = int(re.match(r'.*=(\d+)', link).groups()[0]) except AttributeError: # job listing that points to external link story_id = -1 comments_link = '' comment_count = -1 #-- Get the detail about a story --# story = Story(rank, story_id, title, link, domain, points, submitter, published_time, submitter_profile, num_comments, comments_link, is_self) all_stories.append(story) return all_stories
python
def _build_story(self, all_rows): """ Builds and returns a list of stories (dicts) from the passed source. """ # list to hold all stories all_stories = [] for (info, detail) in all_rows: #-- Get the into about a story --# # split in 3 cells info_cells = info.findAll('td') rank = int(info_cells[0].string[:-1]) title = '%s' % info_cells[2].find('a').string link = info_cells[2].find('a').get('href') # by default all stories are linking posts is_self = False # the link doesn't contains "http" meaning an internal link if link.find('item?id=') is -1: # slice " (abc.com) " domain = info_cells[2].findAll('span')[1].string[2:-1] else: link = '%s/%s' % (BASE_URL, link) domain = BASE_URL is_self = True #-- Get the into about a story --# #-- Get the detail about a story --# # split in 2 cells, we need only second detail_cell = detail.findAll('td')[1] # list of details we need, 5 count detail_concern = detail_cell.contents num_comments = -1 if re.match(r'^(\d+)\spoint.*', detail_concern[0].string) is not \ None: # can be a link or self post points = int(re.match(r'^(\d+)\spoint.*', detail_concern[ 0].string).groups()[0]) submitter = '%s' % detail_concern[2].string submitter_profile = '%s/%s' % (BASE_URL, detail_concern[ 2].get('href')) published_time = ' '.join(detail_concern[3].strip().split()[ :3]) comment_tag = detail_concern[4] story_id = int(re.match(r'.*=(\d+)', comment_tag.get( 'href')).groups()[0]) comments_link = '%s/item?id=%d' % (BASE_URL, story_id) comment_count = re.match(r'(\d+)\s.*', comment_tag.string) try: # regex matched, cast to int num_comments = int(comment_count.groups()[0]) except AttributeError: # did not match, assign 0 num_comments = 0 else: # this is a job post points = 0 submitter = '' submitter_profile = '' published_time = '%s' % detail_concern[0] comment_tag = '' try: story_id = int(re.match(r'.*=(\d+)', link).groups()[0]) except AttributeError: # job listing that points to external link story_id = -1 comments_link = '' comment_count = -1 #-- Get the detail about a story --# story = Story(rank, story_id, title, link, domain, points, submitter, published_time, submitter_profile, num_comments, comments_link, is_self) all_stories.append(story) return all_stories
[ "def", "_build_story", "(", "self", ",", "all_rows", ")", ":", "# list to hold all stories\r", "all_stories", "=", "[", "]", "for", "(", "info", ",", "detail", ")", "in", "all_rows", ":", "#-- Get the into about a story --#\r", "# split in 3 cells\r", "info_cells", "=", "info", ".", "findAll", "(", "'td'", ")", "rank", "=", "int", "(", "info_cells", "[", "0", "]", ".", "string", "[", ":", "-", "1", "]", ")", "title", "=", "'%s'", "%", "info_cells", "[", "2", "]", ".", "find", "(", "'a'", ")", ".", "string", "link", "=", "info_cells", "[", "2", "]", ".", "find", "(", "'a'", ")", ".", "get", "(", "'href'", ")", "# by default all stories are linking posts\r", "is_self", "=", "False", "# the link doesn't contains \"http\" meaning an internal link\r", "if", "link", ".", "find", "(", "'item?id='", ")", "is", "-", "1", ":", "# slice \" (abc.com) \"\r", "domain", "=", "info_cells", "[", "2", "]", ".", "findAll", "(", "'span'", ")", "[", "1", "]", ".", "string", "[", "2", ":", "-", "1", "]", "else", ":", "link", "=", "'%s/%s'", "%", "(", "BASE_URL", ",", "link", ")", "domain", "=", "BASE_URL", "is_self", "=", "True", "#-- Get the into about a story --#\r", "#-- Get the detail about a story --#\r", "# split in 2 cells, we need only second\r", "detail_cell", "=", "detail", ".", "findAll", "(", "'td'", ")", "[", "1", "]", "# list of details we need, 5 count\r", "detail_concern", "=", "detail_cell", ".", "contents", "num_comments", "=", "-", "1", "if", "re", ".", "match", "(", "r'^(\\d+)\\spoint.*'", ",", "detail_concern", "[", "0", "]", ".", "string", ")", "is", "not", "None", ":", "# can be a link or self post\r", "points", "=", "int", "(", "re", ".", "match", "(", "r'^(\\d+)\\spoint.*'", ",", "detail_concern", "[", "0", "]", ".", "string", ")", ".", "groups", "(", ")", "[", "0", "]", ")", "submitter", "=", "'%s'", "%", "detail_concern", "[", "2", "]", ".", "string", "submitter_profile", "=", "'%s/%s'", "%", "(", "BASE_URL", ",", "detail_concern", "[", "2", "]", ".", "get", "(", "'href'", ")", ")", "published_time", "=", "' '", ".", "join", "(", "detail_concern", "[", "3", "]", ".", "strip", "(", ")", ".", "split", "(", ")", "[", ":", "3", "]", ")", "comment_tag", "=", "detail_concern", "[", "4", "]", "story_id", "=", "int", "(", "re", ".", "match", "(", "r'.*=(\\d+)'", ",", "comment_tag", ".", "get", "(", "'href'", ")", ")", ".", "groups", "(", ")", "[", "0", "]", ")", "comments_link", "=", "'%s/item?id=%d'", "%", "(", "BASE_URL", ",", "story_id", ")", "comment_count", "=", "re", ".", "match", "(", "r'(\\d+)\\s.*'", ",", "comment_tag", ".", "string", ")", "try", ":", "# regex matched, cast to int\r", "num_comments", "=", "int", "(", "comment_count", ".", "groups", "(", ")", "[", "0", "]", ")", "except", "AttributeError", ":", "# did not match, assign 0\r", "num_comments", "=", "0", "else", ":", "# this is a job post\r", "points", "=", "0", "submitter", "=", "''", "submitter_profile", "=", "''", "published_time", "=", "'%s'", "%", "detail_concern", "[", "0", "]", "comment_tag", "=", "''", "try", ":", "story_id", "=", "int", "(", "re", ".", "match", "(", "r'.*=(\\d+)'", ",", "link", ")", ".", "groups", "(", ")", "[", "0", "]", ")", "except", "AttributeError", ":", "# job listing that points to external link\r", "story_id", "=", "-", "1", "comments_link", "=", "''", "comment_count", "=", "-", "1", "#-- Get the detail about a story --#\r", "story", "=", "Story", "(", "rank", ",", "story_id", ",", "title", ",", "link", ",", "domain", ",", "points", ",", "submitter", ",", "published_time", ",", "submitter_profile", ",", "num_comments", ",", "comments_link", ",", "is_self", ")", "all_stories", ".", "append", "(", "story", ")", "return", "all_stories" ]
Builds and returns a list of stories (dicts) from the passed source.
[ "Builds", "and", "returns", "a", "list", "of", "stories", "(", "dicts", ")", "from", "the", "passed", "source", "." ]
train
https://github.com/karan/HackerNewsAPI/blob/0e2df2e28f3a6090559eacdefdb99f4d6780ddf5/hn/hn.py#L57-L138
karan/HackerNewsAPI
hn/hn.py
HN.get_stories
def get_stories(self, story_type='', limit=30): """ Yields a list of stories from the passed page of HN. 'story_type' can be: \t'' = top stories (homepage) (default) \t'news2' = page 2 of top stories \t'newest' = most recent stories \t'best' = best stories 'limit' is the number of stories required from the given page. Defaults to 30. Cannot be more than 30. """ if limit is None or limit < 1 or limit > 30: # we need at least 30 items limit = 30 stories_found = 0 # self.more = story_type # while we still have more stories to find while stories_found < limit: # get current page soup soup = get_soup(page=story_type) all_rows = self._get_zipped_rows(soup) # get a list of stories on current page stories = self._build_story(all_rows) # move to next page # self.more = self._get_next_page(soup) for story in stories: yield story stories_found += 1 # if enough stories found, return if stories_found == limit: return
python
def get_stories(self, story_type='', limit=30): """ Yields a list of stories from the passed page of HN. 'story_type' can be: \t'' = top stories (homepage) (default) \t'news2' = page 2 of top stories \t'newest' = most recent stories \t'best' = best stories 'limit' is the number of stories required from the given page. Defaults to 30. Cannot be more than 30. """ if limit is None or limit < 1 or limit > 30: # we need at least 30 items limit = 30 stories_found = 0 # self.more = story_type # while we still have more stories to find while stories_found < limit: # get current page soup soup = get_soup(page=story_type) all_rows = self._get_zipped_rows(soup) # get a list of stories on current page stories = self._build_story(all_rows) # move to next page # self.more = self._get_next_page(soup) for story in stories: yield story stories_found += 1 # if enough stories found, return if stories_found == limit: return
[ "def", "get_stories", "(", "self", ",", "story_type", "=", "''", ",", "limit", "=", "30", ")", ":", "if", "limit", "is", "None", "or", "limit", "<", "1", "or", "limit", ">", "30", ":", "# we need at least 30 items\r", "limit", "=", "30", "stories_found", "=", "0", "# self.more = story_type\r", "# while we still have more stories to find\r", "while", "stories_found", "<", "limit", ":", "# get current page soup\r", "soup", "=", "get_soup", "(", "page", "=", "story_type", ")", "all_rows", "=", "self", ".", "_get_zipped_rows", "(", "soup", ")", "# get a list of stories on current page\r", "stories", "=", "self", ".", "_build_story", "(", "all_rows", ")", "# move to next page\r", "# self.more = self._get_next_page(soup)\r", "for", "story", "in", "stories", ":", "yield", "story", "stories_found", "+=", "1", "# if enough stories found, return\r", "if", "stories_found", "==", "limit", ":", "return" ]
Yields a list of stories from the passed page of HN. 'story_type' can be: \t'' = top stories (homepage) (default) \t'news2' = page 2 of top stories \t'newest' = most recent stories \t'best' = best stories 'limit' is the number of stories required from the given page. Defaults to 30. Cannot be more than 30.
[ "Yields", "a", "list", "of", "stories", "from", "the", "passed", "page", "of", "HN", ".", "story_type", "can", "be", ":", "\\", "t", "=", "top", "stories", "(", "homepage", ")", "(", "default", ")", "\\", "t", "news2", "=", "page", "2", "of", "top", "stories", "\\", "t", "newest", "=", "most", "recent", "stories", "\\", "t", "best", "=", "best", "stories", "limit", "is", "the", "number", "of", "stories", "required", "from", "the", "given", "page", ".", "Defaults", "to", "30", ".", "Cannot", "be", "more", "than", "30", "." ]
train
https://github.com/karan/HackerNewsAPI/blob/0e2df2e28f3a6090559eacdefdb99f4d6780ddf5/hn/hn.py#L140-L175
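A hedged usage sketch. The hn package import and the Story attribute names are assumed from the repository layout and from the Story(...) constructor call in _build_story, not confirmed by this excerpt:

    from hn import HN  # import path assumed

    hn = HN()

    # Ten most recent submissions from the 'newest' page.
    for story in hn.get_stories(story_type='newest', limit=10):
        print(story.rank, story.title, story.link)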
karan/HackerNewsAPI
hn/hn.py
HN.get_leaders
def get_leaders(self, limit=10): """ Return the leaders of Hacker News """ if limit is None: limit = 10 soup = get_soup('leaders') table = soup.find('table') leaders_table = table.find_all('table')[1] listleaders = leaders_table.find_all('tr')[2:] listleaders.pop(10) # Removing because empty in the Leaders page for i, leader in enumerate(listleaders): if i == limit: return if not leader.text == '': item = leader.find_all('td') yield User(item[1].text, '', item[2].text, item[3].text)
python
def get_leaders(self, limit=10): """ Return the leaders of Hacker News """ if limit is None: limit = 10 soup = get_soup('leaders') table = soup.find('table') leaders_table = table.find_all('table')[1] listleaders = leaders_table.find_all('tr')[2:] listleaders.pop(10) # Removing because empty in the Leaders page for i, leader in enumerate(listleaders): if i == limit: return if not leader.text == '': item = leader.find_all('td') yield User(item[1].text, '', item[2].text, item[3].text)
[ "def", "get_leaders", "(", "self", ",", "limit", "=", "10", ")", ":", "if", "limit", "is", "None", ":", "limit", "=", "10", "soup", "=", "get_soup", "(", "'leaders'", ")", "table", "=", "soup", ".", "find", "(", "'table'", ")", "leaders_table", "=", "table", ".", "find_all", "(", "'table'", ")", "[", "1", "]", "listleaders", "=", "leaders_table", ".", "find_all", "(", "'tr'", ")", "[", "2", ":", "]", "listleaders", ".", "pop", "(", "10", ")", "# Removing because empty in the Leaders page\r", "for", "i", ",", "leader", "in", "enumerate", "(", "listleaders", ")", ":", "if", "i", "==", "limit", ":", "return", "if", "not", "leader", ".", "text", "==", "''", ":", "item", "=", "leader", ".", "find_all", "(", "'td'", ")", "yield", "User", "(", "item", "[", "1", "]", ".", "text", ",", "''", ",", "item", "[", "2", "]", ".", "text", ",", "item", "[", "3", "]", ".", "text", ")" ]
Return the leaders of Hacker News
[ "Return", "the", "leaders", "of", "Hacker", "News" ]
train
https://github.com/karan/HackerNewsAPI/blob/0e2df2e28f3a6090559eacdefdb99f4d6780ddf5/hn/hn.py#L177-L191
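Continuing the previous sketch, the leaders generator is consumed the same way; the User values come from the username, karma and average columns scraped above:

    # Top users from the /leaders page, capped at five.
    for leader in hn.get_leaders(limit=5):
        print(leader)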