def sort_players(self, sort_key=None, sort_func=None, reverse=False):
    """
    Return all home and away by-player info sorted by either the provided
    key or function. At least one of the two parameters must be provided.
    Can sort either ascending or descending.

    :param sort_key: (default None) dict key to sort on
    :param sort_func: (default None) sorting function
    :param reverse: (optional, default False) if True, sort descending
    :returns: dict of the form ``{ 'home/away': { by_player_dict } }``.
        See :py:func:`home_players` and :py:func:`away_players`
    """
    def each(d):
        t = []
        for num, v in d.items():
            ti = {vk: vv for vk, vv in v.items()}
            ti['num'] = num
            t.append(ti)
        if sort_key:
            return sorted(t, key=lambda k: k[sort_key], reverse=reverse)
        else:
            return sorted(t, key=sort_func, reverse=reverse)

    return self.__apply_to_both(each)

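A minimal usage sketch for the sorter above; ``summ`` stands for any object exposing this method, and the ``'goals'`` key is an illustrative stat name, not a documented field:

# 'summ' and the 'goals' key are hypothetical illustrations.
by_goals = summ.sort_players(sort_key='goals', reverse=True)
best_home = by_goals['home'][0]                            # top home player by 'goals'
by_num = summ.sort_players(sort_func=lambda p: p['num'])   # ascending by jersey number
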
def top_by_key(self, sort_key):
    """
    Return home/away by-player info for the players on each team that are
    first in the provided category.

    :param sort_key: str, the dictionary key to be sorted on
    :returns: dict of the form ``{ 'home/away': { by_player_dict } }``.
        See :py:func:`home_players` and :py:func:`away_players`
    """
    res = self.sort_players(sort_key=sort_key, reverse=True)
    return {
        'home': res['home'][0],
        'away': res['away'][0]
    }

def top_by_func(self, sort_func):
    """
    Return home/away by-player info for the players on each team who come
    in first according to the provided sorting function. Performs a
    descending sort (note: the body passes ``reverse=True``).

    :param sort_func: function that yields the sorting quantity
    :returns: dict of the form ``{ 'home/away': { by_player_dict } }``.
        See :py:func:`home_players` and :py:func:`away_players`
    """
    res = self.sort_players(sort_func=sort_func, reverse=True)
    return {
        'home': res['home'][0],
        'away': res['away'][0]
    }

def encode(obj):
    """Encode one argument/object to JSON."""
    if hasattr(obj, 'json'):
        return obj.json
    if hasattr(obj, '__json__'):
        return obj.__json__()
    # 'dumps' is json.dumps, imported at module level
    return dumps(obj)

def encode_args(args, extra=False):
    """Encode a list of arguments to JSON."""
    if not args:
        return ''

    methodargs = ', '.join([encode(a) for a in args])
    if extra:
        methodargs += ', '
    return methodargs

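A quick illustration of the two JSON helpers above; given that ``dumps`` is ``json.dumps``, the outputs follow directly from the code:

print(encode_args(['#nav', 42]))              # '"#nav", 42'
print(encode_args(['#nav', 42], extra=True))  # '"#nav", 42, '
print(encode_args([]))                        # ''
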
def _send(self, javascript):
    """
    Establish a socket connection to the zombie.js server and send
    Javascript instructions.

    :param javascript: the Javascript string to execute
    """
    # Prepend JS to switch to the proper client context.
    message = """
        var _ctx = ctx_switch('%s'),
            browser = _ctx[0],
            ELEMENTS = _ctx[1];
        %s
    """ % (id(self), javascript)
    response = self.connection.send(message)
    return self._handle_response(response)

def wait(self, method, *args):
    """
    Call a method on the zombie.js Browser instance and wait on a
    callback.

    :param method: the method to call, e.g., html()
    :param args: one or more arguments for the method
    """
    methodargs = encode_args(args, extra=True)
    js = """
        %s(%s wait_callback);
    """ % (method, methodargs)
    self._send(js)

def create_element(self, method, args=None):
    """
    Evaluate a browser method against the document and return a single
    :class:`zombie.dom.DOMNode` object, e.g.,

        browser._node('query', 'body > div')

    ...roughly translates to the following Javascript...

        browser.query('body > div')

    :param method: the method (e.g., query) to call on the browser
    :param args: an (optional) tuple of arguments to encode and pass to
        the method
    """
    if args is None:
        arguments = ''
    else:
        arguments = "(%s)" % encode_args(args)

    js = """
        create_element(ELEMENTS, %(method)s%(args)s);
    """ % {
        'method': method,
        'args': arguments
    }

    index = self.json(js)
    if index is None:
        return None
    return Element(index)

def create_elements(self, method, args=None):
    """
    Execute a browser method that will return a list of elements.

    Returns a list of the element indexes.
    """
    # a mutable default ([]) was replaced with None to avoid the shared
    # default-argument pitfall
    args = encode_args(args or [])
    js = """
        create_elements(ELEMENTS, %(method)s(%(args)s))
    """ % {
        'method': method,
        'args': args,
    }
    indexes = self.json(js)
    return [Element(ix) for ix in indexes]

def fill(self, field, value):
    """
    Fill a specified form field in the current document.

    :param field: an instance of :class:`zombie.dom.DOMNode`
    :param value: any string value
    :return: self to allow function chaining.
    """
    self.client.nowait('browser.fill', (field, value))
    return self

def query(self, selector, context=None):
    """
    Evaluate a CSS selector against the document (or an optional context
    :class:`zombie.dom.DOMNode`) and return a single
    :class:`zombie.dom.DOMNode` object.

    :param selector: a string CSS selector
        (http://zombie.labnotes.org/selectors)
    :param context: an (optional) instance of :class:`zombie.dom.DOMNode`
    """
    element = self.client.create_element(
        'browser.query', (selector, context))
    return DOMNode.factory(element, self)

def queryAll(self, selector, context=None):
    """
    Evaluate a CSS selector against the document (or an optional context
    :class:`zombie.dom.DOMNode`) and return a list of
    :class:`zombie.dom.DOMNode` objects.

    :param selector: a string CSS selector
        (http://zombie.labnotes.org/selectors)
    :param context: an (optional) instance of :class:`zombie.dom.DOMNode`
    """
    elements = self.client.create_elements(
        'browser.queryAll', (selector, context))
    return [DOMNode(e, self) for e in elements]

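A short usage sketch for the two selector methods above; the ``browser`` object and the selectors are illustrative:

# 'browser' is assumed to be an instance of the class defining query().
heading = browser.query('#main > h1')        # single DOMNode
rows = browser.queryAll('table.stats tr')    # list of DOMNodes
for row in rows:
    print(row)
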
def link(self, selector):
    """
    Find and return a link ``<a>`` element (:class:`zombie.dom.DOMNode`).
    You can use a CSS selector or find a link by its text contents (case
    sensitive, but ignores leading/trailing spaces).

    :param selector: a string CSS selector
        (http://zombie.labnotes.org/selectors) or inner text
    """
    element = self.client.create_element('browser.link', (selector,))
    return DOMNode(element, self)

def value(self, value):
    """Used to set the ``value`` of form elements."""
    self.client.nowait(
        'set_field', (Literal('browser'), self.element, value))

def fire(self, event):
    """
    Fire a specified DOM event on the current node.

    :param event: the name of the event to fire (e.g., 'click').

    Returns the :class:`zombie.dom.DOMNode` to allow function chaining.
    """
    self.browser.fire(self.element, event)
    return self

def load_all(self):
    """
    Force all reports to be loaded and parsed instead of lazy loading on
    demand.

    :returns: ``self`` or ``None`` if load fails
    """
    try:
        self.toi.load_all()
        self.rosters.load_all()
        #self.summary.load_all()
        self.play_by_play.load_all()
        self.face_off_comp.load_all()
        return self
    except Exception as e:
        print(e)
        return None

def matchup(self):
    """
    Return the game meta information displayed in report banners,
    including team names, final score, game date, location, and
    attendance. Data format is

    .. code:: python

        {
            'home': home,
            'away': away,
            'final': final,
            'attendance': att,
            'date': date,
            'location': loc
        }

    :returns: matchup banner info
    :rtype: dict
    """
    if self.play_by_play.matchup:
        return self.play_by_play.matchup
    elif self.rosters.matchup:
        return self.rosters.matchup
    elif self.toi.matchup:
        return self.toi.matchup
    else:
        # the original fell through here without a return, silently
        # yielding None; the result must be returned
        return self.face_off_comp.matchup

def _utf8_encode(self, d):
    """
    Ensure all values are encoded in UTF-8 and convert them to lowercase.
    """
    # note: written with Python 2 semantics; under Python 3, str.encode()
    # turns the values into byte strings
    for k, v in d.items():
        if isinstance(v, str):
            d[k] = v.encode('utf8').lower()
        if isinstance(v, list):
            for index, item in enumerate(v):
                item = item.encode('utf8').lower()
                v[index] = item
        if isinstance(v, dict):
            d[k] = self._utf8_encode(v)
    return d

def _bool_encode(self, d):
    """Convert bool values to lowercase strings."""
    for k, v in d.items():
        if isinstance(v, bool):
            d[k] = str(v).lower()
    return d

def _options(self, **kwargs):
    """
    Format search parameters/values for use with the API.

    :param \*\*kwargs: search parameters/values
    """
    def _format_fq(d):
        for k, v in d.items():
            if isinstance(v, list):
                d[k] = ' '.join(map(lambda x: '"' + x + '"', v))
            else:
                d[k] = '"' + v + '"'
        values = []
        for k, v in d.items():
            value = '%s:(%s)' % (k, v)
            values.append(value)
        values = ' AND '.join(values)
        return values

    kwargs = self._utf8_encode(kwargs)
    kwargs = self._bool_encode(kwargs)

    values = ''
    for k, v in kwargs.items():
        # identity comparison ("k is 'fq'") was a bug; compare by value
        if k == 'fq' and isinstance(v, dict):
            v = _format_fq(v)
        elif isinstance(v, list):
            v = ','.join(v)
        values += '%s=%s&' % (k, v)

    return values

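A sketch of the query-string fragment ``_options`` produces. The exact str/bytes form depends on ``_utf8_encode`` (which lowercases values), so the output is shown schematically:

# _options(q='Obama', fq={'source': ['Reuters', 'AP']}) yields roughly:
#   q=obama&fq=source:("reuters" "ap")&
# The trailing '&' is consumed by the api-key parameter that search()
# (below) appends to the URL.
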
def search(self, response_format=None, key=None, **kwargs):
    """
    Call the API and return a dictionary of the search results.

    :param response_format: the format that the API uses for its
        response, includes JSON (.json) and JSONP (.jsonp). Defaults to
        '.json'.
    :param key: a developer key. Defaults to the key given when the
        articleAPI class was initialized.
    """
    if response_format is None:
        response_format = self.response_format
    if key is None:
        key = self.key

    url = '%s%s?%sapi-key=%s' % (
        API_ROOT, response_format, self._options(**kwargs), key
    )

    r = requests.get(url)
    return r.json()

def parse(self):
    """
    Fully parse the game summary report.

    :returns: boolean success indicator
    :rtype: bool
    """
    r = super(GameSummRep, self).parse()
    try:
        self.parse_scoring_summary()
        # the original returned 'r and False', which is always False;
        # success should propagate the base-class result
        return r and True
    except:
        return False

def Create(event_type):
    """
    Factory method that creates objects derived from :py:class:`.Event`
    with class name matching the :py:class:`.EventType`.

    :param event_type: number for type of event
    :returns: constructed event corresponding to ``event_type``
    :rtype: :py:class:`.Event`
    """
    if event_type in EventType.Name:
        # unknown event type gets base class
        if EventType.Name[event_type] == Event.__name__:
            return Event()
        else:
            # instantiate Event subclass with same name as EventType name
            return [
                t for t in EventFactory.event_list
                if t.__name__ == EventType.Name[event_type]
            ][0]()
    else:
        raise TypeError("EventFactory.Create: Invalid EventType")

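A usage sketch for the factory; ``EventType.Goal`` is an illustrative member name standing in for any valid event type:

# Hypothetical: EventType.Goal stands for any valid EventType member.
ev = EventFactory.Create(EventType.Goal)
print(type(ev).__name__)   # the Event subclass named to match the EventType
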
def home_shift_summ(self):
    """
    :returns: :py:class:`.ShiftSummary` by player for the home team
    :rtype: dict ``{ player_num: shift_summary_obj }``
    """
    if not self.__wrapped_home:
        self.__wrapped_home = self.__wrap(self._home.by_player)
    return self.__wrapped_home

def away_shift_summ(self):
    """
    :returns: :py:class:`.ShiftSummary` by player for the away team
    :rtype: dict ``{ player_num: shift_summary_obj }``
    """
    if not self.__wrapped_away:
        self.__wrapped_away = self.__wrap(self._away.by_player)
    return self.__wrapped_away

def parse(self):
    """
    Retrieve and parse the roster report for the given
    :py:class:`nhlscrapi.games.game.GameKey`.

    :returns: ``self`` on success, ``None`` otherwise
    """
    try:
        return super(RosterRep, self).parse() \
            .parse_rosters() \
            .parse_scratches() \
            .parse_coaches() \
            .parse_officials()
    except:
        return None

def parse_rosters(self):
    """
    Parse the home and away game rosters.

    :returns: ``self`` on success, ``None`` otherwise
    """
    lx_doc = self.html_doc()
    if not self.__blocks:
        self.__pl_blocks(lx_doc)
    for t in ['home', 'away']:
        self.rosters[t] = self.__clean_pl_block(self.__blocks[t])

    return self if self.rosters else None

def parse_scratches(self):
    """
    Parse the home and away healthy scratches.

    :returns: ``self`` on success, ``None`` otherwise
    """
    lx_doc = self.html_doc()
    if not self.__blocks:
        self.__pl_blocks(lx_doc)
    for t in ['aw_scr', 'h_scr']:
        ix = 'away' if t == 'aw_scr' else 'home'
        self.scratches[ix] = self.__clean_pl_block(self.__blocks[t])

    return self if self.scratches else None

def parse_coaches(self):
    """
    Parse the home and away coaches.

    :returns: ``self`` on success, ``None`` otherwise
    """
    lx_doc = self.html_doc()
    tr = lx_doc.xpath('//tr[@id="HeadCoaches"]')[0]
    for i, td in enumerate(tr):
        txt = td.xpath('.//text()')
        txt = ex_junk(txt, ['\n', '\r'])
        team = 'away' if i == 0 else 'home'
        self.coaches[team] = txt[0]

    return self if self.coaches else None

def parse_officials(self):
    """
    Parse the officials.

    :returns: ``self`` on success, ``None`` otherwise
    """
    lx_doc = self.html_doc()
    off_parser = opm(self.game_key.season)
    self.officials = off_parser(lx_doc)

    return self if self.officials else None

def is_email(address, check_dns=False, diagnose=False):
    """Validate an email address.

    Keyword arguments:
    address   --- the email address as a string
    check_dns --- flag for whether to check the DNS status of the domain
    diagnose  --- flag for whether to return True/False or a Diagnosis
    """
    threshold = BaseDiagnosis.CATEGORIES["THRESHOLD"]
    d = ParserValidator().is_email(address, True)
    if check_dns is True and d < BaseDiagnosis.CATEGORIES["DNSWARN"]:
        threshold = BaseDiagnosis.CATEGORIES["VALID"]
        d = max(d, DNSValidator().is_valid(address.split("@")[1], True))

    return d if diagnose else d < threshold

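A short usage sketch for the validator entry point above; the expected results shown in comments assume the standard parser behavior:

print(is_email('user@example.com'))              # True
print(is_email('user@@example.com'))             # False
d = is_email('user@example.com', diagnose=True)  # a Diagnosis object, not a bool
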
def dispatch_loader(scraper, loader_name):
    """
    Decorator that enforces one-time loading for scrapers. The one-time
    loading is applied to partial loaders, e.g. only parse and load the
    home team roster once. This is not meant to be used directly.

    :param scraper: property name (string) containing an object of type
        :py:class:`scrapr.ReportLoader`
    :param loader_name: name of method that does the scraping/parsing
    :returns: function wrapper
    """
    l = '.'.join([scraper, loader_name])

    def wrapper(f):
        @wraps(f)
        def wrapped(self, *f_args, **f_kwargs):
            if not hasattr(self, '_loaded'):
                self._loaded = {}
            already_loaded = self._loaded.setdefault(l, False)
            if not already_loaded:
                attr = getattr(self, scraper)
                self._loaded[l] = getattr(attr, loader_name)() is not None
            return f(self, *f_args, **f_kwargs)
        return wrapped

    return wrapper

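A sketch of how the decorator might be applied; the class, property, and loader names here are illustrative, not taken from the source:

class GameData(object):
    # 'rosters' is assumed to be a ReportLoader-like property and
    # 'parse_rosters' its loader method; both names are hypothetical.
    @property
    @dispatch_loader('rosters', 'parse_rosters')
    def home_roster(self):
        return self.rosters.rosters['home']
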
def parse_shifts(self):
    """
    Parse shifts from the TOI report.

    :returns: ``self`` if successful, else ``None``
    """
    lx_doc = self.html_doc()
    pl_heads = lx_doc.xpath('//td[contains(@class, "playerHeading")]')
    for pl in pl_heads:
        sh_sum = {}

        pl_text = pl.xpath('text()')[0]
        num_name = pl_text.replace(',', '').split(' ')
        sh_sum['player_num'] = int(num_name[0]) if num_name[0].isdigit() else -1
        sh_sum['player_name'] = {
            'first': num_name[2],
            'last': num_name[1]
        }

        first_shift = pl.xpath('../following-sibling::tr')[1]
        sh_sum['shifts'], last_shift = self.__player_shifts(first_shift)

        while 'Per' not in last_shift.xpath('.//text()'):
            last_shift = last_shift.xpath('following-sibling::tr')[0]
        per_summ = last_shift.xpath('.//tr')[0]
        sh_sum['by_period'], last_sum = self.__get_by_per_summ(per_summ)

        self.by_player[sh_sum['player_num']] = sh_sum

    return self if self.by_player else None

def is_valid(self, domain, diagnose=False):
    """Check whether a domain has a valid MX or A record.

    Keyword arguments:
    domain   --- the domain to check
    diagnose --- flag to report a diagnosis or a boolean (default False)
    """
    return_status = [ValidDiagnosis()]
    dns_checked = False

    # http://tools.ietf.org/html/rfc5321#section-2.3.5
    # Names that can be resolved to MX RRs or address (i.e., A or AAAA)
    # RRs (as discussed in Section 5) are permitted, as are CNAME RRs
    # whose targets can be resolved, in turn, to MX or address RRs.
    #
    # http://tools.ietf.org/html/rfc5321#section-5.1
    # The lookup first attempts to locate an MX record associated with
    # the name. If a CNAME record is found, the resulting name is
    # processed as if it were the initial name. ... If an empty list of
    # MXs is returned, the address is treated as if it was associated
    # with an implicit MX RR, with a preference of 0, pointing to that
    # host.
    #
    # is_email() author's note: We will regard the existence of a CNAME
    # to be sufficient evidence of the domain's existence. For
    # performance reasons we will not repeat the DNS lookup for the
    # CNAME's target, but we will raise a warning because we didn't
    # immediately find an MX record.
    try:
        dns.resolver.query(domain, 'MX')
        dns_checked = True
    except (dns.resolver.NXDOMAIN, dns.name.NameTooLong):
        # Domain can't be found in DNS
        return_status.append(DNSDiagnosis('NO_RECORD'))

        # Since dns.resolver gives more information than the PHP analog,
        # we can say that TLDs that throw an NXDOMAIN or NameTooLong
        # error have been checked
        if len(domain.split('.')) == 1:
            dns_checked = True
    except dns.resolver.NoAnswer:
        # MX-record for domain can't be found
        return_status.append(DNSDiagnosis('NO_MX_RECORD'))

        try:
            # TODO: See if we can/need to narrow to A / CNAME
            dns.resolver.query(domain)
        except dns.resolver.NoAnswer:
            # No usable records for the domain can be found
            return_status.append(DNSDiagnosis('NO_RECORD'))
        except dns.resolver.NoNameservers:
            return_status.append(DNSDiagnosis('NO_NAMESERVERS'))
        except (dns.exception.Timeout, dns.resolver.Timeout):
            return_status.append(DNSDiagnosis('DNS_TIMEDOUT'))

    # Check for TLD addresses
    # -----------------------
    # TLD addresses are specifically allowed in RFC 5321 but they are
    # unusual to say the least. We will allocate a separate status to
    # these addresses on the basis that they are more likely to be typos
    # than genuine addresses (unless we've already established that the
    # domain does have an MX record).
    #
    # http://tools.ietf.org/html/rfc5321#section-2.3.5
    # In the case of a top-level domain used by itself in an address, a
    # single string is used without any dots. This makes the
    # requirement, described in more detail below, that only
    # fully-qualified domain names appear in SMTP transactions on the
    # public Internet, particularly important where top-level domains
    # are involved.
    #
    # TLD format
    # ----------
    # The format of TLDs has changed a number of times. The standards
    # used by IANA have been largely ignored by ICANN, leading to
    # confusion over the standards being followed. These are not defined
    # anywhere, except as a general component of a DNS host name (a
    # label). However, this could potentially lead to 123.123.123.123
    # being a valid DNS name (rather than an IP address) and thereby
    # creating an ambiguity. The most authoritative statement on TLD
    # formats that the author can find is in a (rejected!) erratum to
    # RFC 1123 submitted by John Klensin, the author of RFC 5321:
    #
    # http://www.rfc-editor.org/errata_search.php?rfc=1123&eid=1353
    # However, a valid host name can never have the dotted-decimal form
    # #.#.#.#, since this change does not permit the highest-level
    # component label to start with a digit even if it is not
    # all-numeric.
    if not dns_checked:
        atom_list = domain.split(".")

        if len(atom_list) == 1:
            return_status.append(RFC5321Diagnosis('TLD'))

        try:
            float(atom_list[len(atom_list) - 1][0])
            return_status.append(RFC5321Diagnosis('TLDNUMERIC'))
        except ValueError:
            pass

    final_status = max(return_status)
    return final_status if diagnose else final_status == ValidDiagnosis()

def html_doc(self):
    """
    :returns: the lxml processed html document
    :rtype: ``lxml.html.document_fromstring`` output
    """
    if self.__lx_doc is None:
        cn = NHLCn()

        if hasattr(cn, self.report_type):
            html = getattr(cn, self.report_type)(self.game_key)
        else:
            raise ValueError('Invalid report type: %s' % self.report_type)

        if cn.req_err is None:
            self.__lx_doc = fromstring(html)
        else:
            self.req_err = cn.req_err

    return self.__lx_doc

def parse_matchup(self):
    """
    Parse the banner matchup meta info for the game.

    :returns: ``self`` on success or ``None``
    """
    lx_doc = self.html_doc()
    try:
        if not self.matchup:
            self.matchup = self._fill_meta(lx_doc)
        return self
    except:
        return None

def parse_plays_stream(self):
    """Generate and yield a stream of parsed plays. Useful for per-play
    processing."""
    lx_doc = self.html_doc()
    if lx_doc is not None:
        parser = PlayParser(self.game_key.season, self.game_key.game_type)
        plays = lx_doc.xpath('//tr[@class = "evenColor"]')
        for p in plays:
            p_obj = parser.build_play(p)
            self.plays.append(p_obj)
            yield p_obj

def ColMap(season):
    """
    Return a dictionary mapping the type of information in the RTSS play
    row to the appropriate column number. The column locations pre/post
    2008 are different.

    :param season: int for the season number
    :returns: mapping of RTSS column to info type
    :rtype: dict, keys are ``'play_num', 'per', 'str', 'time', 'event',
        'desc', 'vis', 'home'``
    """
    if c.MIN_SEASON <= season <= c.MAX_SEASON:
        return {
            "play_num": 0,
            "per": 1,
            "str": 2,
            "time": 3,
            "event": 4,
            "desc": 5,
            "vis": 6,
            "home": 7
        }
    else:
        raise ValueError("RTSSCol.MAP(season): Invalid season " + str(season))

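A quick look-up example; the season value is illustrative and must fall between ``c.MIN_SEASON`` and ``c.MAX_SEASON``:

cols = PlayParser.ColMap(2011)   # 2011 is an illustrative season number
desc_ix = cols['desc']           # -> 5, the RTSS description column
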
def build_play(self, pbp_row):
    """
    Parse a table row from RTSS. These are the rows tagged with
    ``<tr class='evenColor' ... >``. The result set contains
    :py:class:`nhlscrapi.games.playbyplay.Strength` and
    :py:class:`nhlscrapi.games.events.EventType` objects. Returned play
    data is in the form

    .. code:: python

        {
            'play_num': num_of_play,
            'period': curr_period,
            'strength': strength_enum,
            'time': { 'min': min, 'sec': sec },
            'vis_on_ice': { 'player_num': player },
            'home_on_ice': { 'player_num': player },
            'event': event_object
        }

    :param pbp_row: table row from RTSS
    :returns: play data
    :rtype: dict
    """
    d = pbp_row.findall('./td')
    c = PlayParser.ColMap(self.season)
    p = {}

    def to_int(t, default=0):
        # text to int with a default for non-numeric cells; the original
        # defined an unused 'to_dig' lambda while calling to_int()
        return int(t) if t and t.isdigit() else default

    p['play_num'] = to_int(d[c["play_num"]].text, 0)
    p['period'] = to_int(d[c["per"]].text, 0)
    p['strength'] = self.__strength(d[c["str"]].text)

    time = d[c["time"]].text.split(":")
    p['time'] = {"min": int(time[0]), "sec": int(time[1])}

    skater_tab = d[c["vis"]].xpath("./table")
    p['vis_on_ice'] = self.__skaters(skater_tab[0][0]) if len(skater_tab) else {}

    skater_tab = d[c["home"]].xpath("./table")
    p['home_on_ice'] = self.__skaters(skater_tab[0][0]) if len(skater_tab) else {}

    p['event'] = event_type_mapper(
        d[c["event"]].text,
        period=p['period'],
        skater_ct=len(p['vis_on_ice']) + len(p['home_on_ice']),
        game_type=self.game_type
    )
    p['event'].desc = " ".join(
        t.encode('ascii', 'replace').decode('utf-8')
        for t in d[c["desc"]].xpath("text()")
    )

    parse_event_desc(p['event'], season=self.season)

    return p

def __skaters(self, tab):
    """
    Construct a dictionary of the players on the ice in the provided
    table at the time of the play.

    :param tab: RTSS table of the skaters and goalie on at the time of
        the play
    :rtype: dictionary, key = player number, value = [position, name]
    """
    res = {}
    for td in tab.iterchildren():
        if len(td):
            pl_data = td.xpath("./table/tr")
            pl = pl_data[0].xpath("./td/font")
            if pl[0].text.isdigit():
                # the title attribute is of the form 'Position - Name';
                # reversed so the value reads [position, name]. The
                # original also computed unused locals from the same data.
                res[int(pl[0].text)] = [
                    s.strip() for s in pl[0].get("title").split("-")
                ][::-1]
    return res

def exclude_from(l, containing=[], equal_to=[]):
    r"""Exclude elements of list ``l`` that contain any element of
    ``containing`` or equal any element of ``equal_to``.

    Example:
    >>> l = ['bob', 'r', 'rob\r', '\r\nrobert']
    >>> containing = ['\n', '\r']
    >>> equal_to = ['r']
    >>> exclude_from(l, containing, equal_to)
    ['bob']
    """
    cont = lambda li: any(c in li for c in containing)
    eq = lambda li: any(e == li for e in equal_to)
    return [li for li in l if not (cont(li) or eq(li))]

def calc_surfdist(surface, labels, annot, reg, origin, target):
    """
    inputs:
    surface - surface file (e.g. lh.pial, with full path)
    labels  - label file (e.g. lh.cortex.label, with full path)
    annot   - annot file (e.g. lh.aparc.a2009s.annot, with full path)
    reg     - registration file (lh.sphere.reg)
    origin  - the label from which we calculate distances
    target  - target surface (e.g. fsaverage4)
    """
    # the original placed this docstring after the imports, where it is
    # not a docstring at all; imports moved inside to keep the function
    # self-contained, and the unused csv import was dropped
    import os
    import nibabel as nib
    import numpy as np
    from surfdist import load, utils, surfdist

    # Load stuff
    surf = nib.freesurfer.read_geometry(surface)
    cort = np.sort(nib.freesurfer.read_label(labels))
    src = load.load_freesurfer_label(annot, origin, cort)

    # Calculate distances
    dist = surfdist.dist_calc(surf, cort, src)

    # Project distances to target
    trg = nib.freesurfer.read_geometry(target)[0]
    native = nib.freesurfer.read_geometry(reg)[0]
    idx_trg_to_native = utils.find_node_match(trg, native)[0]

    # Get indices in trg space
    distt = dist[idx_trg_to_native]

    # Write to file and return the file path
    filename = os.path.join(os.getcwd(), 'distances.csv')
    distt.tofile(filename, sep=",")

    return filename

def stack_files(files, hemi, source, target):
    """Take a list of files as input and vstack their contents."""
    import os
    import numpy as np

    fname = "sdist_%s_%s_%s.csv" % (hemi, source, target)
    filename = os.path.join(os.getcwd(), fname)

    alldist = []
    for dfile in files:
        alldist.append(np.genfromtxt(dfile, delimiter=','))
    alldist = np.array(alldist)
    alldist.tofile(filename, ",")

    return filename

def get_short_url(self, obj):
    """
    Get the short URL of a blog post like '/blog/<slug>/' using
    ``get_absolute_url`` if available. Removes the dependency on reverse
    URLs of Mezzanine views when deploying Mezzanine only as an API
    backend.
    """
    try:
        url = obj.get_absolute_url()
    except NoReverseMatch:
        url = '/blog/' + obj.slug
    return url

def head_to_head(self, home_num, away_num):
    """
    Return the head-to-head face-off outcomes between two players. If the
    matchup didn't happen, ``{ }`` is returned.

    :param home_num: the number of the home team player
    :param away_num: the number of the away team player
    :returns: dict, either ``{ }`` or the following

    .. code:: python

        {
            'home/away': {
                'off/def/neut/all': { 'won': won, 'total': total }
            }
        }
    """
    if home_num in self.home_fo and away_num in self.home_fo[home_num]['opps']:
        h_fo = self.home_fo[home_num]['opps'][away_num]
        a_fo = self.away_fo[away_num]['opps'][home_num]
        return {
            'home': {k: h_fo[k] for k in self.__zones},
            'away': {k: a_fo[k] for k in self.__zones}
        }
    else:
        return {}

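A usage sketch; ``foc`` stands for the face-off comparison object and the jersey numbers are illustrative:

h2h = foc.head_to_head(19, 13)
if h2h:
    print(h2h['home']['all'])   # e.g. { 'won': 4, 'total': 7 }
else:
    print('no head-to-head draws between these two players')
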
def team_totals(self):
    """
    Return the overall face-off win/total breakdown for home and away.

    :returns: dict, ``{ 'home/away': { 'won': won, 'total': total } }``
    """
    if self.__team_tots is None:
        self.__team_tots = self.__comp_tot()

    return {t: self.__team_tots[t]['all'] for t in ['home', 'away']}

def by_zone(self):
    """
    Return the face-off win/total breakdown by zone for home and away as

    .. code:: python

        {
            'home/away': {
                'off/def/neut/all': { 'won': won, 'total': total }
            }
        }

    :returns: dict
    """
    if self.__team_tots is None:
        self.__team_tots = self.__comp_tot()

    return {
        t: {z: self.__team_tots[t][z] for z in self.__zones if z != 'all'}
        for t in ['home', 'away']
    }

def fo_pct(self):
    """
    Get the by-team overall face-off win %.

    :returns: dict, ``{ 'home': %, 'away': % }``
    """
    tots = self.team_totals
    return {
        t: tots[t]['won'] / (1.0 * tots[t]['total']) if tots[t]['total'] else 0.0
        for t in ['home', 'away']
    }

def fo_pct_by_zone(self):
    """
    Get the by-team face-off win % by zone.

    :returns: dict, ``{ 'home/away': { 'off/def/neut': % } }``
    """
    bz = self.by_zone
    return {
        t: {
            z: bz[t][z]['won'] / (1.0 * bz[t][z]['total']) if bz[t][z]['total'] else 0.0
            for z in self.__zones if z != 'all'
        }
        for t in ['home', 'away']
    }

def update(self, play):
    """
    Update the accumulator with the current play.

    :returns: new tally
    :rtype: dict, ``{ 'period': per, 'time': clock, 'team': cumul,
        'play': play }``
    """
    new_tally = {}

    #if any(isinstance(play.event, te) for te in self.trigger_event_types):
    if self._count_play(play):
        # the team who made the play / triggered the event
        team = self._get_team(play)

        try:
            self.total[team] += 1
        except KeyError:
            # first counted play for this team: start its running totals
            # and backfill zeros into the existing tallies
            self.total[team] = 1
            self.teams.append(team)
            for i in range(len(self.tally)):
                self.tally[i][team] = 0

        try:
            new_tally = {k: v for k, v in self.tally[-1].items()}
            new_tally['period'] = play.period
            new_tally['time'] = play.time
            new_tally[team] += 1
            new_tally['play'] = play
        except (IndexError, KeyError):
            # no prior tally yet
            new_tally = {
                'period': play.period,
                'time': play.time,
                team: 1,
                'play': play
            }

        self.tally.append(new_tally)

    return new_tally

def share(self):
    """
    The Corsi share (% of shot attempts) for each team.

    :returns: dict, ``{ 'home_name': %, 'away_name': % }``
    """
    tot = sum(self.total.values())
    return {k: v / float(tot) for k, v in self.total.items()}

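A worked example of the computation above; the team keys are illustrative:

# If self.total == { 'TOR': 55, 'MTL': 45 }, then tot == 100 and
# share() returns { 'TOR': 0.55, 'MTL': 0.45 }
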
def compute_stats(self):
    """
    Compute the stats defined in ``self.cum_stats``.

    :returns: collection of all computed :py:class:`.AccumulateStats`
    :rtype: dict
    """
    if not self.__have_stats:
        if self.init_cs_teams and self.cum_stats:
            self.__init_cs_teams()

        for play in self._rep_reader.parse_plays_stream():
            p = Play(**play)
            self.__wrapped_plays.append(p)

            if self.cum_stats:
                self.__process(p, self.cum_stats, 'update')

        self.__have_stats = True

    return self.cum_stats

def __html_rep(self, game_key, rep_code):
    """Retrieve the NHL HTML report for the specified game and report
    code."""
    seas, gt, num = game_key.to_tuple()
    url = [
        self.__domain,
        "scores/htmlreports/",
        str(seas - 1), str(seas), "/",
        rep_code, "0", str(gt),
        ("%04i" % num),
        ".HTM"
    ]
    url = ''.join(url)
    return self.__open(url)

def to_char(token):
    """Transform an ASCII control character symbol to its real char.

    Note: If the token is not an ASCII control character symbol, just
    return the token.

    Keyword arguments:
    token -- the token to transform
    """
    if ord(token) in _range(9216, 9229 + 1):
        token = _unichr(ord(token) - 9216)
    return token

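A quick illustration: the Unicode "control picture" U+2400 (code point 9216) maps back to the real NUL character, while ordinary characters pass through unchanged:

print(ord(to_char('\u2400')))   # 0 -- the NUL symbol becomes actual NUL
print(to_char('a'))             # 'a' -- unchanged
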
def is_email(self, address, diagnose=False): """Check that an address address conforms to RFCs 5321, 5322 and others. More specifically, see the follow RFCs: * http://tools.ietf.org/html/rfc5321 * http://tools.ietf.org/html/rfc5322 * http://tools.ietf.org/html/rfc4291#section-2.2 * http://tools.ietf.org/html/rfc1123#section-2.1 * http://tools.ietf.org/html/rfc3696) (guidance only) Keyword arguments: address -- address to check. diagnose -- flag to report a diagnosis or a boolean (default False) """ threshold = BaseDiagnosis.CATEGORIES['VALID'] return_status = [ValidDiagnosis()] parse_data = {} # Parse the address into components, character by character raw_length = len(address) context = Context.LOCALPART # Where we are context_stack = [context] # Where we've been context_prior = Context.LOCALPART # Where we just came from token = '' # The current character token_prior = '' # The previous character parse_data[Context.LOCALPART] = '' # The address' components parse_data[Context.DOMAIN] = '' atom_list = { Context.LOCALPART: [''], Context.DOMAIN: [''] } # The address' dot-atoms element_count = 0 element_len = 0 hyphen_flag = False # Hyphen cannot occur at the end of a subdomain end_or_die = False # CFWS can only appear at the end of an element skip = False # Skip flag that simulates i++ crlf_count = -1 # crlf_count = -1 == !isset(crlf_count) for i in _range(raw_length): # Skip simulates the use of ++ operator if skip: skip = False continue token = address[i] token = to_char(token) # Switch to simulate decrementing; needed for FWS repeat = True while repeat: repeat = False # ------------------------------------------------------- # Local part # ------------------------------------------------------- if context == Context.LOCALPART: # http://tools.ietf.org/html/rfc5322#section-3.4.1 # local-part = dot-atom / quoted-string / # obs-local-part # # dot-atom = [CFWS] dot-atom-text [CFWS] # # dot-atom-text = 1*atext *("." 1*atext) # # quoted-string = [CFWS] # DQUOTE *([FWS] qcontent) [FWS] DQUOTE # [CFWS] # # obs-local-part = word *("." word) # # word = atom / quoted-string # # atom = [CFWS] 1*atext [CFWS] if token == Char.OPENPARENTHESIS: if element_len == 0: # Comments are OK at the beginning of an element if element_count == 0: return_status.append(CFWSDiagnosis('COMMENT')) else: return_status.append( DeprecatedDiagnosis('COMMENT')) else: return_status.append(CFWSDiagnosis('COMMENT')) # We can't start a comment in the middle of an # element, so this better be the end end_or_die = True context_stack.append(context) context = Context.COMMENT elif token == Char.DOT: if element_len == 0: # Another dot, already? Fatal error if element_count == 0: return_status.append( InvalidDiagnosis('DOT_START')) else: return_status.append( InvalidDiagnosis('CONSECUTIVEDOTS')) else: # The entire local-part can be a quoted string for # RFC 5321. If it's just one atom that is quoted # then it's an RFC 5322 obsolete form if end_or_die: return_status.append( DeprecatedDiagnosis('LOCALPART')) # CFWS & quoted strings are OK again now we're at # the beginning of an element (although they are # obsolete forms) end_or_die = False element_len = 0 element_count += 1 parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART].append('') elif token == Char.DQUOTE: if element_len == 0: # The entire local-part can be a quoted string for # RFC 5321. 
If it's just one atom that is quoted # then it's an RFC 5322 obsolete form if element_count == 0: return_status.append( RFC5321Diagnosis('QUOTEDSTRING')) else: return_status.append( DeprecatedDiagnosis('LOCALPART')) parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART][element_count] += token element_len += 1 end_or_die = True context_stack.append(context) context = Context.QUOTEDSTRING else: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_ATEXT')) # Folding White Space (FWS) elif token in [Char.CR, Char.SP, Char.HTAB]: # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if (i+1 == raw_length or to_char(address[i+1]) != Char.LF): return_status.append( InvalidDiagnosis('CR_NO_LF')) break if element_len == 0: if element_count == 0: return_status.append(CFWSDiagnosis('FWS')) else: return_status.append( DeprecatedDiagnosis('FWS')) else: # We can't start FWS in the middle of an element, # so this better be the end end_or_die = True context_stack.append(context) context = Context.FWS token_prior = token # @ elif token == Char.AT: # At this point we should have a valid local-part if len(context_stack) != 1: # pragma: no cover if diagnose: return InvalidDiagnosis('BAD_PARSE') else: return False if parse_data[Context.LOCALPART] == '': # Fatal error return_status.append( InvalidDiagnosis('NOLOCALPART')) elif element_len == 0: # Fatal error return_status.append(InvalidDiagnosis('DOT_END')) # http://tools.ietf.org/html/rfc5321#section-4.5.3.1.1 # The maximum total length of a user name or other # local-part is 64 octets. elif len(parse_data[Context.LOCALPART]) > 64: return_status.append( RFC5322Diagnosis('LOCAL_TOOLONG')) # http://tools.ietf.org/html/rfc5322#section-3.4.1 # Comments and folding white space # SHOULD NOT be used around the "@" in the addr-spec. # # http://tools.ietf.org/html/rfc2119 # 4. SHOULD NOT This phrase, or the phrase "NOT # RECOMMENDED" mean that there may exist valid # reasons in particular circumstances when the # particular behavior is acceptable or even useful, # but the full implications should be understood and # the case carefully weighed before implementing any # behavior described with this label. elif context_prior in [Context.COMMENT, Context.FWS]: return_status.append( DeprecatedDiagnosis('CFWS_NEAR_AT')) # Clear everything down for the domain parsing context = Context.DOMAIN context_stack = [] element_count = 0 element_len = 0 # CFWS can only appear at the end of the element end_or_die = False # atext else: # http://tools.ietf.org/html/rfc5322#section-3.2.3 # atext = ALPHA / DIGIT / ; Printable US-ASCII # "!" / "#" / ; characters not # "$" / "%" / ; including specials. # "&" / "'" / ; Used for atoms. # "*" / "+" / # "-" / "/" / # "=" / "?" 
/ # "^" / "_" / # "`" / "{" / # "|" / "}" / # "~" if end_or_die: # We have encountered atext where it is no longer # valid if context_prior in [Context.COMMENT, Context.FWS]: return_status.append( InvalidDiagnosis('ATEXT_AFTER_CFWS')) elif context_prior == Context.QUOTEDSTRING: return_status.append( InvalidDiagnosis('ATEXT_AFTER_QS')) else: # pragma: no cover if diagnose: return InvalidDiagnosis('BAD_PARSE') else: return False else: context_prior = context o = ord(token) if (o < 33 or o > 126 or o == 10 or token in Char.SPECIALS): return_status.append( InvalidDiagnosis('EXPECTING_ATEXT')) parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART][element_count] += token element_len += 1 # ------------------------------------------------------- # Domain # ------------------------------------------------------- elif context == Context.DOMAIN: # http://tools.ietf.org/html/rfc5322#section-3.4.1 # domain = dot-atom / domain-literal / obs-domain # # dot-atom = [CFWS] dot-atom-text [CFWS] # # dot-atom-text = 1*atext *("." 1*atext) # # domain-literal = [CFWS] # "[" *([FWS] dtext) [FWS] "]" # [CFWS] # # dtext = %d33-90 / ; Printable US-ASCII # %d94-126 / ; characters not # obs-dtext ; including [, ], or \ # # obs-domain = atom *("." atom) # # atom = [CFWS] 1*atext [CFWS] # # # http://tools.ietf.org/html/rfc5321#section-4.1.2 # Mailbox = Local-part # "@" # ( Domain / address-literal ) # # Domain = sub-domain *("." sub-domain) # # address-literal = "[" ( IPv4-address-literal / # IPv6-address-literal / # General-address-literal ) "]" # ; See Section 4.1.3 # # http://tools.ietf.org/html/rfc5322#section-3.4.1 # Note: A liberal syntax for the domain portion of # addr-spec is given here. However, the domain portion # contains addressing information specified by and # used in other protocols (e.g., RFC 1034, RFC 1035, # RFC 1123, RFC5321). It is therefore incumbent upon # implementations to conform to the syntax of # addresse for the context in which they are used. # is_email() author's note: it's not clear how to interpret # this in the context of a general address address # validator. The conclusion I have reached is this: # "addressing information" must comply with RFC 5321 (and # in turn RFC 1035), anything that is "semantically # invisible" must comply only with RFC 5322. # Comment if token == Char.OPENPARENTHESIS: if element_len == 0: # Comments at the start of the domain are # deprecated in the text # Comments at the start of a subdomain are # obs-domain # (http://tools.ietf.org/html/rfc5322#section-3.4.1) if element_count == 0: return_status.append( DeprecatedDiagnosis('CFWS_NEAR_AT')) else: return_status.append( DeprecatedDiagnosis('COMMENT')) else: return_status.append(CFWSDiagnosis('COMMENT')) # We can't start a comment in the middle of an # element, so this better be the end end_or_die = True context_stack.append(context) context = Context.COMMENT # Next dot-atom element elif token == Char.DOT: if element_len == 0: # Another dot, already? Fatal error if element_count == 0: return_status.append( InvalidDiagnosis('DOT_START')) else: return_status.append( InvalidDiagnosis('CONSECUTIVEDOTS')) elif hyphen_flag: # Previous subdomain ended in a hyphen. Fatal error return_status.append( InvalidDiagnosis('DOMAINHYPHENEND')) else: # Nowhere in RFC 5321 does it say explicitly that # the domain part of a Mailbox must be a valid # domain according to the DNS standards set out in # RFC 1035, but this *is* implied in several # places. 
For instance, wherever the idea of host # routing is discussed the RFC says that the domain # must be looked up in the DNS. This would be # nonsense unless the domain was designed to be a # valid DNS domain. Hence we must conclude that the # RFC 1035 restriction on label length also applies # to RFC 5321 domains. # # http://tools.ietf.org/html/rfc1035#section-2.3.4 # labels 63 octets or less if element_len > 63: return_status.append( RFC5322Diagnosis('LABEL_TOOLONG')) # CFWS is OK again now we're at the beginning of an # element (although it may be obsolete CFWS) end_or_die = False element_len = 0 element_count += 1 atom_list[Context.DOMAIN].append('') parse_data[Context.DOMAIN] += token # Domain literal elif token == Char.OPENSQBRACKET: if parse_data[Context.DOMAIN] == '': # Domain literal must be the only component end_or_die = True element_len += 1 context_stack.append(context) context = Context.LITERAL parse_data[Context.DOMAIN] += token atom_list[Context.DOMAIN][element_count] += token parse_data['literal'] = '' else: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_ATEXT')) # Folding White Space (FWS) elif token in [Char.CR, Char.SP, Char.HTAB]: # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if i+1 == raw_length or (to_char(address[i + 1]) != Char.LF): # Fatal error return_status.append( InvalidDiagnosis('CR_NO_LF')) break if element_len == 0: if element_count == 0: return_status.append( DeprecatedDiagnosis('CFWS_NEAR_AT')) else: return_status.append( DeprecatedDiagnosis('FWS')) else: return_status.append(CFWSDiagnosis('FWS')) # We can't start FWS in the middle of an element, # so this better be the end end_or_die = True context_stack.append(context) context = Context.FWS token_prior = token # atext else: # RFC 5322 allows any atext... # http://tools.ietf.org/html/rfc5322#section-3.2.3 # atext = ALPHA / DIGIT / ; Printable US-ASCII # "!" / "#" / ; characters not # "$" / "%" / ; including specials. # "&" / "'" / ; Used for atoms. # "*" / "+" / # "-" / "/" / # "=" / "?" 
/ # "^" / "_" / # "`" / "{" / # "|" / "}" / # "~" # But RFC 5321 only allows letter-digit-hyphen to # comply with DNS rules (RFCs 1034 & 1123) # http://tools.ietf.org/html/rfc5321#section-4.1.2 # sub-domain = Let-dig [Ldh-str] # # Let-dig = ALPHA / DIGIT # # Ldh-str = *( ALPHA / DIGIT / "-" ) Let-dig # if end_or_die: # We have encountered atext where it is no longer # valid if context_prior in [Context.COMMENT, Context.FWS]: return_status.append( InvalidDiagnosis('ATEXT_AFTER_CFWS')) elif context_prior == Context.LITERAL: return_status.append( InvalidDiagnosis('ATEXT_AFTER_DOMLIT')) else: # pragma: no cover if diagnose: return InvalidDiagnosis('BAD_PARSE') else: return False o = ord(token) # Assume this token isn't a hyphen unless we discover # it is hyphen_flag = False if o < 33 or o > 126 or token in Char.SPECIALS: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_ATEXT')) elif token == Char.HYPHEN: if element_len == 0: # Hyphens can't be at the beginning of a # subdomain # Fatal error return_status.append( InvalidDiagnosis('DOMAINHYPHENSTART')) hyphen_flag = True elif not (47 < o < 58 or 64 < o < 91 or 96 < o < 123): # Not an RFC 5321 subdomain, but still OK by RFC # 5322 return_status.append(RFC5322Diagnosis('DOMAIN')) parse_data[Context.DOMAIN] += token atom_list[Context.DOMAIN][element_count] += token element_len += 1 # ------------------------------------------------------- # Domain literal # ------------------------------------------------------- elif context == Context.LITERAL: # http://tools.ietf.org/html/rfc5322#section-3.4.1 # domain-literal = [CFWS] # "[" *([FWS] dtext) [FWS] "]" # [CFWS] # # dtext = %d33-90 / ; Printable US-ASCII # %d94-126 / ; characters not # obs-dtext ; including [, ], or \ # # obs-dtext = obs-NO-WS-CTL / quoted-pair # End of domain literal if token == Char.CLOSESQBRACKET: if (max(return_status) < BaseDiagnosis.CATEGORIES['DEPREC']): # Could be a valid RFC 5321 address literal, so # let's check # # http://tools.ietf.org/html/rfc5321#section-4.1.2 # address-literal = "[" ( IPv4-address-literal / # IPv6-address-literal / # General-address-literal ) "]" # ; See Section 4.1.3 # # http://tools.ietf.org/html/rfc5321#section-4.1.3 # IPv4-address-literal = Snum 3("." Snum) # # IPv6-address-literal = "IPv6:" IPv6-addr # # General-address-literal = Standardized-tag ":" # 1*dcontent # # Standardized-tag = Ldh-str # ; Standardized-tag MUST be # ; specified in a # ; Standards-Track RFC and # ; registered with IANA # # dcontent = %d33-90 / ; Printable US-ASCII # %d94-126 ; excl. "[", "\", "]" # # Snum = 1*3DIGIT # ; representing a decimal integer # ; value in the range 0-255 # # IPv6-addr = IPv6-full / IPv6-comp / # IPv6v4-full / IPv6v4-comp # # IPv6-hex = 1*4HEXDIG # # IPv6-full = IPv6-hex 7(":" IPv6-hex) # # IPv6-comp = [IPv6-hex *5(":" IPv6-hex)] # "::" # [IPv6-hex *5(":" IPv6-hex)] # ; The "::" represents at least 2 # ; 16-bit groups of zeros. No more # ; than 6 groups in addition to # ; the "::" may be present. # # IPv6v4-full = IPv6-hex 5(":" IPv6-hex) ":" # IPv4-address-literal # # IPv6v4-comp = [IPv6-hex *3(":" IPv6-hex)] # "::" # [IPv6-hex *3(":" IPv6-hex) ":"] # IPv4-address-literal # ; The "::" represents at least 2 # ; 16-bit groups of zeros. No more # ; than 4 groups in addition to # ; the "::" and # ; IPv4-address-literal may be # ; present. 
max_groups = 8 index = False address_literal = parse_data['literal'] # Extract IPv4 part from the end of the # address-literal (if there is one) regex = ( r"\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.)" r"{3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$" ) match_ip = re.search(regex, address_literal) if match_ip: index = address_literal.rfind( match_ip.group(0)) if index != 0: # Convert IPv4 part to IPv6 format for # further testing address_literal = ( address_literal[0:index] + '0:0') if index == 0 and index is not False: # Nothing there except a valid IPv4 address return_status.append( RFC5321Diagnosis('ADDRESSLITERAL')) elif not address_literal.startswith(Char.IPV6TAG): return_status.append( RFC5322Diagnosis('DOMAINLITERAL')) else: ipv6 = address_literal[5:] # Revision 2.7: Daniel Marschall's new IPv6 # testing strategy match_ip = ipv6.split(Char.COLON) grp_count = len(match_ip) index = ipv6.find(Char.DOUBLECOLON) if index == -1: # We need exactly the right number of # groups if grp_count != max_groups: return_status.append( RFC5322Diagnosis('IPV6_GRPCOUNT')) else: if index != ipv6.rfind(Char.DOUBLECOLON): return_status.append( RFC5322Diagnosis('IPV6_2X2XCOLON')) else: if index in [0, len(ipv6) - 2]: # RFC 4291 allows :: at the start # or end of an address with 7 other # groups in addition max_groups += 1 if grp_count > max_groups: return_status.append( RFC5322Diagnosis( 'IPV6_MAXGRPS')) elif grp_count == max_groups: # Eliding a single "::" return_status.append( RFC5321Diagnosis( 'IPV6DEPRECATED')) # Revision 2.7: Daniel Marschall's new IPv6 # testing strategy if (ipv6[0] == Char.COLON and ipv6[1] != Char.COLON): # Address starts with a single colon return_status.append( RFC5322Diagnosis('IPV6_COLONSTRT')) elif (ipv6[-1] == Char.COLON and ipv6[-2] != Char.COLON): # Address ends with a single colon return_status.append( RFC5322Diagnosis('IPV6_COLONEND')) elif ([re.match(r"^[0-9A-Fa-f]{0,4}$", i) for i in match_ip].count(None) != 0): # Check for unmatched characters return_status.append( RFC5322Diagnosis('IPV6_BADCHAR')) else: return_status.append( RFC5321Diagnosis('ADDRESSLITERAL')) else: return_status.append( RFC5322Diagnosis('DOMAINLITERAL')) parse_data[Context.DOMAIN] += token atom_list[Context.DOMAIN][element_count] += token element_len += 1 context_prior = context context = context_stack.pop() elif token == Char.BACKSLASH: return_status.append( RFC5322Diagnosis('DOMLIT_OBSDTEXT')) context_stack.append(context) context = Context.QUOTEDPAIR # Folding White Space (FWS) elif token in [Char.CR, Char.SP, Char.HTAB]: # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if (i+1 == raw_length or to_char(address[i+1]) != Char.LF): return_status.append( InvalidDiagnosis('CR_NO_LF')) break return_status.append(CFWSDiagnosis('FWS')) context_stack.append(context) context = Context.FWS token_prior = token # dtext else: # http://tools.ietf.org/html/rfc5322#section-3.4.1 # dtext = %d33-90 / ; Printable US-ASCII # %d94-126 / ; characters not # obs-dtext ; including [, ], or \ # # obs-dtext = obs-NO-WS-CTL / quoted-pair # # obs-NO-WS-CTL = %d1-8 / ; US-ASCII control # %d11 / ; characters that do # %d12 / ; not include the # %d14-31 / ; carriage return, line # %d127 ; feed, and white space # ; characters o = ord(token) # CR, LF, SP & HTAB have already been parsed above if o > 127 or o == 0 or token == Char.OPENSQBRACKET: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_DTEXT')) break elif o < 33 or o == 127: 
                        return_status.append(
                            RFC5322Diagnosis('DOMLIT_OBSDTEXT'))

                    parse_data['literal'] += token
                    parse_data[Context.DOMAIN] += token
                    atom_list[Context.DOMAIN][element_count] += token
                    element_len += 1

            # -------------------------------------------------------
            # Quoted string
            # -------------------------------------------------------
            elif context == Context.QUOTEDSTRING:
                # http://tools.ietf.org/html/rfc5322#section-3.2.4
                #   quoted-string = [CFWS]
                #                   DQUOTE *([FWS] qcontent) [FWS] DQUOTE
                #                   [CFWS]
                #
                #   qcontent      = qtext / quoted-pair

                # Quoted pair
                if token == Char.BACKSLASH:
                    context_stack.append(context)
                    context = Context.QUOTEDPAIR
                # Folding White Space (FWS)
                # Inside a quoted string, spaces are allowed as regular
                # characters. It's only FWS if we include HTAB or CRLF
                elif token in [Char.CR, Char.HTAB]:
                    # Skip simulates the use of ++ operator if the latter
                    # check doesn't short-circuit
                    if token == Char.CR:
                        skip = True

                        if (i+1 == raw_length or
                                to_char(address[i+1]) != Char.LF):
                            return_status.append(
                                InvalidDiagnosis('CR_NO_LF'))
                            break

                    # http://tools.ietf.org/html/rfc5322#section-3.2.2
                    #   Runs of FWS, comment, or CFWS that occur between
                    #   lexical tokens in a structured header field are
                    #   semantically interpreted as a single space
                    #   character.

                    # http://tools.ietf.org/html/rfc5322#section-3.2.4
                    #   the CRLF in any FWS/CFWS that appears within the
                    #   quoted string [is] semantically "invisible" and
                    #   therefore not part of the quoted-string
                    parse_data[Context.LOCALPART] += Char.SP
                    atom_list[Context.LOCALPART][element_count] += Char.SP
                    element_len += 1

                    return_status.append(CFWSDiagnosis('FWS'))
                    context_stack.append(context)
                    context = Context.FWS
                    token_prior = token
                # End of quoted string
                elif token == Char.DQUOTE:
                    parse_data[Context.LOCALPART] += token
                    atom_list[Context.LOCALPART][element_count] += token
                    element_len += 1
                    context_prior = context
                    context = context_stack.pop()
                # qtext
                else:
                    # http://tools.ietf.org/html/rfc5322#section-3.2.4
                    #   qtext         = %d33 /     ; Printable US-ASCII
                    #                   %d35-91 /  ; characters not
                    #                   %d93-126 / ; including "\" or
                    #                   obs-qtext  ; the quote
                    #                              ; character
                    #
                    #   obs-qtext     = obs-NO-WS-CTL
                    #
                    #   obs-NO-WS-CTL = %d1-8 /    ; US-ASCII control
                    #                   %d11 /     ; characters that do
                    #                   %d12 /     ; not include the CR,
                    #                   %d14-31 /  ; LF, and white space
                    #                   %d127      ; characters
                    o = ord(token)

                    if o > 127 or o == 0 or o == 10:
                        # Fatal error
                        return_status.append(
                            InvalidDiagnosis('EXPECTING_QTEXT'))
                    elif o < 32 or o == 127:
                        return_status.append(
                            DeprecatedDiagnosis('QTEXT'))

                    parse_data[Context.LOCALPART] += token
                    atom_list[Context.LOCALPART][element_count] += token
                    element_len += 1

            # -------------------------------------------------------
            # Quoted pair
            # -------------------------------------------------------
            elif context == Context.QUOTEDPAIR:
                # http://tools.ietf.org/html/rfc5322#section-3.2.1
                #   quoted-pair   = ("\" (VCHAR / WSP)) / obs-qp
                #
                #   VCHAR         = %d33-126  ; visible (printing)
                #                             ; characters
                #
                #   WSP           = SP / HTAB ; white space
                #
                #   obs-qp        = "\" (%d0 / obs-NO-WS-CTL / LF / CR)
                #
                #   obs-NO-WS-CTL = %d1-8 /   ; US-ASCII control
                #                   %d11 /    ; characters that do not
                #                   %d12 /    ; include the carriage
                #                   %d14-31 / ; return, line feed, and
                #                   %d127     ; white space characters
                #
                # i.e. obs-qp = "\" (%d0-8, %d10-31 / %d127)
                o = ord(token)

                if o > 127:
                    # Fatal error
                    return_status.append(
                        InvalidDiagnosis('EXPECTING_QPAIR'))
                elif (o < 31 and o != 9) or o == 127:
                    # SP & HTAB are allowed
                    return_status.append(DeprecatedDiagnosis('QP'))

                # At this point we know where this qpair occurred so
                # we could check to see if the character actually
                # needed to be quoted at all.
# http://tools.ietf.org/html/rfc5321#section-4.1.2 # the sending system SHOULD transmit the # form that uses the minimum quoting possible. context_prior = context context = context_stack.pop() # End of qpair token = Char.BACKSLASH + token if context == Context.COMMENT: pass elif context == Context.QUOTEDSTRING: parse_data[Context.LOCALPART] += token atom_list[Context.LOCALPART][element_count] += token # The maximum sizes specified by RFC 5321 are octet # counts, so we must include the backslash element_len += 2 elif context == Context.LITERAL: parse_data[Context.DOMAIN] += token atom_list[Context.DOMAIN][element_count] += token # The maximum sizes specified by RFC 5321 are octet # counts, so we must include the backslash element_len += 2 else: # pragma: no cover if diagnose: return InvalidDiagnosis('BAD_PARSE') else: return False # ------------------------------------------------------- # Comment # ------------------------------------------------------- elif context == Context.COMMENT: # http://tools.ietf.org/html/rfc5322#section-3.2.2 # comment = "(" *([FWS] ccontent) [FWS] ")" # # ccontent = ctext / quoted-pair / comment # Nested comment if token == Char.OPENPARENTHESIS: # Nested comments are OK context_stack.append(context) context = Context.COMMENT # End of comment elif token == Char.CLOSEPARENTHESIS: context_prior = context context = context_stack.pop() # Quoted pair elif token == Char.BACKSLASH: context_stack.append(context) context = Context.QUOTEDPAIR # Folding White Space (FWS) elif token in [Char.CR, Char.SP, Char.HTAB]: # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if (i+1 == raw_length or to_char(address[i+1]) != Char.LF): return_status.append( InvalidDiagnosis('CR_NO_LF')) break return_status.append(CFWSDiagnosis('FWS')) context_stack.append(context) context = Context.FWS token_prior = token # ctext else: # http://tools.ietf.org/html/rfc5322#section-3.2.3 # ctext = %d33-39 / ; Printable US- # %d42-91 / ; ASCII characters # %d93-126 / ; not including # obs-ctext ; "(", ")", or "\" # # obs-ctext = obs-NO-WS-CTL # # obs-NO-WS-CTL = %d1-8 / ; US-ASCII control # %d11 / ; characters that # %d12 / ; do not include # %d14-31 / ; the CR, LF, and # ; white space # ; characters o = ord(token) if o > 127 or o == 0 or o == 10: # Fatal error return_status.append( InvalidDiagnosis('EXPECTING_CTEXT')) break elif o < 32 or o == 127: return_status.append(DeprecatedDiagnosis('CTEXT')) # ------------------------------------------------------- # Folding White Space (FWS) # ------------------------------------------------------- elif context == Context.FWS: # http://tools.ietf.org/html/rfc5322#section-3.2.2 # FWS = ([*WSP CRLF] 1*WSP) / obs-FWS # ; Folding white space # # But note the erratum: # http://www.rfc-editor.org/errata_search.php?rfc=5322&eid=1908 # In the obsolete syntax, any amount of folding white # space MAY be inserted where the obs-FWS rule is # allowed. This creates the possibility of having two # consecutive "folds" in a line, and therefore the # possibility that a line which makes up a folded header # field could be composed entirely of white space. 
# # obs-FWS = 1*([CRLF] WSP) if token_prior == Char.CR: if token == Char.CR: # Fatal error return_status.append( InvalidDiagnosis('FWS_CRLF_X2')) break if crlf_count != -1: crlf_count += 1 if crlf_count > 1: # Multiple folds = obsolete FWS return_status.append( DeprecatedDiagnosis('FWS')) else: crlf_count = 1 # Skip simulates the use of ++ operator if the latter # check doesn't short-circuit if token == Char.CR: skip = True if (i+1 == raw_length or to_char(address[i+1]) != Char.LF): return_status.append(InvalidDiagnosis('CR_NO_LF')) break elif token in [Char.SP, Char.HTAB]: pass else: if token_prior == Char.CR: # Fatal error return_status.append( InvalidDiagnosis('FWS_CRLF_END')) break if crlf_count != -1: crlf_count = -1 context_prior = context # End of FWS context = context_stack.pop() # Look at this token again in the parent context repeat = True token_prior = token # ------------------------------------------------------- # A context we aren't expecting # ------------------------------------------------------- else: # pragma: no cover if diagnose: return InvalidDiagnosis('BAD_PARSE') else: return False # No point in going on if we've got a fatal error if max(return_status) > BaseDiagnosis.CATEGORIES['RFC5322']: break # Some simple final tests if max(return_status) < BaseDiagnosis.CATEGORIES['RFC5322']: if context == Context.QUOTEDSTRING: # Fatal error return_status.append(InvalidDiagnosis('UNCLOSEDQUOTEDSTR')) elif context == Context.QUOTEDPAIR: # Fatal error return_status.append(InvalidDiagnosis('BACKSLASHEND')) elif context == Context.COMMENT: # Fatal error return_status.append(InvalidDiagnosis('UNCLOSEDCOMMENT')) elif context == Context.LITERAL: # Fatal error return_status.append(InvalidDiagnosis('UNCLOSEDDOMLIT')) elif token == Char.CR: # Fatal error return_status.append(InvalidDiagnosis('FWS_CRLF_END')) elif parse_data[Context.DOMAIN] == '': # Fatal error return_status.append(InvalidDiagnosis('NODOMAIN')) elif element_len == 0: # Fatal error return_status.append(InvalidDiagnosis('DOT_END')) elif hyphen_flag: # Fatal error return_status.append(InvalidDiagnosis('DOMAINHYPHENEND')) # http://tools.ietf.org/html/rfc5321#section-4.5.3.1.2 # The maximum total length of a domain name or number is 255 octets elif len(parse_data[Context.DOMAIN]) > 255: return_status.append(RFC5322Diagnosis('DOMAIN_TOOLONG')) # http://tools.ietf.org/html/rfc5321#section-4.1.2 # Forward-path = Path # # Path = "<" [ A-d-l ":" ] Mailbox ">" # # http://tools.ietf.org/html/rfc5321#section-4.5.3.1.3 # The maximum total length of a reverse-path or forward-path is # 256 octets (including the punctuation and element separators). # # Thus, even without (obsolete) routing information, the Mailbox # can only be 254 characters long. This is confirmed by this # verified erratum to RFC 3696: # # http://www.rfc-editor.org/errata_search.php?rfc=3696&eid=1690 # However, there is a restriction in RFC 2821 on the length of an # address in MAIL and RCPT commands of 254 characters. Since # addresses that do not fit in those fields are not normally # useful, the upper limit on address lengths should normally be # considered to be 254. 
elif len(parse_data[Context.LOCALPART] + Char.AT + parse_data[Context.DOMAIN]) > 254: return_status.append(RFC5322Diagnosis('TOOLONG')) # http://tools.ietf.org/html/rfc1035#section-2.3.4 # labels 63 octets or less elif element_len > 63: return_status.append(RFC5322Diagnosis('LABEL_TOOLONG')) return_status = list(set(return_status)) final_status = max(return_status) if len(return_status) != 1: # Remove redundant ValidDiagnosis return_status.pop(0) parse_data['status'] = return_status if final_status < threshold: final_status = ValidDiagnosis() if diagnose: return final_status else: return final_status < BaseDiagnosis.CATEGORIES['THRESHOLD']
Check that an address conforms to RFCs 5321, 5322 and others.

    More specifically, see the following RFCs:
        * http://tools.ietf.org/html/rfc5321
        * http://tools.ietf.org/html/rfc5322
        * http://tools.ietf.org/html/rfc4291#section-2.2
        * http://tools.ietf.org/html/rfc1123#section-2.1
        * http://tools.ietf.org/html/rfc3696 (guidance only)

    Keyword arguments:
    address  -- address to check.
    diagnose -- flag to report a diagnosis or a boolean (default False)
entailment
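The IPv6 group-count rules applied in the domain-literal branch above can be distilled into a small standalone check. The sketch below is illustrative only: the function name and boolean return are mine, and the validator itself records graded diagnoses rather than returning early.

def ipv6_group_check(ipv6):
    # ipv6 is the text after the "IPv6:" tag, e.g. "2001:db8::1".
    # Mirrors the group-counting logic above; returns True when the
    # grouping is structurally valid under RFC 5321/4291.
    max_groups = 8
    groups = ipv6.split(':')
    index = ipv6.find('::')
    if index == -1:
        # Without "::" we need exactly the right number of groups
        return len(groups) == max_groups
    if index != ipv6.rfind('::'):
        # More than one "::" is never valid
        return False
    if index in (0, len(ipv6) - 2):
        # "::" at the start or end splits into one extra empty group
        max_groups += 1
    return len(groups) <= max_groups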
def load_freesurfer_label(annot_input, label_name, cortex=None): """ Get source node list for a specified freesurfer label. Inputs ------- annot_input : freesurfer annotation label file label_name : freesurfer label name cortex : not used """ if cortex is not None: print("Warning: cortex is not used to load the freesurfer label") labels, color_table, names = nib.freesurfer.read_annot(annot_input) names = [i.decode('utf-8') for i in names] label_value = names.index(label_name) label_nodes = np.array(np.where(np.in1d(labels, label_value)), dtype=np.int32) return label_nodes
Get source node list for a specified freesurfer label. Inputs ------- annot_input : freesurfer annotation label file label_name : freesurfer label name cortex : not used
entailment
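A minimal usage sketch for the loader above; the annotation path and label name are hypothetical and depend on your FreeSurfer subject directory:

# hypothetical path into a FreeSurfer subject directory
annot = '/path/to/subject/label/lh.aparc.annot'
src = load_freesurfer_label(annot, 'precentral')  # node indices for one label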
def get_freesurfer_label(annot_input, verbose = True): """ Print freesurfer label names. """ labels, color_table, names = nib.freesurfer.read_annot(annot_input) if verbose: print(names) return names
Print freesurfer label names.
entailment
def viz(coords, faces, stat_map=None, elev=0, azim=0, cmap='coolwarm',
        threshold=None, alpha='auto', bg_map=None, bg_on_stat=False,
        figsize=None, **kwargs):
    '''
    Visualize results on cortical surface using matplotlib.

    Inputs
    -------
    coords : numpy array of shape (n_nodes,3), each row specifying the x,y,z
             coordinates of one node of the surface mesh
    faces : numpy array of shape (n_faces, 3), each row specifying the indices
            of the three nodes building one face of the surface mesh
    stat_map : numpy array of shape (n_nodes,) containing the values to be
               visualized for each node.
    elev, azim : integers, elevation and azimuth parameters specifying the
                 view on the 3D plot. For Freesurfer surfaces elev=0, azim=0
                 will give a lateral view for the right and a medial view for
                 the left hemisphere, elev=0, azim=180 will give a medial view
                 for the right and lateral view for the left hemisphere.
    cmap : Matplotlib colormap, the color range will be forced to be
           symmetric. Colormaps can be specified as string or colormap object.
    threshold : float, threshold to be applied to the map, will be applied in
                positive and negative direction, i.e. values < -abs(threshold)
                and > abs(threshold) will be shown.
    alpha : float, determines the opacity of the background mesh, in 'auto'
            mode alpha defaults to .5 when no background map is given, to 1
            otherwise.
    bg_map : numpy array of shape (n_nodes,) to be plotted underneath the
             statistical map. Specifying a sulcal depth map as bg_map results
             in realistic shadowing of the surface.
    bg_on_stat : boolean, specifies whether the statistical map should be
                 multiplied with the background map for shadowing. Otherwise,
                 only areas that are not covered by the statistical map after
                 thresholding will show shadows.
    figsize : tuple of integers, dimensions of the figure that is produced.
Output ------ Matplotlib figure object ''' import numpy as np import matplotlib.pyplot as plt import matplotlib.tri as tri from mpl_toolkits.mplot3d import Axes3D # load mesh and derive axes limits faces = np.array(faces, dtype=int) limits = [coords.min(), coords.max()] # set alpha if in auto mode if alpha == 'auto': if bg_map is None: alpha = .5 else: alpha = 1 # if cmap is given as string, translate to matplotlib cmap if type(cmap) == str: cmap = plt.cm.get_cmap(cmap) # initiate figure and 3d axes if figsize is not None: fig = plt.figure(figsize=figsize) else: fig = plt.figure() ax = fig.add_subplot(111, projection='3d', xlim=limits, ylim=limits) ax.view_init(elev=elev, azim=azim) ax.set_axis_off() # plot mesh without data p3dcollec = ax.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2], triangles=faces, linewidth=0., antialiased=False, color='white') # If depth_map and/or stat_map are provided, map these onto the surface # set_facecolors function of Poly3DCollection is used as passing the # facecolors argument to plot_trisurf does not seem to work if bg_map is not None or stat_map is not None: face_colors = np.ones((faces.shape[0], 4)) face_colors[:, :3] = .5*face_colors[:, :3] if bg_map is not None: bg_data = bg_map if bg_data.shape[0] != coords.shape[0]: raise ValueError('The bg_map does not have the same number ' 'of vertices as the mesh.') bg_faces = np.mean(bg_data[faces], axis=1) bg_faces = bg_faces - bg_faces.min() bg_faces = bg_faces / bg_faces.max() face_colors = plt.cm.gray_r(bg_faces) # modify alpha values of background face_colors[:, 3] = alpha*face_colors[:, 3] if stat_map is not None: stat_map_data = stat_map stat_map_faces = np.mean(stat_map_data[faces], axis=1) # Ensure symmetric colour range, based on Nilearn helper function: # https://github.com/nilearn/nilearn/blob/master/nilearn/plotting/img_plotting.py#L52 vmax = max(-np.nanmin(stat_map_faces), np.nanmax(stat_map_faces)) vmin = -vmax if threshold is not None: kept_indices = np.where(abs(stat_map_faces) >= threshold)[0] stat_map_faces = stat_map_faces - vmin stat_map_faces = stat_map_faces / (vmax-vmin) if bg_on_stat: face_colors[kept_indices] = cmap(stat_map_faces[kept_indices]) * face_colors[kept_indices] else: face_colors[kept_indices] = cmap(stat_map_faces[kept_indices]) else: stat_map_faces = stat_map_faces - vmin stat_map_faces = stat_map_faces / (vmax-vmin) if bg_on_stat: face_colors = cmap(stat_map_faces) * face_colors else: face_colors = cmap(stat_map_faces) p3dcollec.set_facecolors(face_colors) return fig, ax
Visualize results on cortical surface using matplotlib.

Inputs
-------
coords : numpy array of shape (n_nodes,3), each row specifying the x,y,z coordinates of one node of the surface mesh
faces : numpy array of shape (n_faces, 3), each row specifying the indices of the three nodes building one face of the surface mesh
stat_map : numpy array of shape (n_nodes,) containing the values to be visualized for each node.
elev, azim : integers, elevation and azimuth parameters specifying the view on the 3D plot. For Freesurfer surfaces elev=0, azim=0 will give a lateral view for the right and a medial view for the left hemisphere, elev=0, azim=180 will give a medial view for the right and lateral view for the left hemisphere.
cmap : Matplotlib colormap, the color range will be forced to be symmetric. Colormaps can be specified as string or colormap object.
threshold : float, threshold to be applied to the map, will be applied in positive and negative direction, i.e. values < -abs(threshold) and > abs(threshold) will be shown.
alpha : float, determines the opacity of the background mesh, in 'auto' mode alpha defaults to .5 when no background map is given, to 1 otherwise.
bg_map : numpy array of shape (n_nodes,) to be plotted underneath the statistical map. Specifying a sulcal depth map as bg_map results in realistic shadowing of the surface.
bg_on_stat : boolean, specifies whether the statistical map should be multiplied with the background map for shadowing. Otherwise, only areas that are not covered by the statistical map after thresholding will show shadows.
figsize : tuple of integers, dimensions of the figure that is produced.

Output
------
Matplotlib figure object
entailment
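A hedged usage sketch for viz; the surface and sulc paths are placeholders into a FreeSurfer subject directory, and the statistical map is random stand-in data:

import numpy as np
import nibabel as nib

coords, faces = nib.freesurfer.read_geometry('/path/to/surf/lh.inflated')
sulc = nib.freesurfer.read_morph_data('/path/to/surf/lh.sulc')
stat = np.random.randn(coords.shape[0])          # stand-in statistical map
fig, ax = viz(coords, faces, stat_map=stat, bg_map=sulc, bg_on_stat=True,
              threshold=1.0, elev=0, azim=180)   # lateral view for a left-hemisphere surface
fig.savefig('lh_view.png')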
def surf_keep_cortex(surf, cortex):
    """
    Remove medial wall from cortical surface to ensure that shortest paths
    are only calculated through the cortex.

    Inputs
    -------
    surf : Tuple containing two numpy arrays of shape (n_nodes,3). Each row
           of the first array specifies the x, y, z coordinates of one node
           of the surface mesh. Each row of the second array specifies the
           indices of the three nodes building one triangle of the surface
           mesh.
           (e.g. the output from nibabel.freesurfer.io.read_geometry)
    cortex : Array with indices of vertices included within the cortex.
             (e.g. the output from nibabel.freesurfer.io.read_label)
    """

    # split surface into vertices and triangles
    vertices, triangles = surf

    # keep only the vertices within the cortex label
    cortex_vertices = np.array(vertices[cortex], dtype=np.float64)

    # keep only the triangles within the cortex label
    cortex_triangles = triangles_keep_cortex(triangles, cortex)

    return cortex_vertices, cortex_triangles
Remove medial wall from cortical surface to ensure that shortest paths are only calculated through the cortex.

Inputs
-------
surf : Tuple containing two numpy arrays of shape (n_nodes,3). Each row of the first array specifies the x, y, z coordinates of one node of the surface mesh. Each row of the second array specifies the indices of the three nodes building one triangle of the surface mesh. (e.g. the output from nibabel.freesurfer.io.read_geometry)
cortex : Array with indices of vertices included within the cortex. (e.g. the output from nibabel.freesurfer.io.read_label)
entailment
def triangles_keep_cortex(triangles, cortex):
    """
    Remove triangles with nodes not contained in the cortex label array
    """

    # for each face/triangle keep only those whose nodes are all within the
    # list of cortex nodes
    input_shape = triangles.shape
    triangle_is_in_cortex = np.all(np.reshape(np.in1d(triangles.ravel(), cortex), input_shape), axis=1)

    cortex_triangles_old = np.array(triangles[triangle_is_in_cortex], dtype=np.int32)

    # reassign node indices before outputting triangles
    new_index = np.digitize(cortex_triangles_old.ravel(), cortex, right=True)
    cortex_triangles = np.array(np.arange(len(cortex))[new_index].reshape(cortex_triangles_old.shape), dtype=np.int32)

    return cortex_triangles
Remove triangles with nodes not contained in the cortex label array
entailment
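A toy example of the reindexing, small enough to verify by hand (vertices 0 and 4 stand in for the medial wall):

import numpy as np

tris = np.array([[0, 1, 2], [1, 2, 3], [2, 3, 4]])
cortex = np.array([1, 2, 3])
print(triangles_keep_cortex(tris, cortex))  # [[0 1 2]], i.e. old triangle (1, 2, 3)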
def translate_src(src, cortex): """ Convert source nodes to new surface (without medial wall). """ src_new = np.array(np.where(np.in1d(cortex, src))[0], dtype=np.int32) return src_new
Convert source nodes to new surface (without medial wall).
entailment
def recort(input_data, surf, cortex): """ Return data values to space of full cortex (including medial wall), with medial wall equal to zero. """ data = np.zeros(len(surf[0])) data[cortex] = input_data return data
Return data values to space of full cortex (including medial wall), with medial wall equal to zero.
entailment
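A toy round trip through recort; the surface here is a stub with five vertices, three of which belong to the cortex:

import numpy as np

surf = (np.zeros((5, 3)), np.zeros((3, 3), dtype=np.int32))
cortex = np.array([1, 2, 3])
vals = np.array([7., 8., 9.])      # one value per cortex vertex
print(recort(vals, surf, cortex))  # [0. 7. 8. 9. 0.]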
def find_node_match(simple_vertices, complex_vertices): """ Thanks to juhuntenburg. Functions taken from https://github.com/juhuntenburg/brainsurfacescripts Finds those points on the complex mesh that correspond best to the simple mesh while forcing a one-to-one mapping. """ import scipy.spatial # make array for writing in final voronoi seed indices voronoi_seed_idx = np.zeros((simple_vertices.shape[0],), dtype='int64')-1 missing = np.where(voronoi_seed_idx == -1)[0].shape[0] mapping_single = np.zeros_like(voronoi_seed_idx) neighbours = 0 col = 0 while missing != 0: neighbours += 100 # find nearest neighbours inaccuracy, mapping = scipy.spatial.KDTree( complex_vertices).query(simple_vertices, k=neighbours) # go through columns of nearest neighbours until unique mapping is # achieved, if not before end of neighbours, extend number of # neighbours while col < neighbours: # find all missing voronoi seed indices missing_idx = np.where(voronoi_seed_idx == -1)[0] missing = missing_idx.shape[0] if missing == 0: break else: # for missing entries fill in next neighbour mapping_single[missing_idx] = np.copy( mapping[missing_idx, col]) # find unique values in mapping_single unique, double_idx = np.unique( mapping_single, return_inverse=True) # empty voronoi seed index voronoi_seed_idx = np.zeros( (simple_vertices.shape[0],), dtype='int64')-1 # fill voronoi seed idx with unique values for u in range(unique.shape[0]): # find the indices of this value in mapping entries = np.where(double_idx == u)[0] # set the first entry to the value voronoi_seed_idx[entries[0]] = unique[u] # go to next column col += 1 return voronoi_seed_idx, inaccuracy
Thanks to juhuntenburg. Functions taken from https://github.com/juhuntenburg/brainsurfacescripts Finds those points on the complex mesh that correspond best to the simple mesh while forcing a one-to-one mapping.
entailment
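A small illustration with hand-made coordinates; note the function queries up to 100 neighbours at a time, so a tiny mesh like this works because a unique mapping is already found in the first column:

import numpy as np

simple = np.array([[0., 0., 0.], [1., 0., 0.]])
complex_v = np.array([[0.1, 0., 0.], [0.9, 0., 0.], [5., 5., 5.]])
idx, err = find_node_match(simple, complex_v)
print(idx)  # [0 1]: each simple vertex maps to a distinct complex vertex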
def parse(self):
    """
    Retrieve and parse Event Summary report for the given
    :py:class:`nhlscrapi.games.game.GameKey`

    :returns: ``self`` on success, ``None`` otherwise
    """
    try:
        return super(EventSummRep, self).parse() \
            .parse_away_shots() \
            .parse_home_shots() \
            .parse_away_fo() \
            .parse_home_fo() \
            .parse_away_by_player() \
            .parse_home_by_player()
    except:
        return None
Retrieve and parse Event Summary report for the given :py:class:`nhlscrapi.games.game.GameKey`

:returns: ``self`` on success, ``None`` otherwise
entailment
def parse_home_shots(self): """ Parse shot info for home team. :returns: ``self`` on success, ``None`` otherwise """ try: self.__set_shot_tables() self.shots['home'] = self.__parse_shot_tables( self.__home_top, self.__home_bot ) return self except: return None
Parse shot info for home team. :returns: ``self`` on success, ``None`` otherwise
entailment
def parse_away_shots(self): """ Parse shot info for away team. :returns: ``self`` on success, ``None`` otherwise """ try: self.__set_shot_tables() self.shots['away'] = self.__parse_shot_tables( self.__aw_top, self.__aw_bot ) return self except: return None
Parse shot info for away team. :returns: ``self`` on success, ``None`` otherwise
entailment
def parse_home_fo(self): """ Parse face-off info for home team. :returns: ``self`` on success, ``None`` otherwise """ try: self.__set_fo_tables() self.face_offs['home'] = self.__parse_fo_table(self.__home_fo) return self except: return None
Parse face-off info for home team. :returns: ``self`` on success, ``None`` otherwise
entailment
def parse_away_fo(self): """ Parse face-off info for away team. :returns: ``self`` on success, ``None`` otherwise """ try: self.__set_fo_tables() self.face_offs['away'] = self.__parse_fo_table(self.__away_fo) return self except: return None
Parse face-off info for away team. :returns: ``self`` on success, ``None`` otherwise
entailment
def dist_calc(surf, cortex, source_nodes):
    """
    Calculate exact geodesic distance along the cortical surface from a set
    of source nodes. Returns, for each vertex, the minimum geodesic distance
    to the set of source nodes (zero on the medial wall).
    """

    cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex)
    translated_source_nodes = translate_src(source_nodes, cortex)
    data = gdist.compute_gdist(cortex_vertices, cortex_triangles, source_indices = translated_source_nodes)
    dist = recort(data, surf, cortex)
    del data

    return dist
Calculate exact geodesic distance along the cortical surface from a set of source nodes. Returns, for each vertex, the minimum geodesic distance to the set of source nodes (zero on the medial wall).
entailment
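A usage sketch tying the helpers together; all paths are placeholders for files in a FreeSurfer subject directory, and load_freesurfer_label is the loader defined earlier in this collection:

import numpy as np
import nibabel as nib

surf = nib.freesurfer.read_geometry('/path/to/surf/lh.white')
cortex = np.sort(nib.freesurfer.read_label('/path/to/label/lh.cortex.label'))
src = load_freesurfer_label('/path/to/label/lh.aparc.annot', 'precentral')
dist = dist_calc(surf, cortex, src)  # per-vertex geodesic distance, 0 on medial wall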
def zone_calc(surf, cortex, source_nodes):
    """
    Calculate closest nodes to each source node using exact geodesic distance
    along the cortical surface.
    """

    cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex)

    dist_vals = np.zeros((len(source_nodes), len(cortex_vertices)))

    for x in range(len(source_nodes)):

        translated_source_nodes = translate_src(source_nodes[x], cortex)
        dist_vals[x, :] = gdist.compute_gdist(cortex_vertices, cortex_triangles, source_indices = translated_source_nodes)

    data = np.argsort(dist_vals, axis=0)[0, :] + 1

    zone = recort(data, surf, cortex)

    del data

    return zone
Calculate closest nodes to each source node using exact geodesic distance along the cortical surface.
entailment
def dist_calc_matrix(surf, cortex, labels, exceptions = ['Unknown', 'Medial_wall'], verbose = True):
    """
    Calculate exact geodesic distance along the cortical surface between all
    pairs of labels. "labels" specifies the freesurfer label file to use. All
    values will be used other than those specified in "exceptions" (default:
    'Unknown' and 'Medial_wall').

    returns:
      dist_mat: symmetrical nxn matrix of minimum distance between pairs of labels
      rois: label names in order of n
    """

    cortex_vertices, cortex_triangles = surf_keep_cortex(surf, cortex)

    # remove exceptions from label list:
    label_list = sd.load.get_freesurfer_label(labels, verbose = False)
    rs = np.where([a not in exceptions for a in label_list])[0]
    rois = [label_list[r] for r in rs]
    if verbose:
        print("# of regions: " + str(len(rois)))

    # calculate distance from each region to all nodes:
    dist_roi = []
    for roi in rois:
        source_nodes = sd.load.load_freesurfer_label(labels, roi)
        translated_source_nodes = translate_src(source_nodes, cortex)
        dist_roi.append(gdist.compute_gdist(cortex_vertices, cortex_triangles,
                                            source_indices = translated_source_nodes))
        if verbose:
            print(roi)
    dist_roi = np.array(dist_roi)

    # calculate min distance per region:
    dist_mat = []
    for roi in rois:
        source_nodes = sd.load.load_freesurfer_label(labels, roi)
        translated_source_nodes = translate_src(source_nodes, cortex)
        dist_mat.append(np.min(dist_roi[:, translated_source_nodes], axis = 1))
    dist_mat = np.array(dist_mat)

    return dist_mat, rois
Calculate exact geodesic distance along the cortical surface between all pairs of labels. "labels" specifies the freesurfer label file to use. All values will be used other than those specified in "exceptions" (default: 'Unknown' and 'Medial_wall').

returns:
  dist_mat: symmetrical nxn matrix of minimum distance between pairs of labels
  rois: label names in order of n
entailment
def parse(self):
    """
    Retrieve and parse Face Off report data for the given nhlscrapi.GameKey

    :returns: ``self`` on success, ``None`` otherwise
    """
    try:
        return (
            super(FaceOffRep, self).parse()
            and self.parse_home_face_offs()
            and self.parse_away_face_offs()
        )
    except:
        return None
Retrieve and parse Face Off report data for the given nhlscrapi.GameKey

:returns: ``self`` on success, ``None`` otherwise
entailment
def parse_home_face_offs(self): """ Parse only the home faceoffs :returns: ``self`` on success, ``None`` otherwise """ self.__set_team_docs() self.face_offs['home'] = FaceOffRep.__read_team_doc(self.__home_doc) return self
Parse only the home faceoffs :returns: ``self`` on success, ``None`` otherwise
entailment
def parse_away_face_offs(self): """ Parse only the away faceoffs :returns: ``self`` on success, ``None`` otherwise """ self.__set_team_docs() self.face_offs['away'] = FaceOffRep.__read_team_doc(self.__vis_doc) return self
Parse only the away faceoffs :returns: ``self`` on success, ``None`` otherwise
entailment
def load_module(filename): """ Loads a module by filename """ basename = os.path.basename(filename) path = os.path.dirname(filename) sys.path.append(path) # TODO(tlan) need to figure out how to handle errors thrown here return __import__(os.path.splitext(basename)[0])
Loads a module by filename
entailment
def make_machine_mapping(machine_list):
    """
    Convert the machine list argument from a list of names into a mapping of
    logical names to physical hosts. This is similar to the _parse_configs
    function but separated to provide the opportunity for extension and
    additional checking of machine access
    """
    if machine_list is None:
        return {}
    else:
        mapping = {}
        for pair in machine_list:
            if (constants.MACHINE_SEPARATOR not in pair) or (pair.count(constants.MACHINE_SEPARATOR) != 1):
                raise ValueError("machine pairs must be passed as two strings separated by a %s" % constants.MACHINE_SEPARATOR)
            (logical, physical) = pair.split(constants.MACHINE_SEPARATOR)
            # add checks for reachability
            mapping[logical] = physical
        return mapping
Convert the machine list argument from a list of names into a mapping of logical names to physical hosts. This is similar to the _parse_configs function but separated to provide the opportunity for extension and additional checking of machine access
entailment
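A quick illustration of the expected input, assuming constants.MACHINE_SEPARATOR is ':' (hypothetical; check the constants module in use):

mapping = make_machine_mapping(['web01:host1.example.com',
                                'db01:host2.example.com'])
# {'web01': 'host1.example.com', 'db01': 'host2.example.com'}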
def parse_config_list(config_list):
    """
    Parse a list of configuration properties separated by '='
    """
    if config_list is None:
        return {}
    else:
        mapping = {}
        for pair in config_list:
            if (constants.CONFIG_SEPARATOR not in pair) or (pair.count(constants.CONFIG_SEPARATOR) != 1):
                raise ValueError("configs must be passed as two strings separated by a %s" % constants.CONFIG_SEPARATOR)
            (config, value) = pair.split(constants.CONFIG_SEPARATOR)
            mapping[config] = value
        return mapping
Parse a list of configuration properties separated by '='
entailment
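Likewise for config lists, assuming constants.CONFIG_SEPARATOR is '=':

configs = parse_config_list(['retries=3', 'log_dir=/tmp/logs'])
# {'retries': '3', 'log_dir': '/tmp/logs'} -- values stay strings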
def parse_config_file(config_file_path):
    """
    Parse a configuration file. Currently only supports .json, .py and
    properties separated by '='
    :param config_file_path:
    :return: a dict of the configuration properties
    """
    extension = os.path.splitext(config_file_path)[1]
    if extension == '.pyc':
        raise ValueError("Skipping .pyc file as config")
    if extension == '.json':
        with open(config_file_path) as config_file:
            try:
                mapping = json.load(config_file)
            except ValueError as e:
                logger.error("Did not load json configs: %s", e)
                raise SyntaxError('Unable to parse config file:%s due to malformed JSON. Aborting' % (config_file_path))
    elif extension == '.py':
        mapping = {}
        file_dict = load_module(config_file_path)
        for attr_name in dir(file_dict):
            if not (attr_name.startswith('_') or attr_name.startswith('__')):
                attr = getattr(file_dict, attr_name)
                if type(attr) is dict:
                    mapping.update(attr)
    else:
        with open(config_file_path) as config_file:
            lines = [line.rstrip() for line in config_file if line.rstrip() != "" and not line.startswith("#")]
            mapping = parse_config_list(lines)
    return mapping
Parse a configuration file. Currently only supports .json, .py and properties separated by '=' :param config_file_path: :return: a dict of the configuration properties
entailment
def exec_with_env(ssh, command, msg='', env={}, **kwargs):
    """
    :param ssh:
    :param command:
    :param msg:
    :param env:
    :param sync:
    :return:
    """
    bash_profile_command = "source .bash_profile > /dev/null 2> /dev/null;"
    env_command = build_os_environment_string(env)
    new_command = bash_profile_command + env_command + command
    if kwargs.get('sync', True):
        return better_exec_command(ssh, new_command, msg)
    else:
        return ssh.exec_command(new_command)
:param ssh:
:param command:
:param msg:
:param env:
:param sync:
:return:
entailment
def better_exec_command(ssh, command, msg): """Uses paramiko to execute a command but handles failure by raising a ParamikoError if the command fails. Note that unlike paramiko.SSHClient.exec_command this is not asynchronous because we wait until the exit status is known :Parameter ssh: a paramiko SSH Client :Parameter command: the command to execute :Parameter msg: message to print on failure :Returns (paramiko.Channel) the underlying channel so that the caller can extract stdout or send to stdin :Raises SSHException: if paramiko would raise an SSHException :Raises ParamikoError: if the command produces output to stderr """ chan = ssh.get_transport().open_session() chan.exec_command(command) exit_status = chan.recv_exit_status() if exit_status != 0: msg_str = chan.recv_stderr(1024) err_msgs = [] while len(msg_str) > 0: err_msgs.append(msg_str) msg_str = chan.recv_stderr(1024) err_msg = ''.join(err_msgs) logger.error(err_msg) raise ParamikoError(msg, err_msg) return chan
Uses paramiko to execute a command but handles failure by raising a ParamikoError if the command fails. Note that unlike paramiko.SSHClient.exec_command this is not asynchronous because we wait until the exit status is known :Parameter ssh: a paramiko SSH Client :Parameter command: the command to execute :Parameter msg: message to print on failure :Returns (paramiko.Channel) the underlying channel so that the caller can extract stdout or send to stdin :Raises SSHException: if paramiko would raise an SSHException :Raises ParamikoError: if the command produces output to stderr
entailment
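A hedged usage sketch with a plain paramiko client; host and credentials are placeholders, and log_output (defined next) prints the command's stdout:

import paramiko

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('host1.example.com', username='user', password='secret')
try:
    chan = better_exec_command(ssh, 'ls /tmp', 'listing /tmp failed')
    log_output(chan)
finally:
    ssh.close()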
def log_output(chan):
    """
    Logs the output from a remote command.

    The input should be an open channel in the case of synchronous
    better_exec_command; otherwise this will not log anything and simply
    return to the caller
    :param chan:
    :return:
    """
    if hasattr(chan, "recv"):
        data = chan.recv(1024)
        msgs = []
        while len(data) > 0:
            msgs.append(data)
            data = chan.recv(1024)
        msg = ''.join(msgs).strip()
        if len(msg) > 0:
            logger.info(msg)
Logs the output from a remote command. The input should be an open channel in the case of synchronous better_exec_command; otherwise this will not log anything and simply return to the caller
:param chan:
:return:
entailment
def copy_dir(ftp, filename, outputdir, prefix, pattern=''):
    """
    Recursively copy a directory. Flattens the output into a single
    directory, but prefixes the files with the path from the original input
    directory
    :param ftp:
    :param filename:
    :param outputdir:
    :param prefix:
    :param pattern: a regex pattern for files to match (by default matches everything)
    :return:
    """
    try:
        mode = ftp.stat(filename).st_mode
    except IOError, e:
        if e.errno == errno.ENOENT:
            logger.error("Log file " + filename + " does not exist")
        pass
    else:
        if mode & stat.S_IFREG:
            if re.match(pattern, filename) is not None:
                new_file = os.path.join(outputdir, "{0}-{1}".format(prefix, os.path.basename(filename)))
                ftp.get(filename, new_file)
        elif mode & stat.S_IFDIR:
            for f in ftp.listdir(filename):
                copy_dir(ftp, os.path.join(filename, f), outputdir, "{0}_{1}".format(prefix, os.path.basename(filename)), pattern)
Recursively copy a directory. Flattens the output into a single directory, but prefixes the files with the path from the original input directory
:param ftp:
:param filename:
:param outputdir:
:param prefix:
:param pattern: a regex pattern for files to match (by default matches everything)
:return:
entailment
@contextmanager  # assumed: the generator below only works in a `with` block when wrapped as a context manager
def open_remote_file(hostname, filename, mode='r', bufsize=-1, username=None, password=None):
    """
    :param hostname:
    :param filename:
    :return:
    """
    with get_ssh_client(hostname, username=username, password=password) as ssh:
        sftp = None
        f = None
        try:
            sftp = ssh.open_sftp()
            f = sftp.open(filename, mode, bufsize)
            yield f
        finally:
            if f is not None:
                f.close()
            if sftp is not None:
                sftp.close()
:param hostname: :param filename: :return:
entailment
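Given the context-manager decorator noted above, usage would look like this (host, credentials, and path are placeholders):

with open_remote_file('host1.example.com', '/var/log/app.log',
                      username='user', password='secret') as f:
    for line in f:
        print(line.rstrip())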
def deploy(self, unique_id, configs=None):
    """Deploys the service to the host. This should at least perform the same
    actions as install and start but may perform additional tasks as needed.

    :Parameter unique_id: the name of the process
    :Parameter configs: a map of configs the deployer may use to modify the deployment
    """
    self.install(unique_id, configs)
    self.start(unique_id, configs)
Deploys the service to the host. This should at least perform the same actions as install and start but may perform additional tasks as needed.

:Parameter unique_id: the name of the process
:Parameter configs: a map of configs the deployer may use to modify the deployment
entailment
def undeploy(self, unique_id, configs=None): """Undeploys the service. This should at least perform the same actions as stop and uninstall but may perform additional tasks as needed. :Parameter unique_id: the name of the process :Parameter configs: a map of configs the deployer may use """ self.stop(unique_id, configs) self.uninstall(unique_id, configs)
Undeploys the service. This should at least perform the same actions as stop and uninstall but may perform additional tasks as needed. :Parameter unique_id: the name of the process :Parameter configs: a map of configs the deployer may use
entailment
def soft_bounce(self, unique_id, configs=None): """ Performs a soft bounce (stop and start) for the specified process :Parameter unique_id: the name of the process """ self.stop(unique_id, configs) self.start(unique_id, configs)
Performs a soft bounce (stop and start) for the specified process :Parameter unique_id: the name of the process
entailment
def hard_bounce(self, unique_id, configs=None): """ Performs a hard bounce (kill and start) for the specified process :Parameter unique_id: the name of the process """ self.kill(unique_id, configs) self.start(unique_id, configs)
Performs a hard bounce (kill and start) for the specified process :Parameter unique_id: the name of the process
entailment
def sleep(self, unique_id, delay, configs=None): """ Pauses the process for the specified delay and then resumes it :Parameter unique_id: the name of the process :Parameter delay: delay time in seconds """ self.pause(unique_id, configs) time.sleep(delay) self.resume(unique_id, configs)
Pauses the process for the specified delay and then resumes it :Parameter unique_id: the name of the process :Parameter delay: delay time in seconds
entailment
def pause(self, unique_id, configs=None): """ Issues a sigstop for the specified process :Parameter unique_id: the name of the process """ pids = self.get_pid(unique_id, configs) if pids != constants.PROCESS_NOT_RUNNING_PID: pid_str = ' '.join(str(pid) for pid in pids) hostname = self.processes[unique_id].hostname with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: better_exec_command(ssh, "kill -SIGSTOP {0}".format(pid_str), "PAUSING PROCESS {0}".format(unique_id))
Issues a sigstop for the specified process :Parameter unique_id: the name of the process
entailment
def _send_signal(self, unique_id, signalno, configs): """ Issues a signal for the specified process :Parameter unique_id: the name of the process """ pids = self.get_pid(unique_id, configs) if pids != constants.PROCESS_NOT_RUNNING_PID: pid_str = ' '.join(str(pid) for pid in pids) hostname = self.processes[unique_id].hostname msg= Deployer._signalnames.get(signalno,"SENDING SIGNAL %s TO"%signalno) with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh: better_exec_command(ssh, "kill -{0} {1}".format(signalno, pid_str), "{0} PROCESS {1}".format(msg, unique_id))
Issues a signal for the specified process :Parameter unique_id: the name of the process
entailment
def resume(self, unique_id, configs=None): """ Issues a sigcont for the specified process :Parameter unique_id: the name of the process """ self._send_signal(unique_id, signal.SIGCONT,configs)
Issues a sigcont for the specified process :Parameter unique_id: the name of the process
entailment
def kill(self, unique_id, configs=None):
    """ Issues a kill -9 to the specified process.

    Calls the deployer's get_pid function for the process. If no
    pid_file/pid_keyword is specified, a generic grep of the ps aux command
    is executed on the remote machine based on process parameters, which may
    not be reliable if more processes are running with similar names

    :Parameter unique_id: the name of the process
    """
    self._send_signal(unique_id, signal.SIGKILL, configs)
Issues a kill -9 to the specified process. Calls the deployer's get_pid function for the process. If no pid_file/pid_keyword is specified, a generic grep of the ps aux command is executed on the remote machine based on process parameters, which may not be reliable if more processes are running with similar names

:Parameter unique_id: the name of the process
entailment
def terminate(self, unique_id, configs=None): """ Issues a kill -15 to the specified process :Parameter unique_id: the name of the process """ self._send_signal(unique_id, signal.SIGTERM, configs)
Issues a kill -15 to the specified process :Parameter unique_id: the name of the process
entailment
def hangup(self, unique_id, configs=None): """ Issue a signal to hangup the specified process :Parameter unique_id: the name of the process """ self._send_signal(unique_id, signal.SIGHUP, configs)
Issue a signal to hangup the specified process :Parameter unique_id: the name of the process
entailment
def get_logs(self, unique_id, logs, directory, pattern=constants.FILTER_NAME_ALLOW_NONE): """deprecated name for fetch_logs""" self.fetch_logs(unique_id, logs, directory, pattern)
deprecated name for fetch_logs
entailment
def fetch_logs(self, unique_id, logs, directory, pattern=constants.FILTER_NAME_ALLOW_NONE): """ Copies logs from the remote host that the process is running on to the provided directory :Parameter unique_id the unique_id of the process in question :Parameter logs a list of logs given by absolute path from the remote host :Parameter directory the local directory to store the copied logs :Parameter pattern a pattern to apply to files to restrict the set of logs copied """ hostname = self.processes[unique_id].hostname install_path = self.processes[unique_id].install_path self.fetch_logs_from_host(hostname, install_path, unique_id, logs, directory, pattern)
Copies logs from the remote host that the process is running on to the provided directory :Parameter unique_id the unique_id of the process in question :Parameter logs a list of logs given by absolute path from the remote host :Parameter directory the local directory to store the copied logs :Parameter pattern a pattern to apply to files to restrict the set of logs copied
entailment
def fetch_logs_from_host(hostname, install_path, prefix, logs, directory, pattern):
    """ Static method. Copies logs from the specified host on the specified install path

    :Parameter hostname the remote host from where we need to fetch the logs
    :Parameter install_path path where the app is installed
    :Parameter prefix prefix used to copy logs. Generally the unique_id of the process
    :Parameter logs a list of logs given by absolute path from the remote host
    :Parameter directory the local directory to store the copied logs
    :Parameter pattern a pattern to apply to files to restrict the set of logs copied
    """
    if hostname is not None:
        with get_sftp_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ftp:
            for f in logs:
                try:
                    mode = ftp.stat(f).st_mode
                except IOError, e:
                    if e.errno == errno.ENOENT:
                        logger.error("Log file " + f + " does not exist on " + hostname)
                    pass
                else:
                    copy_dir(ftp, f, directory, prefix)
            if install_path is not None:
                copy_dir(ftp, install_path, directory, prefix, pattern)
Static method. Copies logs from the specified host on the specified install path

:Parameter hostname the remote host from where we need to fetch the logs
:Parameter install_path path where the app is installed
:Parameter prefix prefix used to copy logs. Generally the unique_id of the process
:Parameter logs a list of logs given by absolute path from the remote host
:Parameter directory the local directory to store the copied logs
:Parameter pattern a pattern to apply to files to restrict the set of logs copied
entailment
def generate(self): """ Generates the report """ self._setup() for config_name in self.report_info.config_to_test_names_map.keys(): config_dir = os.path.join(self.report_info.resource_dir, config_name) utils.makedirs(config_dir) testsuite = self._generate_junit_xml(config_name) with open(os.path.join(self.report_info.junit_xml_path, 'zopkio_junit_reports.xml'), 'w') as file: TestSuite.to_file(file, [testsuite], prettyprint=False)
Generates the report
entailment
def install(self, unique_id, configs=None):
    """
    Copies the executable to the remote machine under install path. Inspects
    the configs for the possible keys
    'hostname': the host to install on
    'install_path': the location on the remote host
    'executable': the executable to copy
    'no_copy': if this config is passed in and true then this method will not
               copy the executable assuming that it is already installed
    'post_install_cmds': an optional list of commands that should be executed
                         on the remote machine after the executable has been
                         installed. If no_copy is set to true, then the post
                         install commands will not be run.
    If the unique_id is already installed on a different host, this will
    perform the cleanup action first. If either 'install_path' or
    'executable' are provided the new value will become the default.

    :param unique_id:
    :param configs:
    :return:
    """
    # the following is necessary to set the configs for this function as the
    # combination of the default configurations and the parameter with the
    # parameter superseding the defaults but not modifying the defaults
    if configs is None:
        configs = {}
    tmp = self.default_configs.copy()
    tmp.update(configs)
    configs = tmp

    hostname = None
    is_tarfile = False
    is_zipfile = False
    if unique_id in self.processes and 'hostname' in configs:
        self.uninstall(unique_id, configs)
        hostname = configs['hostname']
    elif 'hostname' in configs:
        hostname = configs['hostname']
    elif unique_id not in self.processes:
        # we have not installed this unique_id before and no hostname is
        # provided in the configs so raise an error
        raise DeploymentError("hostname was not provided for unique_id: " + unique_id)

    env = configs.get("env", {})
    install_path = configs.get('install_path') or self.default_configs.get('install_path')
    pid_file = configs.get('pid_file') or self.default_configs.get('pid_file')
    if install_path is None:
        logger.error("install_path was not provided for unique_id: " + unique_id)
        raise DeploymentError("install_path was not provided for unique_id: " + unique_id)

    if not configs.get('no_copy', False):
        with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh:
            log_output(better_exec_command(ssh, "mkdir -p {0}".format(install_path),
                                           "Failed to create path {0}".format(install_path)))
            log_output(better_exec_command(ssh, "chmod 755 {0}".format(install_path),
                                           "Failed to make path {0} writeable".format(install_path)))
            executable = configs.get('executable') or self.default_configs.get('executable')
            if executable is None:
                logger.error("executable was not provided for unique_id: " + unique_id)
                raise DeploymentError("executable was not provided for unique_id: " + unique_id)

            # if the executable is in a remote location copy it to the local machine
            copy_from_remote_location = False
            if ":" in executable:
                copy_from_remote_location = True

                if "http" not in executable:
                    remote_location_server = executable.split(":")[0]
                    remote_file_path = executable.split(":")[1]
                    remote_file_name = os.path.basename(remote_file_path)
                    local_temp_file_name = os.path.join(configs.get("tmp_dir", "/tmp"), remote_file_name)
                    if not os.path.exists(local_temp_file_name):
                        with get_sftp_client(remote_location_server, username=runtime.get_username(), password=runtime.get_password()) as ftp:
                            try:
                                ftp.get(remote_file_path, local_temp_file_name)
                                executable = local_temp_file_name
                            except:
                                raise DeploymentError("Unable to load file from remote server " + executable)
                # use urllib for http copy
                else:
                    remote_file_name = executable.split("/")[-1]
                    local_temp_file_name = os.path.join(configs.get("tmp_dir", "/tmp"), remote_file_name)
                    if not os.path.exists(local_temp_file_name):
                        try:
                            urllib.urlretrieve(executable, local_temp_file_name)
                        except:
                            raise DeploymentError("Unable to load file from remote server " + executable)
                    executable = local_temp_file_name

            try:
                exec_name = os.path.basename(executable)
                install_location = os.path.join(install_path, exec_name)
                with get_sftp_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ftp:
                    ftp.put(executable, install_location)
            except:
                raise DeploymentError("Unable to copy executable to install_location:" + install_location)
            finally:
                # Track if it's a tarfile or zipfile before deleting it in
                # case the copy to the remote location fails
                is_tarfile = tarfile.is_tarfile(executable)
                is_zipfile = zipfile.is_zipfile(executable)
                if (copy_from_remote_location and not configs.get('cache', False)):
                    os.remove(executable)

            # only supports tar and zip (because those modules are provided
            # by Python's standard library)
            if configs.get('extract', False) or self.default_configs.get('extract', False):
                if is_tarfile:
                    log_output(better_exec_command(ssh, "tar -xf {0} -C {1}".format(install_location, install_path),
                                                   "Failed to extract tarfile {0}".format(exec_name)))
                elif is_zipfile:
                    log_output(better_exec_command(ssh, "unzip -o {0} -d {1}".format(install_location, install_path),
                                                   "Failed to extract zipfile {0}".format(exec_name)))
                else:
                    logger.error(executable + " is not a supported filetype for extracting")
                    raise DeploymentError(executable + " is not a supported filetype for extracting")

            post_install_cmds = configs.get('post_install_cmds', False) or self.default_configs.get('post_install_cmds', [])
            for cmd in post_install_cmds:
                relative_cmd = "cd {0}; {1}".format(install_path, cmd)
                log_output(exec_with_env(ssh, relative_cmd,
                                         msg="Failed to execute post install command: {0}".format(relative_cmd), env=env))

    self.processes[unique_id] = Process(unique_id, self.service_name, hostname, install_path)
    self.processes[unique_id].pid_file = pid_file
Copies the executable to the remote machine under install path. Inspects the configs for the possible keys 'hostname': the host to install on 'install_path': the location on the remote host 'executable': the executable to copy 'no_copy': if this config is passed in and true then this method will not copy the executable assuming that it is already installed 'post_install_cmds': an optional list of commands that should be executed on the remote machine after the executable has been installed. If no_copy is set to true, then the post install commands will not be run. If the unique_id is already installed on a different host, this will perform the cleanup action first. If either 'install_path' or 'executable' are provided the new value will become the default. :param unique_id: :param configs: :return:
entailment
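A sketch of a configs dict exercising the paths above; every value is a placeholder, and `deployer` stands for some concrete Deployer instance:

configs = {
    'hostname': 'host1.example.com',
    'install_path': '/tmp/myservice',
    'executable': 'artifacts.example.com:/releases/myservice.tar.gz',
    'extract': True,                              # unpack the tarball after the copy
    'post_install_cmds': ['chmod +x bin/run.sh'],
    'pid_file': '/tmp/myservice/service.pid',
}
deployer.install('myservice-0', configs)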
def start(self, unique_id, configs=None):
    """
    Start the service. If `unique_id` has already been installed the deployer
    will start the service on that host. Otherwise this will call install
    with the configs. Within the context of this function, only four configs
    are considered
    'start_command': the command to run (if provided will replace the default)
    'args': a list of args that can be passed to the command
    'sync': whether the command is synchronous or asynchronous; defaults to
            asynchronous
    'delay': a delay in seconds that might be needed regardless of whether
             the command returns before the service can be started

    :param unique_id:
    :param configs:
    :return: if the command is executed synchronously return the underlying
             paramiko channel which can be used to get the stdout otherwise
             return the triple stdin, stdout, stderr
    """
    # the following is necessary to set the configs for this function as the
    # combination of the default configurations and the parameter with the
    # parameter superseding the defaults but not modifying the defaults
    if configs is None:
        configs = {}
    tmp = self.default_configs.copy()
    tmp.update(configs)
    configs = tmp

    logger.debug("starting " + unique_id)
    # do not start if already started
    if self.get_pid(unique_id, configs) != constants.PROCESS_NOT_RUNNING_PID:
        return None
    if unique_id not in self.processes:
        self.install(unique_id, configs)
    hostname = self.processes[unique_id].hostname
    install_path = self.processes[unique_id].install_path

    # order of precedence for start_command and args from highest to lowest:
    # 1. configs
    # 2. from Process
    # 3. from Deployer
    start_command = configs.get('start_command') or self.processes[unique_id].start_command or self.default_configs.get('start_command')
    pid_file = configs.get('pid_file') or self.default_configs.get('pid_file')
    if start_command is None:
        logger.error("start_command was not provided for unique_id: " + unique_id)
        raise DeploymentError("start_command was not provided for unique_id: " + unique_id)

    args = configs.get('args') or self.processes[unique_id].args or self.default_configs.get('args')
    if args is not None:
        full_start_command = "{0} {1}".format(start_command, ' '.join(args))
    else:
        full_start_command = start_command
    command = "cd {0}; {1}".format(install_path, full_start_command)
    env = configs.get("env", {})
    with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh:
        exec_with_env(ssh, command, msg="Failed to start", env=env, sync=configs.get('sync', False))

    self.processes[unique_id].start_command = start_command
    self.processes[unique_id].args = args
    # For cases where the user passes it with the start command
    if self.processes[unique_id].pid_file is None:
        self.processes[unique_id].pid_file = pid_file

    if 'delay' in configs:
        time.sleep(configs['delay'])
Start the service. If `unique_id` has already been installed the deployer will start the service on that host. Otherwise this will call install with the configs. Within the context of this function, only four configs are considered
'start_command': the command to run (if provided will replace the default)
'args': a list of args that can be passed to the command
'sync': whether the command is synchronous or asynchronous; defaults to asynchronous
'delay': a delay in seconds that might be needed regardless of whether the command returns before the service can be started

:param unique_id:
:param configs:
:return: if the command is executed synchronously return the underlying paramiko channel which can be used to get the stdout otherwise return the triple stdin, stdout, stderr
entailment
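And a matching start call; again `deployer` and all values are illustrative:

deployer.start('myservice-0', {
    'start_command': 'bin/run.sh',
    'args': ['--port', '8080'],
    'sync': False,  # run asynchronously (the default)
    'delay': 5,     # give the service five seconds to come up
})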