Dataset columns: sentence1 (string, lengths 52 to 3.87M), sentence2 (string, lengths 1 to 47.2k), label (stringclasses, 1 value: entailment).
def load_if_not_loaded(widget, filenames, verbose=False, delay=0.1, force=False, local=True, evaluator=None):
    """
    Load a javascript file to the Jupyter notebook context, unless it was already loaded.
    """
    if evaluator is None:
        evaluator = EVALUATOR  # default if not specified.
    for filename in filenames:
        loaded = False
        if force or filename not in LOADED_JAVASCRIPT:
            js_text = get_text_from_file_name(filename, local)
            if verbose:
                print("loading javascript file", filename, "with", evaluator)
            evaluator(widget, js_text)
            LOADED_JAVASCRIPT.add(filename)
            loaded = True
        else:
            if verbose:
                print("not reloading javascript file", filename)
        if loaded and delay > 0:
            if verbose:
                print("delaying to allow JS interpreter to sync.")
            time.sleep(delay)
Load a javascript file to the Jupyter notebook context, unless it was already loaded.
entailment
def _set(self, name, value):
    "Proxy to set a property of the widget element."
    return self.widget(self.widget_element._set(name, value))
Proxy to set a property of the widget element.
entailment
def strip_outer_tag(text):
    """Strips the outer tag, if text starts with a tag.  Not entity aware;
    designed to quickly strip outer tags from lxml cleaner output.  Only
    checks for <p> and <div> outer tags."""
    if not text or not isinstance(text, basestring):
        return text
    stripped = text.strip()
    if (stripped.startswith('<p>') or stripped.startswith('<div>')) and \
       (stripped.endswith('</p>') or stripped.endswith('</div>')):
        return stripped[stripped.index('>') + 1:stripped.rindex('<')]
    return text
Strips the outer tag, if text starts with a tag. Not entity aware; designed to quickly strip outer tags from lxml cleaner output. Only checks for <p> and <div> outer tags.
entailment
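A couple of illustrative calls (hypothetical, not rows from the dataset) showing the intended behavior; note the check is not strict, so mismatched outer tags like '<p>...</div>' would also be stripped:

    assert strip_outer_tag('<p>Hello <b>world</b></p>') == 'Hello <b>world</b>'
    assert strip_outer_tag('  <div>text</div>  ') == 'text'
    assert strip_outer_tag('plain text') == 'plain text'  # no outer tag: returned unchanged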
def munge_author(author):
    """If an author contains an email and a name in it, make sure it is in
    the format: "name (email)"."""
    # this loveliness is from feedparser but was not usable as a function
    if '@' in author:
        emailmatch = re.search(
            r"(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?",
            author, re.UNICODE)
        if emailmatch:
            email = emailmatch.group(0)
            # probably a better way to do the following, but it passes all the tests
            author = author.replace(email, u'')
            author = author.replace(u'()', u'')
            author = author.replace(u'<>', u'')
            author = author.replace(u'&lt;&gt;', u'')
            author = author.strip()
            if author and (author[0] == u'('):
                author = author[1:]
            if author and (author[-1] == u')'):
                author = author[:-1]
            author = author.strip()
            return '%s (%s)' % (author, email)
    return author
If an author contains an email and a name in it, make sure it is in the format: "name (email)".
entailment
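Two hypothetical inputs (not from the dataset) that exercise both cleanup branches, plus the pass-through case:

    munge_author(u'Dave <dave@example.com>')    # -> u'Dave (dave@example.com)'
    munge_author(u'editor@example.com (Ed)')    # -> u'Ed (editor@example.com)'
    munge_author(u'Just A Name')                # no '@', returned unchanged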
def base_url(root):
    """Determine the base url for a root element."""
    for attr, value in root.attrib.iteritems():
        if attr.endswith('base') and 'http' in value:
            return value
    return None
Determine the base url for a root element.
entailment
def clean_ns(tag):
    """Return a tag and its namespace separately."""
    if '}' in tag:
        split = tag.split('}')
        return split[0].strip('{'), split[-1]
    return '', tag
Return a tag and its namespace separately.
entailment
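Illustrative calls (hypothetical) showing the Clark-notation split that lxml uses for namespaced tags:

    clean_ns('{http://www.w3.org/2005/Atom}entry')  # -> ('http://www.w3.org/2005/Atom', 'entry')
    clean_ns('title')                               # -> ('', 'title')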
def xpath(node, query, namespaces={}):
    """A safe xpath that only uses namespaces if available."""
    if namespaces and 'None' not in namespaces:
        return node.xpath(query, namespaces=namespaces)
    return node.xpath(query)
A safe xpath that only uses namespaces if available.
entailment
def innertext(node):
    """Return the inner text of a node.  If a node has no sub elements, this
    is just node.text.  Otherwise, it's node.text + sub-element-text +
    node.tail."""
    if not len(node):
        return node.text
    return (node.text or '') + ''.join([etree.tostring(c) for c in node]) + (node.tail or '')
Return the inner text of a node. If a node has no sub elements, this is just node.text. Otherwise, it's node.text + sub-element-text + node.tail.
entailment
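A quick sanity check (illustrative only) of what innertext returns for mixed content, using lxml:

    from lxml import etree

    node = etree.fromstring('<p>Hello <b>world</b> again</p>')
    innertext(node)  # -> 'Hello <b>world</b> again' (tostring of each child includes its tail)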
def parse(document, clean_html=True, unix_timestamp=False, encoding=None):
    """Parse a document and return a feedparser dictionary with attr key access.
    If clean_html is False, the html in the feed will not be cleaned.  If
    clean_html is True, a sane version of lxml.html.clean.Cleaner will be used.
    If it is a Cleaner object, that cleaner will be used.  If unix_timestamp is
    True, the date information will be a numerical unix timestamp rather than a
    struct_time.  If encoding is provided, the encoding of the document will be
    manually set to that."""
    if isinstance(clean_html, bool):
        cleaner = default_cleaner if clean_html else fake_cleaner
    else:
        cleaner = clean_html
    result = feedparser.FeedParserDict()
    result['feed'] = feedparser.FeedParserDict()
    result['entries'] = []
    result['bozo'] = 0
    try:
        parser = SpeedParser(document, cleaner, unix_timestamp, encoding)
        parser.update(result)
    except Exception as e:
        if isinstance(e, UnicodeDecodeError) and encoding is True:
            encoding = chardet.detect(document)['encoding']
            document = document.decode(encoding, 'replace').encode('utf-8')
            return parse(document, clean_html, unix_timestamp, encoding)
        import traceback
        result['bozo'] = 1
        result['bozo_exception'] = e
        result['bozo_tb'] = traceback.format_exc()
    return result
Parse a document and return a feedparser dictionary with attr key access. If clean_html is False, the html in the feed will not be cleaned. If clean_html is True, a sane version of lxml.html.clean.Cleaner will be used. If it is a Cleaner object, that cleaner will be used. If unix_timestamp is True, the date information will be a numerical unix timestamp rather than a struct_time. If encoding is provided, the encoding of the document will be manually set to that.
entailment
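A sketch of how this entry point is typically called; the feed string is made up, and exactly which keys get populated depends on the parser's tag map:

    rss = """<?xml version="1.0"?>
    <rss version="2.0"><channel>
      <title>Example feed</title>
      <item><title>First post</title><guid>http://example.com/1</guid></item>
    </channel></rss>"""

    result = parse(rss)
    if not result['bozo']:
        print(result.feed.title)    # FeedParserDict allows attr-style key access
        print(len(result.entries))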
def parse_entry(self, entry):
    """An attempt to parse pieces of an entry out w/o xpath, by looping over
    the entry root's children and slotting them into the right places.  This
    is going to be way messier than SpeedParserEntries, and maybe less cleanly
    usable, but it should be faster."""
    e = feedparser.FeedParserDict()
    tag_map = self.tag_map
    nslookup = self.nslookup
    for child in entry.getchildren():
        if isinstance(child, etree._Comment):
            continue
        ns, tag = clean_ns(child.tag)
        mapping = tag_map.get(tag, None)
        if mapping:
            getattr(self, 'parse_%s' % mapping)(child, e, nslookup.get(ns, ns))
        if not ns:
            continue
        fulltag = '%s:%s' % (nslookup.get(ns, ''), tag)
        mapping = tag_map.get(fulltag, None)
        if mapping:
            getattr(self, 'parse_%s' % mapping)(child, e, nslookup[ns])
    lacks_summary = 'summary' not in e or e['summary'] is None
    lacks_content = 'content' not in e or not bool(e.get('content', None))
    if not lacks_summary and lacks_content:
        e['content'] = [{'value': e.summary}]
    # feedparser sometimes copies the first content value into the
    # summary field when summary was completely missing; we want
    # to do that as well, but avoid the case where summary was given as ''
    if lacks_summary and not lacks_content:
        e['summary'] = e['content'][0]['value']
    if e.get('summary', False) is None:
        e['summary'] = u''
    # support feed entries that have a guid but no link
    if 'guid' in e and 'link' not in e:
        e['link'] = full_href(e['guid'], self.baseurl)
    return e
An attempt to parse pieces of an entry out w/o xpath, by looping over the entry root's children and slotting them into the right places. This is going to be way messier than SpeedParserEntries, and maybe less cleanly usable, but it should be faster.
entailment
def changed_path(self):
    "Find any changed path and update all changed modification times."
    result = None  # default
    for path in self.paths_to_modification_times:
        lastmod = self.paths_to_modification_times[path]
        mod = os.path.getmtime(path)
        if mod > lastmod:
            result = "Watch file has been modified: " + repr(path)
            self.paths_to_modification_times[path] = mod
    for folder in self.folder_paths:
        for filename in os.listdir(folder):
            subpath = os.path.join(folder, filename)
            if os.path.isfile(subpath) and subpath not in self.paths_to_modification_times:
                result = "New file in watched folder: " + repr(subpath)
                self.add(subpath)
    if self.check_python_modules:
        # refresh the modules
        self.add_all_modules()
    if self.check_javascript:
        self.watch_javascript()
    return result
Find any changed path and update all changed modification times.
entailment
def _parse_date_iso8601(dateString):
    '''Parse a variety of ISO-8601-compatible formats like 20040105'''
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(dateString)
        if m:
            break
    if not m:
        return
    if m.span() == (0, 0):
        return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
                params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ?  The debate goes on...
    if 'century' in params:
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tuple(tm)))
Parse a variety of ISO-8601-compatible formats like 20040105
entailment
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    m = _korean_onblog_date_re.match(dateString)
    if not m:
        return
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),
                 'zonediff': '+09:00'}
    return _parse_date_w3dtf(w3dtfdate)
Parse a string according to the OnBlog 8-bit date format
entailment
def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    m = _korean_nate_date_re.match(dateString)
    if not m:
        return
    hour = int(m.group(5))
    ampm = m.group(4)
    if ampm == _korean_pm:
        hour += 12
    hour = str(hour)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),
                 'hour': hour, 'minute': m.group(6), 'second': m.group(7),
                 'zonediff': '+09:00'}
    return _parse_date_w3dtf(w3dtfdate)
Parse a string according to the Nate 8-bit date format
entailment
def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    m = _greek_date_format_re.match(dateString)
    if not m:
        return
    wday = _greek_wdays[m.group(1)]
    month = _greek_months[m.group(3)]
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),
                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),
                  'zonediff': m.group(8)}
    return _parse_date_rfc822(rfc822date)
Parse a string according to a Greek 8-bit date format.
entailment
def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    m = _hungarian_date_format_re.match(dateString)
    if not m or m.group(2) not in _hungarian_months:
        return None
    month = _hungarian_months[m.group(2)]
    day = m.group(3)
    if len(day) == 1:
        day = '0' + day
    hour = m.group(4)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': m.group(1), 'month': month, 'day': day,
                 'hour': hour, 'minute': m.group(5),
                 'zonediff': m.group(6)}
    return _parse_date_w3dtf(w3dtfdate)
Parse a string according to a Hungarian 8-bit date format.
entailment
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    data = dateString.split()
    if not data:
        return None
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        s = data[3]
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i + 1:]]
        else:
            data.append('')
        dateString = " ".join(data)
    # Account for the Etc/GMT timezone by stripping 'Etc/'
    elif len(data) == 5 and data[4].lower().startswith('etc/'):
        data[4] = data[4][4:]
        dateString = " ".join(data)
    if len(data) < 5:
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        # Jython doesn't adjust for 2-digit years like CPython does,
        # so account for it by shifting the year so that it's in the
        # range 1970-2069 (1970 being the year of the Unix epoch).
        if tm[0] < 100:
            tm = (tm[0] + (1900, 2000)[tm[0] < 70],) + tm[1:]
        return time.gmtime(rfc822.mktime_tz(tm))
Parse an RFC822, RFC1123, RFC2822, or asctime-style date
entailment
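Some illustrative inputs (made up) that hit each normalization branch; each returns a time.struct_time in GMT:

    _parse_date_rfc822('Thu, 01 Jan 2004 19:48:21 GMT')
    _parse_date_rfc822('01 Jan 2004')                     # short form: ' 00:00:00 GMT' is appended
    _parse_date_rfc822('Thu, 01 Jan 04 19:48:21 +0100')   # 2-digit year shifted into 1970-2069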
def _parse_date_perforce(aDateString):
    """parse a date in yyyy/mm/dd hh:mm:ss TTT format"""
    # Fri, 2006/09/15 08:19:53 EDT
    _my_date_pattern = re.compile(
        r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})')
    m = _my_date_pattern.search(aDateString)
    if m is None:
        return None
    dow, year, month, day, hour, minute, second, tz = m.groups()
    months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
    dateString = "%s, %s %s %s %s:%s:%s %s" % (
        dow, day, months[int(month) - 1], year, hour, minute, second, tz)
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        return time.gmtime(rfc822.mktime_tz(tm))
parse a date in yyyy/mm/dd hh:mm:ss TTT format
entailment
def parse_date(dateString):
    '''Parses a variety of date formats into a 9-tuple in GMT'''
    if not dateString:
        return None
    for handler in _date_handlers:
        try:
            date9tuple = handler(dateString)
        except (KeyError, OverflowError, ValueError):
            continue
        if not date9tuple:
            continue
        if len(date9tuple) != 9:
            continue
        return date9tuple
    return None
Parses a variety of date formats into a 9-tuple in GMT
entailment
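Putting the handlers together, a hedged sketch of the dispatch (assumes the module's _date_handlers list includes a W3DTF/ISO-8601 handler, as feedparser's does):

    date9 = parse_date('2004-01-05T12:00:30Z')
    if date9 is not None:
        year, month, day = date9[:3]   # 9-tuple in GMT, like time.gmtime()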
def handle_chunk_wrapper(self, status, name, content, file_info):
    """wrapper to allow output redirects for handle_chunk."""
    out = self.output
    if out is not None:
        with out:
            print("handling chunk " + repr(type(content)))
            self.handle_chunk(status, name, content, file_info)
    else:
        self.handle_chunk(status, name, content, file_info)
wrapper to allow output redirects for handle_chunk.
entailment
def handle_chunk(self, status, name, content, file_info):
    "Handle one chunk of the file. Override this method for piecewise delivery or error handling."
    if status == "error":
        msg = repr(file_info.get("message"))
        exc = JavaScriptError(msg)
        exc.file_info = file_info
        self.status = "Javascript sent exception " + msg
        self.chunk_collector = []
        raise exc
    if status == "more":
        self.chunk_collector.append(content)
        self.progress_callback(self.chunk_collector, file_info)
    else:
        assert status == "done", "Unknown status " + repr(status)
        self.save_chunks = self.chunk_collector
        self.chunk_collector.append(content)
        all_content = self.combine_chunks(self.chunk_collector)
        self.chunk_collector = []
        content_callback = self.content_callback
        if content_callback is None:
            content_callback = self.default_content_callback
        self.status = "calling " + repr(content_callback)
        try:
            content_callback(self.widget, name, all_content)
        except Exception as e:
            self.status += "\n" + repr(content_callback) + " raised " + repr(e)
            raise
Handle one chunk of the file. Override this method for piecewise delivery or error handling.
entailment
def get_login_url(self, scope, redirect_uri, state=None, family_names=None,
                  given_names=None, email=None, lang=None, show_login=None):
    """Return a URL for a user to login/register with ORCID.

    Parameters
    ----------
    :param scope: string or iterable of strings
        The scope(s) of the authorization request.
        For example '/authenticate'
    :param redirect_uri: string
        The URI to which the user's browser should be redirected after the
        login.
    :param state: string
        An arbitrary token to prevent CSRF. See the OAuth 2 docs for details.
    :param family_names: string
        The user's family name, used to fill the registration form.
    :param given_names: string
        The user's given name, used to fill the registration form.
    :param email: string
        The user's email address, used to fill the sign-in or registration form.
    :param lang: string
        The language in which to display the authorization page.
    :param show_login: bool
        Determines whether the log-in or registration form will be shown by
        default.

    Returns
    -------
    :returns: string
        The URL ready to be offered as a link to the user.
    """
    if not isinstance(scope, string_types):
        scope = " ".join(sorted(set(scope)))
    data = [("client_id", self._key), ("scope", scope),
            ("response_type", "code"), ("redirect_uri", redirect_uri)]
    if state:
        data.append(("state", state))
    if family_names:
        data.append(("family_names", family_names.encode("utf-8")))
    if given_names:
        data.append(("given_names", given_names.encode("utf-8")))
    if email:
        data.append(("email", email))
    if lang:
        data.append(("lang", lang))
    if show_login is not None:
        data.append(("show_login", "true" if show_login else "false"))
    return self._login_or_register_endpoint + "?" + urlencode(data)
Return a URL for a user to login/register with ORCID. Parameters ---------- :param scope: string or iterable of strings The scope(s) of the authorization request. For example '/authenticate' :param redirect_uri: string The URI to which the user's browser should be redirected after the login. :param state: string An arbitrary token to prevent CSRF. See the OAuth 2 docs for details. :param family_names: string The user's family name, used to fill the registration form. :param given_names: string The user's given name, used to fill the registration form. :param email: string The user's email address, used to fill the sign-in or registration form. :param lang: string The language in which to display the authorization page. :param show_login: bool Determines whether the log-in or registration form will be shown by default. Returns ------- :returns: string The URL ready to be offered as a link to the user.
entailment
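As a usage sketch, assuming the surrounding class is constructed the way the python-orcid package does it; every value below is a placeholder:

    api = PublicAPI('institution-key', 'institution-secret', sandbox=True)  # hypothetical credentials
    url = api.get_login_url('/authenticate', 'https://example.com/callback',
                            state='anti-csrf-token')
    # Offer `url` as a link; ORCID redirects back to the callback with ?code=...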
def search(self, query, method="lucene", start=None, rows=None,
           access_token=None):
    """Search the ORCID database.

    Parameters
    ----------
    :param query: string
        Query in line with the chosen method.
    :param method: string
        One of 'lucene', 'edismax', 'dismax'
    :param start: string
        Index of the first record requested. Use for pagination.
    :param rows: string
        Number of records requested. Use for pagination.
    :param access_token: string
        If obtained before, the access token to use to pass through
        authorization. Note that if this argument is not provided,
        the function will take more time.

    Returns
    -------
    :returns: dict
        Search result with error description available. The results can
        be obtained by accessing key 'result'.  To get the number of all
        results, access the key 'num-found'.
    """
    if access_token is None:
        access_token = self.get_search_token_from_orcid()
    headers = {'Accept': 'application/orcid+json',
               'Authorization': 'Bearer %s' % access_token}
    return self._search(query, method, start, rows, headers, self._endpoint)
Search the ORCID database. Parameters ---------- :param query: string Query in line with the chosen method. :param method: string One of 'lucene', 'edismax', 'dismax' :param start: string Index of the first record requested. Use for pagination. :param rows: string Number of records requested. Use for pagination. :param access_token: string If obtained before, the access token to use to pass through authorization. Note that if this argument is not provided, the function will take more time. Returns ------- :returns: dict Search result with error description available. The results can be obtained by accessing key 'result'. To get the number of all results, access the key 'num-found'.
entailment
def search_generator(self, query, method="lucene", pagination=10,
                     access_token=None):
    """Search the ORCID database with a generator.
    The generator will yield every result.

    Parameters
    ----------
    :param query: string
        Query in line with the chosen method.
    :param method: string
        One of 'lucene', 'edismax', 'dismax'
    :param pagination: integer
        How many papers should be fetched with the request.
    :param access_token: string
        If obtained before, the access token to use to pass through
        authorization. Note that if this argument is not provided,
        the function will take more time.

    Yields
    -------
    :yields: dict
        Single profile from the search results.
    """
    if access_token is None:
        access_token = self.get_search_token_from_orcid()
    headers = {'Accept': 'application/orcid+json',
               'Authorization': 'Bearer %s' % access_token}
    index = 0
    while True:
        paginated_result = self._search(query, method, index, pagination,
                                        headers, self._endpoint)
        if not paginated_result['result']:
            return
        for result in paginated_result['result']:
            yield result
        index += pagination
Search the ORCID database with a generator. The generator will yield every result. Parameters ---------- :param query: string Query in line with the chosen method. :param method: string One of 'lucene', 'edismax', 'dismax' :param pagination: integer How many papers should be fetched with the request. :param access_token: string If obtained before, the access token to use to pass through authorization. Note that if this argument is not provided, the function will take more time. Yields ------- :yields: dict Single profile from the search results.
entailment
def get_search_token_from_orcid(self, scope='/read-public'):
    """Get a token for searching ORCID records.

    Parameters
    ----------
    :param scope: string
        /read-public or /read-member

    Returns
    -------
    :returns: string
        The token.
    """
    payload = {'client_id': self._key,
               'client_secret': self._secret,
               'scope': scope,
               'grant_type': 'client_credentials'}
    url = "%s/oauth/token" % self._endpoint
    headers = {'Accept': 'application/json'}
    response = requests.post(url, data=payload, headers=headers,
                             timeout=self._timeout)
    response.raise_for_status()
    if self.do_store_raw_response:
        self.raw_response = response
    return response.json()['access_token']
Get a token for searching ORCID records. Parameters ---------- :param scope: string /read-public or /read-member Returns ------- :returns: string The token.
entailment
def get_token(self, user_id, password, redirect_uri,
              scope='/read-limited'):
    """Get the token.

    Parameters
    ----------
    :param user_id: string
        The id of the user used for authentication.
    :param password: string
        The user password.
    :param redirect_uri: string
        The redirect uri of the institution.
    :param scope: string
        The desired scope. For example '/activities/update',
        '/read-limited', etc.

    Returns
    -------
    :returns: string
        The token.
    """
    response = self._authenticate(user_id, password, redirect_uri, scope)
    return response['access_token']
Get the token. Parameters ---------- :param user_id: string The id of the user used for authentication. :param password: string The user password. :param redirect_uri: string The redirect uri of the institution. :param scope: string The desired scope. For example '/activities/update', '/read-limited', etc. Returns ------- :returns: string The token.
entailment
def get_token_from_authorization_code(self, authorization_code, redirect_uri):
    """Like `get_token`, but using an OAuth 2 authorization code.

    Use this method if you run a webserver that serves as an endpoint for
    the redirect URI. The webserver can retrieve the authorization code
    from the URL that is requested by ORCID.

    Parameters
    ----------
    :param redirect_uri: string
        The redirect uri of the institution.
    :param authorization_code: string
        The authorization code.

    Returns
    -------
    :returns: dict
        All data of the access token.  The access token itself is in the
        ``"access_token"`` key.
    """
    token_dict = {
        "client_id": self._key,
        "client_secret": self._secret,
        "grant_type": "authorization_code",
        "code": authorization_code,
        "redirect_uri": redirect_uri,
    }
    response = requests.post(self._token_url, data=token_dict,
                             headers={'Accept': 'application/json'},
                             timeout=self._timeout)
    response.raise_for_status()
    if self.do_store_raw_response:
        self.raw_response = response
    return json.loads(response.text)
Like `get_token`, but using an OAuth 2 authorization code. Use this method if you run a webserver that serves as an endpoint for the redirect URI. The webserver can retrieve the authorization code from the URL that is requested by ORCID. Parameters ---------- :param redirect_uri: string The redirect uri of the institution. :param authorization_code: string The authorization code. Returns ------- :returns: dict All data of the access token. The access token itself is in the ``"access_token"`` key.
entailment
def read_record_public(self, orcid_id, request_type, token, put_code=None,
                       accept_type='application/orcid+json'):
    """Get the public info about the researcher.

    Parameters
    ----------
    :param orcid_id: string
        Id of the queried author.
    :param request_type: string
        For example: 'record'.
        See https://members.orcid.org/api/tutorial/read-orcid-records
        for possible values.
    :param token: string
        Token received from OAuth 2 3-legged authorization.
    :param put_code: string | list of strings
        The id of the queried work. In case of 'works' request_type
        might be a list of strings
    :param accept_type: expected MIME type of received data

    Returns
    -------
    :returns: dict | lxml.etree._Element
        Record(s) in JSON-compatible dictionary representation or
        in XML E-tree, depending on accept_type specified.
    """
    return self._get_info(orcid_id, self._get_public_info, request_type,
                          token, put_code, accept_type)
Get the public info about the researcher. Parameters ---------- :param orcid_id: string Id of the queried author. :param request_type: string For example: 'record'. See https://members.orcid.org/api/tutorial/read-orcid-records for possible values. :param token: string Token received from OAuth 2 3-legged authorization. :param put_code: string | list of strings The id of the queried work. In case of 'works' request_type might be a list of strings :param accept_type: expected MIME type of received data Returns ------- :returns: dict | lxml.etree._Element Record(s) in JSON-compatible dictionary representation or in XML E-tree, depending on accept_type specified.
entailment
def add_record(self, orcid_id, token, request_type, data,
               content_type='application/orcid+json'):
    """Add a record to a profile.

    Parameters
    ----------
    :param orcid_id: string
        Id of the author.
    :param token: string
        Token received from OAuth 2 3-legged authorization.
    :param request_type: string
        One of 'activities', 'education', 'employment', 'funding',
        'peer-review', 'work'.
    :param data: dict | lxml.etree._Element
        The record in Python-friendly format, as either JSON-compatible
        dictionary (content_type == 'application/orcid+json') or XML
        (content_type == 'application/orcid+xml')
    :param content_type: string
        MIME type of the passed record.

    Returns
    -------
    :returns: string
        Put-code of the new work.
    """
    return self._update_activities(orcid_id, token, requests.post,
                                   request_type, data,
                                   content_type=content_type)
Add a record to a profile. Parameters ---------- :param orcid_id: string Id of the author. :param token: string Token received from OAuth 2 3-legged authorization. :param request_type: string One of 'activities', 'education', 'employment', 'funding', 'peer-review', 'work'. :param data: dict | lxml.etree._Element The record in Python-friendly format, as either JSON-compatible dictionary (content_type == 'application/orcid+json') or XML (content_type == 'application/orcid+xml') :param content_type: string MIME type of the passed record. Returns ------- :returns: string Put-code of the new work.
entailment
def get_token(self, user_id, password, redirect_uri,
              scope='/activities/update'):
    """Get the token.

    Parameters
    ----------
    :param user_id: string
        The id of the user used for authentication.
    :param password: string
        The user password.
    :param redirect_uri: string
        The redirect uri of the institution.
    :param scope: string
        The desired scope. For example '/activities/update',
        '/read-limited', etc.

    Returns
    -------
    :returns: string
        The token.
    """
    return super(MemberAPI, self).get_token(user_id, password,
                                            redirect_uri, scope)
Get the token. Parameters ---------- :param user_id: string The id of the user used for authentication. :param password: string The user password. :param redirect_uri: string The redirect uri of the institution. :param scope: string The desired scope. For example '/activities/update', '/read-limited', etc. Returns ------- :returns: string The token.
entailment
def get_user_orcid(self, user_id, password, redirect_uri):
    """Get the user orcid from authentication process.

    Parameters
    ----------
    :param user_id: string
        The id of the user used for authentication.
    :param password: string
        The user password.
    :param redirect_uri: string
        The redirect uri of the institution.

    Returns
    -------
    :returns: string
        The orcid.
    """
    response = self._authenticate(user_id, password, redirect_uri,
                                  '/authenticate')
    return response['orcid']
Get the user orcid from authentication process. Parameters ---------- :param user_id: string The id of the user used for authentication. :param password: string The user password. :param redirect_uri: string The redirect uri of the institution. Returns ------- :returns: string The orcid.
entailment
def read_record_member(self, orcid_id, request_type, token, put_code=None,
                       accept_type='application/orcid+json'):
    """Get the member info about the researcher.

    Parameters
    ----------
    :param orcid_id: string
        Id of the queried author.
    :param request_type: string
        For example: 'record'.
        See https://members.orcid.org/api/tutorial/read-orcid-records
        for possible values.
    :param token: string
        Token received from OAuth 2 3-legged authorization.
    :param put_code: string | list of strings
        The id of the queried work. In case of 'works' request_type
        might be a list of strings
    :param accept_type: expected MIME type of received data

    Returns
    -------
    :returns: dict | lxml.etree._Element
        Record(s) in JSON-compatible dictionary representation or
        in XML E-tree, depending on accept_type specified.
    """
    return self._get_info(orcid_id, self._get_member_info, request_type,
                          token, put_code, accept_type)
Get the member info about the researcher. Parameters ---------- :param orcid_id: string Id of the queried author. :param request_type: string For example: 'record'. See https://members.orcid.org/api/tutorial/read-orcid-records for possible values. :param token: string Token received from OAuth 2 3-legged authorization. :param put_code: string | list of strings The id of the queried work. In case of 'works' request_type might be a list of strings :param accept_type: expected MIME type of received data Returns ------- :returns: dict | lxml.etree._Element Record(s) in JSON-compatible dictionary representation or in XML E-tree, depending on accept_type specified.
entailment
def remove_record(self, orcid_id, token, request_type, put_code):
    """Remove a record from a profile.

    Parameters
    ----------
    :param orcid_id: string
        Id of the author.
    :param token: string
        Token received from OAuth 2 3-legged authorization.
    :param request_type: string
        One of 'activities', 'education', 'employment', 'funding',
        'peer-review', 'work'.
    :param put_code: string
        The id of the record. Can be retrieved using read_record_*
        method. In the result of it, it will be called 'put-code'.
    """
    self._update_activities(orcid_id, token, requests.delete,
                            request_type, put_code=put_code)
Remove a record from a profile. Parameters ---------- :param orcid_id: string Id of the author. :param token: string Token received from OAuth 2 3-legged authorization. :param request_type: string One of 'activities', 'education', 'employment', 'funding', 'peer-review', 'work'. :param put_code: string The id of the record. Can be retrieved using read_record_* method. In the result of it, it will be called 'put-code'.
entailment
def update_record(self, orcid_id, token, request_type, data, put_code,
                  content_type='application/orcid+json'):
    """Update a record on a profile.

    Parameters
    ----------
    :param orcid_id: string
        Id of the author.
    :param token: string
        Token received from OAuth 2 3-legged authorization.
    :param request_type: string
        One of 'activities', 'education', 'employment', 'funding',
        'peer-review', 'work'.
    :param data: dict | lxml.etree._Element
        The record in Python-friendly format, as either JSON-compatible
        dictionary (content_type == 'application/orcid+json') or XML
        (content_type == 'application/orcid+xml')
    :param put_code: string
        The id of the record. Can be retrieved using read_record_*
        method. In the result of it, it will be called 'put-code'.
    :param content_type: string
        MIME type of the data being sent.
    """
    self._update_activities(orcid_id, token, requests.put, request_type,
                            data, put_code, content_type)
Update a record on a profile. Parameters ---------- :param orcid_id: string Id of the author. :param token: string Token received from OAuth 2 3-legged authorization. :param request_type: string One of 'activities', 'education', 'employment', 'funding', 'peer-review', 'work'. :param data: dict | lxml.etree._Element The record in Python-friendly format, as either JSON-compatible dictionary (content_type == 'application/orcid+json') or XML (content_type == 'application/orcid+xml') :param put_code: string The id of the record. Can be retrieved using read_record_* method. In the result of it, it will be called 'put-code'. :param content_type: string MIME type of the data being sent.
entailment
def remove_duplicates(apps, schema_editor):
    """
    Remove any duplicates from the entity relationship table
    :param apps:
    :param schema_editor:
    :return:
    """
    # Get the model
    EntityRelationship = apps.get_model('entity', 'EntityRelationship')

    # Find the duplicates
    duplicates = EntityRelationship.objects.all().order_by(
        'sub_entity_id', 'super_entity_id'
    ).values(
        'sub_entity_id', 'super_entity_id'
    ).annotate(
        Count('sub_entity_id'),
        Count('super_entity_id'),
        max_id=Max('id')
    ).filter(
        super_entity_id__count__gt=1
    )

    # Loop over the duplicates and delete
    for duplicate in duplicates:
        EntityRelationship.objects.filter(
            sub_entity_id=duplicate['sub_entity_id'],
            super_entity_id=duplicate['super_entity_id']
        ).exclude(
            id=duplicate['max_id']
        ).delete()
Remove any duplicates from the entity relationship table :param apps: :param schema_editor: :return:
entailment
def get_access_tokens(self, authorization_code):
    """From the authorization code, get the "access token" and the
    "refresh token" from Box.

    Args:
        authorization_code (str). Authorisation code emitted by Box at the
            url provided by the function :func:`get_authorization_url`.

    Returns:
        tuple. (access_token, refresh_token)

    Raises:
        BoxError: An error response is returned from Box (status_code >= 400).
        BoxHttpResponseError: Response from Box is malformed.
        requests.exceptions.*: Any connection related problem.
    """
    response = self.box_request.get_access_token(authorization_code)
    try:
        att = response.json()
    except Exception as ex:
        raise BoxHttpResponseError(ex)
    if response.status_code >= 400:
        raise BoxError(response.status_code, att)
    return att['access_token'], att['refresh_token']
From the authorization code, get the "access token" and the "refresh token" from Box. Args: authorization_code (str). Authorisation code emitted by Box at the url provided by the function :func:`get_authorization_url`. Returns: tuple. (access_token, refresh_token) Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
entailment
def unpack_frame(message):
    """Called to unpack a STOMP message into a dictionary.

    returned = {
        # STOMP Command:
        'cmd' : '...',

        # Headers e.g.
        'headers' : {
            'destination' : 'xyz',
            'message-id' : 'some event',
            :
            etc,
        }

        # Body:
        'body' : '...1234...\x00',
    }
    """
    body = []
    returned = dict(cmd='', headers={}, body='')

    breakdown = message.split('\n')

    # Get the message command:
    returned['cmd'] = breakdown[0]
    breakdown = breakdown[1:]

    def headD(field):
        # find the first ':' everything to the left of this is a
        # header, everything to the right is data:
        index = field.find(':')
        if index:
            header = field[:index].strip()
            data = field[index + 1:].strip()
            # print "header '%s' data '%s'" % (header, data)
            returned['headers'][header.strip()] = data.strip()

    def bodyD(field):
        field = field.strip()
        if field:
            body.append(field)

    # Recover the header fields and body data
    handler = headD
    for field in breakdown:
        # print "field:", field
        if field.strip() == '':
            # End of headers, its body data next.
            handler = bodyD
            continue
        handler(field)

    # Stitch the body data together:
    # print "1. body: ", body
    body = "".join(body)
    returned['body'] = body.replace('\x00', '')

    # print "2. body: <%s>" % returned['body']
    return returned
Called to unpack a STOMP message into a dictionary. returned = { # STOMP Command: 'cmd' : '...', # Headers e.g. 'headers' : { 'destination' : 'xyz', 'message-id' : 'some event', : etc, } # Body: 'body' : '...1234...\x00', }
entailment
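A minimal round trip (frame text made up for illustration); note that multi-line bodies would be joined without newlines by bodyD:

    frame_text = ("MESSAGE\n"
                  "destination:/queue/test\n"
                  "message-id:msg-001\n"
                  "\n"
                  "hello\x00")
    msg = unpack_frame(frame_text)
    msg['cmd']                      # -> 'MESSAGE'
    msg['headers']['destination']   # -> '/queue/test'
    msg['body']                     # -> 'hello' (trailing NULL stripped)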
def ack(messageid, transactionid=None):
    """STOMP acknowledge command.

    Acknowledge receipt of a specific message from the server.

    messageid:
        This is the id of the message we are acknowledging,
        what else could it be? ;)

    transactionid:
        This is the id that all actions in this transaction
        will have. If this is not given then a random UUID
        will be generated for this.
    """
    header = 'message-id: %s' % messageid
    if transactionid:
        header = 'message-id: %s\ntransaction: %s' % (messageid, transactionid)
    return "ACK\n%s\n\n\x00\n" % header
STOMP acknowledge command. Acknowledge receipt of a specific message from the server. messageid: This is the id of the message we are acknowledging, what else could it be? ;) transactionid: This is the id that all actions in this transaction will have. If this is not given then a random UUID will be generated for this.
entailment
def send(dest, msg, transactionid=None):
    """STOMP send command.

    dest:
        This is the channel we wish to send to.

    msg:
        This is the message body to be sent.

    transactionid:
        This is an optional field and is not needed by default.
    """
    transheader = ''
    if transactionid:
        transheader = 'transaction: %s\n' % transactionid
    return "SEND\ndestination: %s\n%s\n%s\x00\n" % (dest, transheader, msg)
STOMP send command. dest: This is the channel we wish to send to. msg: This is the message body to be sent. transactionid: This is an optional field and is not needed by default.
entailment
def setCmd(self, cmd):
    """Check the cmd is valid; FrameError will be raised if it's not."""
    cmd = cmd.upper()
    if cmd not in VALID_COMMANDS:
        raise FrameError(
            "The cmd '%s' is not valid! It must be one of '%s' (STOMP v%s)." % (
                cmd, VALID_COMMANDS, STOMP_VERSION)
        )
    else:
        self._cmd = cmd
Check the cmd is valid; FrameError will be raised if it's not.
entailment
def pack(self):
    """Called to create a STOMP message from the internal values."""
    headers = ''.join(
        ['%s:%s\n' % (f, v) for f, v in sorted(self.headers.items())]
    )
    stomp_message = "%s\n%s\n%s%s\n" % (self._cmd, headers, self.body, NULL)

    # import pprint
    # print "stomp_message: ", pprint.pprint(stomp_message)

    return stomp_message
Called to create a STOMP message from the internal values.
entailment
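A sketch of building and packing a frame with this class, assuming cmd is exposed as a property routed through setCmd (as the unpack method in the next entry suggests); the destination is made up:

    f = Frame()
    f.cmd = 'SEND'                          # validated against VALID_COMMANDS via setCmd
    f.headers['destination'] = '/queue/test'
    f.body = 'hello'
    f.pack()   # -> 'SEND\ndestination:/queue/test\n\nhello\x00\n'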
def unpack(self, message):
    """Called to extract a STOMP message into this instance.

    message:
        This is a text string representing a valid STOMP (v1.0) message.

    This method uses unpack_frame(...) to extract the information, before
    it is assigned internally.

    returned:
        The result of the unpack_frame(...) call.
    """
    if not message:
        raise FrameError("Unpack error! The given message isn't valid '%s'!" % message)

    msg = unpack_frame(message)

    self.cmd = msg['cmd']
    self.headers = msg['headers']

    # Assign directly as the message will have the null
    # character in the message already.
    self.body = msg['body']

    return msg
Called to extract a STOMP message into this instance. message: This is a text string representing a valid STOMP (v1.0) message. This method uses unpack_frame(...) to extract the information, before it is assigned internally. returned: The result of the unpack_frame(...) call.
entailment
def react(self, msg):
    """Called to provide a response to a message if needed.

    msg:
        This is a dictionary as returned by unpack_frame(...)
        or it can be a straight STOMP message. This function
        will attempt to determine which and deal with it.

    returned:
        A message to return or an empty string.
    """
    returned = ""

    # If its not a string assume its a dict.
    mtype = type(msg)
    if mtype in stringTypes:
        msg = unpack_frame(msg)
    elif mtype == dict:
        pass
    else:
        raise FrameError("Unknown message type '%s', I don't know what to do with this!" % mtype)

    if msg['cmd'] in self.states:
        # print("reacting to message - %s" % msg['cmd'])
        returned = self.states[msg['cmd']](msg)

    return returned
Called to provide a response to a message if needed. msg: This is a dictionary as returned by unpack_frame(...) or it can be a straight STOMP message. This function will attempt to determine which and deal with it. returned: A message to return or an empty string.
entailment
def error(self, msg):
    """Called to handle an error message received from the server.

    This method just logs the error message.

    returned:
        NO_RESPONSE_NEEDED
    """
    body = msg['body'].replace(NULL, '')

    brief_msg = ""
    if 'message' in msg['headers']:
        brief_msg = msg['headers']['message']

    self.log.error("Received server error - message: %s\n\n%s" % (brief_msg, body))

    returned = NO_RESPONSE_NEEDED
    if self.testing:
        returned = 'error'

    return returned
Called to handle an error message received from the server. This method just logs the error message returned: NO_RESPONSE_NEEDED
entailment
def receipt(self, msg):
    """Called to handle a receipt message received from the server.

    This method just logs the receipt message.

    returned:
        NO_RESPONSE_NEEDED
    """
    body = msg['body'].replace(NULL, '')

    brief_msg = ""
    if 'receipt-id' in msg['headers']:
        brief_msg = msg['headers']['receipt-id']

    self.log.info("Received server receipt message - receipt-id:%s\n\n%s" % (brief_msg, body))

    returned = NO_RESPONSE_NEEDED
    if self.testing:
        returned = 'receipt'

    return returned
Called to handle a receipt message received from the server. This method just logs the receipt message returned: NO_RESPONSE_NEEDED
entailment
def log_init(level):
    """Set up a logger that catches all channels and logs it to stdout.

    This is used to set up logging when testing.
    """
    log = logging.getLogger()
    hdlr = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s %(name)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    log.addHandler(hdlr)
    log.setLevel(level)
Set up a logger that catches all channels and logs it to stdout. This is used to set up logging when testing.
entailment
def ack(self, msg):
    """Override this and do some custom message handling."""
    print("Got a message:\n%s\n" % msg['body'])

    # do something with the message...

    # Generate the ack or not if you subscribed with ack='auto'
    return super(Pong, self).ack(msg)
Override this and do some custom message handling.
entailment
def transaction_atomic_with_retry(num_retries=5, backoff=0.1):
    """
    This is a decorator that will wrap the decorated method in an atomic
    transaction and retry the transaction a given number of times

    :param num_retries: How many times should we retry before we give up
    :param backoff: How long should we wait after each try
    """

    # Create the decorator
    @wrapt.decorator
    def wrapper(wrapped, instance, args, kwargs):
        # Keep track of how many times we have tried
        num_tries = 0
        exception = None

        # Call the main sync entities method and catch any exceptions
        while num_tries <= num_retries:
            # Try running the transaction
            try:
                with transaction.atomic():
                    return wrapped(*args, **kwargs)
            # Catch any operation errors
            except db.utils.OperationalError as e:
                num_tries += 1
                exception = e
                sleep(backoff * num_tries)

        # If we have an exception raise it
        raise exception

    # Return the decorator
    return wrapper
This is a decorator that will wrap the decorated method in an atomic transaction and retry the transaction a given number of times :param num_retries: How many times should we retry before we give up :param backoff: How long should we wait after each try
entailment
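A hypothetical use of the decorator; the function name and its body are placeholders:

    @transaction_atomic_with_retry(num_retries=5, backoff=0.1)
    def resync_relationships(entity_ids):
        # ORM writes that may deadlock and raise db.utils.OperationalError;
        # retried up to 5 times with 0.1s, 0.2s, ... sleeps between attempts
        EntityRelationship.objects.filter(sub_entity_id__in=entity_ids).delete()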
@wrapt.decorator
def defer_entity_syncing(wrapped, instance, args, kwargs):
    """
    A decorator that can be used to defer the syncing of entities until after the method has been run
    This is being introduced to help avoid deadlocks in the meantime as we attempt to better understand
    why they are happening
    """

    # Defer entity syncing while we run our method
    sync_entities.defer = True

    # Run the method
    try:
        return wrapped(*args, **kwargs)

    # After we run the method disable the deferred syncing
    # and sync all the entities that have been buffered to be synced
    finally:
        # Enable entity syncing again
        sync_entities.defer = False

        # Get the models that need to be synced
        model_objs = list(sync_entities.buffer.values())

        # If none is in the model objects we need to sync all
        if None in sync_entities.buffer:
            model_objs = list()

        # Sync the entities that were deferred if any
        if len(sync_entities.buffer):
            sync_entities(*model_objs)

        # Clear the buffer
        sync_entities.buffer = {}
A decorator that can be used to defer the syncing of entities until after the method has been run This is being introduced to help avoid deadlocks in the meantime as we attempt to better understand why they are happening
entailment
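A hypothetical caller (the model and loop are placeholders) that batches every sync triggered inside it into one deferred sync at exit:

    @defer_entity_syncing
    def import_accounts(rows):
        for row in rows:
            Account.objects.create(**row)   # each save would normally trigger its own sync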
def _get_super_entities_by_ctype(model_objs_by_ctype, model_ids_to_sync, sync_all):
    """
    Given model objects organized by content type and a dictionary of all model IDs that need to
    be synced, organize all super entity relationships that need to be synced.

    Ensure that the model_ids_to_sync dict is updated with any new super entities that need to be
    part of the overall entity sync
    """
    super_entities_by_ctype = defaultdict(lambda: defaultdict(list))  # pragma: no cover
    for ctype, model_objs_for_ctype in model_objs_by_ctype.items():
        entity_config = entity_registry.entity_registry.get(ctype.model_class())
        super_entities = entity_config.get_super_entities(model_objs_for_ctype, sync_all)
        super_entities_by_ctype[ctype] = {
            ContentType.objects.get_for_model(model_class, for_concrete_model=False): relationships
            for model_class, relationships in super_entities.items()
        }

        # Continue adding to the set of entities that need to be synced
        for super_entity_ctype, relationships in super_entities_by_ctype[ctype].items():
            for sub_entity_id, super_entity_id in relationships:
                model_ids_to_sync[ctype].add(sub_entity_id)
                model_ids_to_sync[super_entity_ctype].add(super_entity_id)

    return super_entities_by_ctype
Given model objects organized by content type and a dictionary of all model IDs that need to be synced, organize all super entity relationships that need to be synced. Ensure that the model_ids_to_sync dict is updated with any new super entities that need to be part of the overall entity sync
entailment
def _get_model_objs_to_sync(model_ids_to_sync, model_objs_map, sync_all):
    """
    Given the model IDs to sync, fetch all model objects to sync
    """
    model_objs_to_sync = {}
    for ctype, model_ids_to_sync_for_ctype in model_ids_to_sync.items():
        model_qset = entity_registry.entity_registry.get(ctype.model_class()).queryset

        if not sync_all:
            model_objs_to_sync[ctype] = model_qset.filter(id__in=model_ids_to_sync_for_ctype)
        else:
            model_objs_to_sync[ctype] = [
                model_objs_map[ctype, model_id]
                for model_id in model_ids_to_sync_for_ctype
            ]

    return model_objs_to_sync
Given the model IDs to sync, fetch all model objects to sync
entailment
def sync_entities(*model_objs):
    """
    Syncs entities

    Args:
        model_objs (List[Model]): The model objects to sync. If empty, all entities will be synced
    """

    # Check if we are deferring processing
    if sync_entities.defer:
        # If we don't have any model objects passed, add a None to let us know that we need to sync all
        if not model_objs:
            sync_entities.buffer[None] = None
        else:
            # Add each model obj to the buffer
            for model_obj in model_objs:
                sync_entities.buffer[(model_obj.__class__, model_obj.pk)] = model_obj

        # Return False since we did not do anything
        return False

    # Create a syncer and sync
    EntitySyncer(*model_objs).sync()
Syncs entities Args: model_objs (List[Model]): The model objects to sync. If empty, all entities will be synced
entailment
def sync_entities_watching(instance):
    """
    Syncs entities watching changes of a model instance.
    """
    for entity_model, entity_model_getter in entity_registry.entity_watching[instance.__class__]:
        model_objs = list(entity_model_getter(instance))
        if model_objs:
            sync_entities(*model_objs)
Syncs entities watching changes of a model instance.
entailment
def upsert_entity_kinds(self, entity_kinds):
    """
    Given a list of entity kinds ensure they are synced properly to the database.
    This will ensure that only unchanged entity kinds are synced and will still
    return all updated entity kinds

    :param entity_kinds: The list of entity kinds to sync
    """

    # Filter out unchanged entity kinds
    unchanged_entity_kinds = {}
    if entity_kinds:
        unchanged_entity_kinds = {
            (entity_kind.name, entity_kind.display_name): entity_kind
            for entity_kind in EntityKind.all_objects.extra(
                where=['(name, display_name) IN %s'],
                params=[tuple(
                    (entity_kind.name, entity_kind.display_name)
                    for entity_kind in entity_kinds
                )]
            )
        }

    # Filter out the unchanged entity kinds
    changed_entity_kinds = [
        entity_kind
        for entity_kind in entity_kinds
        if (entity_kind.name, entity_kind.display_name) not in unchanged_entity_kinds
    ]

    # If any of our kinds have changed upsert them
    upserted_entity_kinds = []
    if changed_entity_kinds:
        # Select all our existing entity kinds for update so we can do proper locking
        # We have to select all here for some odd reason, if we only select the ones
        # we are syncing we still run into deadlock issues
        list(EntityKind.all_objects.all().select_for_update().values_list('id', flat=True))

        # Upsert the entity kinds
        upserted_entity_kinds = manager_utils.bulk_upsert(
            queryset=EntityKind.all_objects.filter(
                name__in=[entity_kind.name for entity_kind in changed_entity_kinds]
            ),
            model_objs=changed_entity_kinds,
            unique_fields=['name'],
            update_fields=['display_name'],
            return_upserts=True
        )

    # Return all the entity kinds
    return upserted_entity_kinds + list(unchanged_entity_kinds.values())
Given a list of entity kinds ensure they are synced properly to the database. This will ensure that only unchanged entity kinds are synced and will still return all updated entity kinds :param entity_kinds: The list of entity kinds to sync
entailment
def upsert_entities(self, entities, sync=False):
    """
    Upsert a list of entities to the database

    :param entities: The entities to sync
    :param sync: Do a sync instead of an upsert
    """

    # Select the entities we are upserting for update to reduce deadlocks
    if entities:
        # Default select for update query when syncing all
        select_for_update_query = (
            'SELECT FROM {table_name} FOR NO KEY UPDATE'
        ).format(
            table_name=Entity._meta.db_table
        )
        select_for_update_query_params = []

        # If we are not syncing all, only select those we are updating
        if not sync:
            select_for_update_query = (
                'SELECT FROM {table_name} WHERE (entity_type_id, entity_id) IN %s FOR NO KEY UPDATE'
            ).format(
                table_name=Entity._meta.db_table
            )
            select_for_update_query_params = [tuple(
                (entity.entity_type_id, entity.entity_id)
                for entity in entities
            )]

        # Select the items for update
        with connection.cursor() as cursor:
            cursor.execute(select_for_update_query, select_for_update_query_params)

    # If we are syncing run the sync logic
    if sync:
        upserted_entities = manager_utils.sync(
            queryset=Entity.all_objects.all(),
            model_objs=entities,
            unique_fields=['entity_type_id', 'entity_id'],
            update_fields=['entity_kind_id', 'entity_meta', 'display_name', 'is_active'],
            return_upserts=True
        )
    # Otherwise we want to upsert our entities
    else:
        upserted_entities = manager_utils.bulk_upsert(
            queryset=Entity.all_objects.extra(
                where=['(entity_type_id, entity_id) IN %s'],
                params=[tuple(
                    (entity.entity_type_id, entity.entity_id)
                    for entity in entities
                )]
            ),
            model_objs=entities,
            unique_fields=['entity_type_id', 'entity_id'],
            update_fields=['entity_kind_id', 'entity_meta', 'display_name', 'is_active'],
            return_upserts=True
        )

    # Return the upserted entities
    return upserted_entities
Upsert a list of entities to the database :param entities: The entities to sync :param sync: Do a sync instead of an upsert
entailment
def upsert_entity_relationships(self, queryset, entity_relationships):
    """
    Upsert entity relationships to the database

    :param queryset: The base queryset to use
    :param entity_relationships: The entity relationships to ensure exist in the database
    """

    # Select the relationships for update
    if entity_relationships:
        list(queryset.select_for_update().values_list('id', flat=True))

    # Sync the relationships
    return manager_utils.sync(
        queryset=queryset,
        model_objs=entity_relationships,
        unique_fields=['sub_entity_id', 'super_entity_id'],
        update_fields=[],
        return_upserts=True
    )
Upsert entity relationships to the database :param queryset: The base queryset to use :param entity_relationships: The entity relationships to ensure exist in the database
entailment
def get_entity_kind(self, model_obj):
    """
    Returns a tuple for a kind name and kind display name of an entity.
    By default, uses the app_label and model of the model object's content
    type as the kind.
    """
    model_obj_ctype = ContentType.objects.get_for_model(self.queryset.model)
    return (u'{0}.{1}'.format(model_obj_ctype.app_label, model_obj_ctype.model),
            u'{0}'.format(model_obj_ctype))
Returns a tuple for a kind name and kind display name of an entity. By default, uses the app_label and model of the model object's content type as the kind.
entailment
def register_entity(self, entity_config):
    """
    Registers an entity config
    """
    if not issubclass(entity_config, EntityConfig):
        raise ValueError('Must register entity config class of subclass EntityConfig')

    if entity_config.queryset is None:
        raise ValueError('Entity config must define queryset')

    model = entity_config.queryset.model
    self._entity_registry[model] = entity_config()

    # Add watchers to the global look up table
    for watching_model, entity_model_getter in entity_config.watching:
        self._entity_watching[watching_model].append((model, entity_model_getter))
Registers an entity config
entailment
def start(host='localhost', port=61613, username='', password=''):
    """Start twisted event loop and the fun should begin...
    """
    StompClientFactory.username = username
    StompClientFactory.password = password
    reactor.connectTCP(host, port, StompClientFactory())
    reactor.run()
Start twisted event loop and the fun should begin...
entailment
def connected(self, msg):
    """Once I've connected I want to subscribe to the message queue.
    """
    stomper.Engine.connected(self, msg)

    self.log.info("Connected: session %s. Beginning say hello." % msg['headers']['session'])

    def setup_looping_call():
        lc = LoopingCall(self.send)
        lc.start(2)

    reactor.callLater(1, setup_looping_call)

    f = stomper.Frame()
    f.unpack(stomper.subscribe(DESTINATION))

    # ActiveMQ specific headers:
    #
    # prevent the messages we send coming back to us.
    f.headers['activemq.noLocal'] = 'true'

    return f.pack()
Once I've connected I want to subscribe to the message queue.
entailment
def send(self):
    """Send out a hello message periodically.
    """
    self.log.info("Saying hello (%d)." % self.counter)

    f = stomper.Frame()
    f.unpack(stomper.send(DESTINATION, 'hello there (%d)' % self.counter))

    self.counter += 1

    # ActiveMQ specific headers:
    #
    # f.headers['persistent'] = 'true'

    self.transport.write(f.pack())
Send out a hello message periodically.
entailment
def connectionMade(self):
    """Register with stomp server.
    """
    cmd = stomper.connect(self.username, self.password)
    self.transport.write(cmd)
Register with stomp server.
entailment
def dataReceived(self, data):
    """Use stompbuffer to determine when a complete message has been received.
    """
    self.stompBuffer.appendData(data)

    while True:
        msg = self.stompBuffer.getOneMessage()
        if msg is None:
            break

        returned = self.react(msg)
        if returned:
            self.transport.write(returned)
Use stompbuffer to determine when a complete message has been received.
entailment
def clientConnectionFailed(self, connector, reason): """Connection failed """ print('Connection failed. Reason:', reason) ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
Connection failed
entailment
def ack(self, msg): """Process the message and determine what to do with it. """ self.log.info("receiverId <%s> Received: <%s> " % (self.receiverId, msg['body'])) #return super(MyStomp, self).ack(msg) return stomper.NO_REPONSE_NEEDED
Process the message and determine what to do with it.
entailment
def connectionMade(self): """Register with the stomp server. """ cmd = self.sm.connect() self.transport.write(cmd)
Register with the stomp server.
entailment
def dataReceived(self, data): """Data received, react to it and respond if needed. """ # print "receiver dataReceived: <%s>" % data msg = stomper.unpack_frame(data) returned = self.sm.react(msg) # print "receiver returned <%s>" % returned if returned: self.transport.write(returned)
Data received, react to it and respond if needed.
entailment
def find_id_in_folder(self, name, parent_folder_id=0): """Find a folder or a file ID from its name, inside a given folder. Args: name (str): Name of the folder or the file to find. parent_folder_id (int): ID of the folder where to search. Returns: int. ID of the file or folder found. None if not found. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem. """ if name is None or len(name) == 0: return parent_folder_id offset = 0 resp = self.get_folder_items(parent_folder_id, limit=1000, offset=offset, fields_list=['name']) total = int(resp['total_count']) while offset < total: found = self.__find_name(resp, name) if found is not None: return found offset += int(len(resp['entries'])) resp = self.get_folder_items(parent_folder_id, limit=1000, offset=offset, fields_list=['name']) return None
Find a folder or a file ID from its name, inside a given folder. Args: name (str): Name of the folder or the file to find. parent_folder_id (int): ID of the folder where to search. Returns: int. ID of the file or folder found. None if not found. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
entailment
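A usage sketch, assuming `box` is an authenticated instance of this client class (names illustrative):

# Search folder 42 for an item named 'report.pdf'; None means not found.
file_id = box.find_id_in_folder('report.pdf', parent_folder_id=42)
if file_id is None:
    print('report.pdf not found in folder 42')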
def create_folder(self, name, parent_folder_id=0):
    """Create a folder. If the folder exists, a BoxError will be raised.

    Args:
        name (str): Name of the folder.
        parent_folder_id (int): ID of the folder where to create the new one.

    Returns:
        dict. Response from Box.

    Raises:
        BoxError: An error response is returned from Box (status_code >= 400).
        BoxHttpResponseError: Response from Box is malformed.
        requests.exceptions.*: Any connection related problem.
    """
    return self.__request("POST", "folders",
                          data={"name": name,
                                "parent": {"id": unicode(parent_folder_id)}})
Create a folder. If the folder exists, a BoxError will be raised. Args: name (str): Name of the folder. parent_folder_id (int): ID of the folder where to create the new one. Returns: dict. Response from Box. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
entailment
def delete_folder(self, folder_id, recursive=True):
    """Delete an existing folder

    Args:
        folder_id (int): ID of the folder to delete.
        recursive (bool): Delete all subfolders if True.

    Returns:
        dict. Response from Box.

    Raises:
        BoxError: An error response is returned from Box (status_code >= 400).
        BoxHttpResponseError: Response from Box is malformed.
        requests.exceptions.*: Any connection related problem.
    """
    return self.__request("DELETE", "folders/%s" % (folder_id, ),
                          querystring={'recursive': unicode(recursive).lower()})
Delete an existing folder Args: folder_id (int): ID of the folder to delete. recursive (bool): Delete all subfolders if True. Returns: dict. Response from Box. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
entailment
def get_folder_items(self, folder_id, limit=100, offset=0, fields_list=None): """Get files and folders inside a given folder Args: folder_id (int): Where to get files and folders info. limit (int): The number of items to return. offset (int): The item at which to begin the response. fields_list (list): List of attributes to get. All attributes if None. Returns: dict. Response from Box. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem. """ qs = { "limit": limit, "offset": offset } if fields_list: qs['fields'] = ','.join(fields_list) return self.__request("GET", "folders/%s/items" % (folder_id, ), querystring=qs)
Get files and folders inside a given folder Args: folder_id (int): Where to get files and folders info. limit (int): The number of items to return. offset (int): The item at which to begin the response. fields_list (list): List of attributes to get. All attributes if None. Returns: dict. Response from Box. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
entailment
def upload_file(self, name, folder_id, file_path):
    """Upload a file into a folder.

    Use this function for small files; for big files there is the
    chunk_upload_file() function.

    Args:
        name (str): Name of the file on your Box storage.
        folder_id (int): ID of the folder where to upload the file.
        file_path (str): Local path of the file to upload.

    Returns:
        dict. Response from Box.

    Raises:
        BoxError: An error response is returned from Box (status_code >= 400).
        BoxHttpResponseError: Response from Box is malformed.
        requests.exceptions.*: Any connection related problem.
    """
    try:
        return self.__do_upload_file(name, folder_id, file_path)
    except BoxError as ex:
        if ex.status != 401:
            raise
        # tokens have been refreshed, so restart the upload
        return self.__do_upload_file(name, folder_id, file_path)
Upload a file into a folder. Use this function for small files; for big files there is the chunk_upload_file() function. Args: name (str): Name of the file on your Box storage. folder_id (int): ID of the folder where to upload the file. file_path (str): Local path of the file to upload. Returns: dict. Response from Box. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
entailment
def upload_new_file_version(self, name, folder_id, file_id, file_path):
    """Upload a new version of a file into a folder.

    Use this function for small files; for big files there is the
    chunk_upload_file() function.

    Args:
        name (str): Name of the file on your Box storage.
        folder_id (int): ID of the folder where to upload the file.
        file_id (int): ID of the file to update.
        file_path (str): Local path of the file to upload.

    Returns:
        dict. Response from Box.

    Raises:
        BoxError: An error response is returned from Box (status_code >= 400).
        BoxHttpResponseError: Response from Box is malformed.
        requests.exceptions.*: Any connection related problem.
    """
    try:
        return self.__do_upload_file(name, folder_id, file_path, file_id)
    except BoxError as ex:
        if ex.status != 401:
            raise
        # tokens have been refreshed, so restart the upload
        return self.__do_upload_file(name, folder_id, file_path, file_id)
Upload a new version of a file into a folder. Use this function for small files; for big files there is the chunk_upload_file() function. Args: name (str): Name of the file on your Box storage. folder_id (int): ID of the folder where to upload the file. file_id (int): ID of the file to update. file_path (str): Local path of the file to upload. Returns: dict. Response from Box. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
entailment
def chunk_upload_file(self, name, folder_id, file_path,
                      progress_callback=None,
                      chunk_size=1024*1024*1):
    """Upload a file chunk by chunk.

    The whole file is never loaded in memory.
    Use this function for big files.

    The callback(transferred, total) lets you know the upload progress.
    Upload can be cancelled if the callback raises an Exception.

    >>> def progress_callback(transferred, total):
    ...    print 'Uploaded %i bytes of %i' % (transferred, total, )
    ...    if user_request_cancel:
    ...       raise MyCustomCancelException()

    Args:
        name (str): Name of the file on your Box storage.
        folder_id (int): ID of the folder where to upload the file.
        file_path (str): Local path of the file to upload.
        progress_callback (func): Function called each time a chunk is uploaded.
        chunk_size (int): Size of chunks.

    Returns:
        dict. Response from Box.

    Raises:
        BoxError: An error response is returned from Box (status_code >= 400).
        BoxHttpResponseError: Response from Box is malformed.
        requests.exceptions.*: Any connection related problem.
    """
    try:
        return self.__do_chunk_upload_file(name, folder_id, file_path,
                                           progress_callback, chunk_size)
    except BoxError as ex:
        if ex.status != 401:
            raise
        # tokens have been refreshed, so restart the upload
        return self.__do_chunk_upload_file(name, folder_id, file_path,
                                           progress_callback, chunk_size)
Upload a file chunk by chunk. The whole file is never loaded in memory. Use this function for big files. The callback(transferred, total) lets you know the upload progress. Upload can be cancelled if the callback raises an Exception. >>> def progress_callback(transferred, total): ... print 'Uploaded %i bytes of %i' % (transferred, total, ) ... if user_request_cancel: ... raise MyCustomCancelException() Args: name (str): Name of the file on your Box storage. folder_id (int): ID of the folder where to upload the file. file_path (str): Local path of the file to upload. progress_callback (func): Function called each time a chunk is uploaded. chunk_size (int): Size of chunks. Returns: dict. Response from Box. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
entailment
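Putting the callback contract to work, a sketch assuming the same hypothetical `box` instance and a `user_request_cancel` flag set elsewhere:

def progress_callback(transferred, total):
    print('Uploaded %i bytes of %i' % (transferred, total))
    if user_request_cancel:
        # any exception raised here aborts the upload
        raise RuntimeError('upload cancelled by user')

box.chunk_upload_file('backup.tar.gz', 42, '/tmp/backup.tar.gz',
                      progress_callback=progress_callback,
                      chunk_size=4 * 1024 * 1024)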
def copy_file(self, file_id, dest_folder_id):
    """Copy a file to a new destination

    Args:
        file_id (int): ID of the file to copy.
        dest_folder_id (int): ID of the parent folder you are copying to.

    Returns:
        dict. Response from Box.

    Raises:
        BoxError: An error response is returned from Box (status_code >= 400).
        BoxError: 409 - Item with the same name already exists. In this
            case you will need to download the file and upload a new version
            to your destination. (Box currently doesn't have a method to
            copy a new version.)
        BoxHttpResponseError: Response from Box is malformed.
        requests.exceptions.*: Any connection related problem.
    """
    return self.__request("POST", "/files/" + unicode(file_id) + "/copy",
                          data={"parent": {"id": unicode(dest_folder_id)}})
Copy a file to a new destination Args: file_id (int): ID of the file to copy. dest_folder_id (int): ID of the parent folder you are copying to. Returns: dict. Response from Box. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxError: 409 - Item with the same name already exists. In this case you will need to download the file and upload a new version to your destination. (Box currently doesn't have a method to copy a new version.) BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
entailment
def download_file(self, file_id, dest_file_path,
                  progress_callback=None,
                  chunk_size=1024*1024*1):
    """Download a file.

    The whole file is never loaded in memory.

    The callback(transferred, total) lets you know the download progress.
    Download can be cancelled if the callback raises an Exception.

    >>> def progress_callback(transferred, total):
    ...    print 'Downloaded %i bytes of %i' % (transferred, total, )
    ...    if user_request_cancel:
    ...       raise MyCustomCancelException()

    Args:
        file_id (int): ID of the file to download.
        dest_file_path (str): Local path where to store the downloaded file.
        progress_callback (func): Function called each time a chunk is downloaded.
        chunk_size (int): Size of chunks.

    Raises:
        BoxError: An error response is returned from Box (status_code >= 400).
        BoxHttpResponseError: Response from Box is malformed.
        requests.exceptions.*: Any connection related problem.
    """
    with open(dest_file_path, 'wb') as fp:
        req = self.__request("GET", "files/%s/content" % (file_id, ),
                             stream=True, json_data=False)
        total = -1
        if hasattr(req, 'headers'):
            lower_headers = {k.lower(): v for k, v in req.headers.items()}
            if 'content-length' in lower_headers:
                # the header value is a string; cast it for the callback
                total = int(lower_headers['content-length'])
        transferred = 0
        for chunk in req.iter_content(chunk_size=chunk_size):
            if chunk:  # filter out keep-alive new chunks
                if progress_callback:
                    progress_callback(transferred, total)
                fp.write(chunk)
                fp.flush()
                transferred += len(chunk)
        if progress_callback:
            progress_callback(transferred, total)
Download a file. The whole file is never loaded in memory. The callback(transferred, total) lets you know the download progress. Download can be cancelled if the callback raises an Exception. >>> def progress_callback(transferred, total): ... print 'Downloaded %i bytes of %i' % (transferred, total, ) ... if user_request_cancel: ... raise MyCustomCancelException() Args: file_id (int): ID of the file to download. dest_file_path (str): Local path where to store the downloaded file. progress_callback (func): Function called each time a chunk is downloaded. chunk_size (int): Size of chunks. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
entailment
def search(self, **kwargs):
    """Searches for files/folders

    Args:
        \*\*kwargs (dict): A dictionary containing necessary parameters
            (check https://developers.box.com/docs/#search for
            list of parameters)

    Returns:
        dict. Response from Box.

    Raises:
        BoxError: An error response is returned from Box (status_code >= 400).
        BoxHttpResponseError: Response from Box is malformed.
        requests.exceptions.*: Any connection related problem.
    """
    query_string = dict(kwargs)
    return self.__request("GET", "search", querystring=query_string)
Searches for files/folders Args: \*\*kwargs (dict): A dictionary containing necessary parameters (check https://developers.box.com/docs/#search for list of parameters) Returns: dict. Response from Box. Raises: BoxError: An error response is returned from Box (status_code >= 400). BoxHttpResponseError: Response from Box is malformed. requests.exceptions.*: Any connection related problem.
entailment
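Because the keyword arguments are forwarded verbatim as the query string, a call might look like the sketch below; `query` and `limit` follow the linked Box search docs, and the response handling is an assumption about the usual entries layout:

results = box.search(query='quarterly report', limit=20)
for entry in results.get('entries', []):
    print(entry['name'])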
def getmany(self, *keys): """ Return a list of values corresponding to the keys in the iterable of *keys*. If a key is not present in the collection, its corresponding value will be :obj:`None`. .. note:: This method is not implemented by standard Python dictionary classes. """ pickled_keys = (self._pickle_key(k) for k in keys) pickled_values = self.redis.hmget(self.key, *pickled_keys) ret = [] for k, v in zip(keys, pickled_values): value = self.cache.get(k, self._unpickle(v)) ret.append(value) return ret
Return a list of values corresponding to the keys in the iterable of *keys*. If a key is not present in the collection, its corresponding value will be :obj:`None`. .. note:: This method is not implemented by standard Python dictionary classes.
entailment
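A short sketch of the batched lookup, assuming redis_collections' Dict and a reachable local Redis server:

from redis_collections import Dict

d = Dict({'a': 1, 'b': 2})
# missing keys come back as None rather than raising KeyError
print(d.getmany('a', 'b', 'missing'))  # [1, 2, None]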
def _data(self, pipe=None): """ Returns a Python dictionary with the same values as this object (without checking the local cache). """ pipe = self.redis if pipe is None else pipe items = pipe.hgetall(self.key).items() return {self._unpickle_key(k): self._unpickle(v) for k, v in items}
Returns a Python dictionary with the same values as this object (without checking the local cache).
entailment
def iteritems(self, pipe=None): """Return an iterator over the dictionary's ``(key, value)`` pairs.""" pipe = self.redis if pipe is None else pipe for k, v in self._data(pipe).items(): yield k, self.cache.get(k, v)
Return an iterator over the dictionary's ``(key, value)`` pairs.
entailment
def pop(self, key, default=__marker): """If *key* is in the dictionary, remove it and return its value, else return *default*. If *default* is not given and *key* is not in the dictionary, a :exc:`KeyError` is raised. """ pickled_key = self._pickle_key(key) if key in self.cache: self.redis.hdel(self.key, pickled_key) return self.cache.pop(key) def pop_trans(pipe): pickled_value = pipe.hget(self.key, pickled_key) if pickled_value is None: if default is self.__marker: raise KeyError(key) return default pipe.hdel(self.key, pickled_key) return self._unpickle(pickled_value) value = self._transaction(pop_trans) self.cache.pop(key, None) return value
If *key* is in the dictionary, remove it and return its value, else return *default*. If *default* is not given and *key* is not in the dictionary, a :exc:`KeyError` is raised.
entailment
def popitem(self): """Remove and return an arbitrary ``(key, value)`` pair from the dictionary. :func:`popitem` is useful to destructively iterate over a dictionary, as often used in set algorithms. If the dictionary is empty, calling :func:`popitem` raises a :exc:`KeyError`. """ def popitem_trans(pipe): try: pickled_key = pipe.hkeys(self.key)[0] except IndexError: raise KeyError # pop its value pipe.multi() pipe.hget(self.key, pickled_key) pipe.hdel(self.key, pickled_key) pickled_value, __ = pipe.execute() return ( self._unpickle_key(pickled_key), self._unpickle(pickled_value) ) key, value = self._transaction(popitem_trans) return key, self.cache.pop(key, value)
Remove and return an arbitrary ``(key, value)`` pair from the dictionary. :func:`popitem` is useful to destructively iterate over a dictionary, as often used in set algorithms. If the dictionary is empty, calling :func:`popitem` raises a :exc:`KeyError`.
entailment
def setdefault(self, key, default=None): """If *key* is in the dictionary, return its value. If not, insert *key* with a value of *default* and return *default*. *default* defaults to :obj:`None`. """ if key in self.cache: return self.cache[key] def setdefault_trans(pipe): pickled_key = self._pickle_key(key) pipe.multi() pipe.hsetnx(self.key, pickled_key, self._pickle_value(default)) pipe.hget(self.key, pickled_key) __, pickled_value = pipe.execute() return self._unpickle(pickled_value) value = self._transaction(setdefault_trans) if self.writeback: self.cache[key] = value return value
If *key* is in the dictionary, return its value. If not, insert *key* with a value of *default* and return *default*. *default* defaults to :obj:`None`.
entailment
def update(self, other=None, **kwargs): """Update the dictionary with the key/value pairs from *other*, overwriting existing keys. Return :obj:`None`. :func:`update` accepts either another dictionary object or an iterable of key/value pairs (as tuples or other iterables of length two). If keyword arguments are specified, the dictionary is then updated with those key/value pairs: ``d.update(red=1, blue=2)``. """ if other is not None: if self._same_redis(other, RedisCollection): self._update_helper(other, use_redis=True) elif hasattr(other, 'keys'): self._update_helper(other) else: self._update_helper({k: v for k, v in other}) if kwargs: self._update_helper(kwargs)
Update the dictionary with the key/value pairs from *other*, overwriting existing keys. Return :obj:`None`. :func:`update` accepts either another dictionary object or an iterable of key/value pairs (as tuples or other iterables of length two). If keyword arguments are specified, the dictionary is then updated with those key/value pairs: ``d.update(red=1, blue=2)``.
entailment
def copy(self, key=None): """ Return a new collection with the same items as this one. If *key* is specified, create the new collection with the given Redis key. """ other = self.__class__(redis=self.redis, key=key) other.update(self) return other
Return a new collection with the same items as this one. If *key* is specified, create the new collection with the given Redis key.
entailment
def fromkeys(cls, seq, value=None, **kwargs): """Create a new dictionary with keys from *seq* and values set to *value*. .. note:: :func:`fromkeys` is a class method that returns a new dictionary. It is possible to specify additional keyword arguments to be passed to :func:`__init__` of the new object. """ values = ((key, value) for key in seq) return cls(values, **kwargs)
Create a new dictionary with keys from *seq* and values set to *value*. .. note:: :func:`fromkeys` is a class method that returns a new dictionary. It is possible to specify additional keyword arguments to be passed to :func:`__init__` of the new object.
entailment
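A sketch of the class-method form; the `key` keyword is one of the `__init__` arguments forwarded via the extra kwargs (as with copy above), and a reachable Redis server is assumed:

from redis_collections import Dict

d = Dict.fromkeys(['spam', 'eggs'], 0, key='my-dict')
print(sorted(d.items()))  # [('eggs', 0), ('spam', 0)]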
def scan_items(self): """ Yield each of the ``(key, value)`` pairs from the collection, without pulling them all into memory. .. warning:: This method is not available on the dictionary collections provided by Python. This method may return the same (key, value) pair multiple times. See the `Redis SCAN documentation <http://redis.io/commands/scan#scan-guarantees>`_ for details. """ for k, v in self.redis.hscan_iter(self.key): yield self._unpickle_key(k), self._unpickle(v)
Yield each of the ``(key, value)`` pairs from the collection, without pulling them all into memory. .. warning:: This method is not available on the dictionary collections provided by Python. This method may return the same (key, value) pair multiple times. See the `Redis SCAN documentation <http://redis.io/commands/scan#scan-guarantees>`_ for details.
entailment
def update(self, other=None, **kwargs): """Elements are counted from an *iterable* or added-in from another *mapping* (or counter). Like :func:`dict.update` but adds counts instead of replacing them. Also, the *iterable* is expected to be a sequence of elements, not a sequence of ``(key, value)`` pairs. """ if other is not None: if self._same_redis(other, RedisCollection): self._update_helper(other, operator.add, use_redis=True) elif hasattr(other, 'keys'): self._update_helper(other, operator.add) else: self._update_helper(collections.Counter(other), operator.add) if kwargs: self._update_helper(kwargs, operator.add)
Elements are counted from an *iterable* or added-in from another *mapping* (or counter). Like :func:`dict.update` but adds counts instead of replacing them. Also, the *iterable* is expected to be a sequence of elements, not a sequence of ``(key, value)`` pairs.
entailment
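The add-counts semantics mirror collections.Counter; a minimal sketch assuming redis_collections' Counter and a reachable Redis server:

from redis_collections import Counter

c = Counter('gallahad')  # letter counts: 3 x 'a', 2 x 'l', ...
c.update('aal')          # adds counts instead of replacing them
print(c['a'])            # 5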
def subtract(self, other=None, **kwargs): """Elements are subtracted from an *iterable* or from another *mapping* (or counter). Like :func:`dict.update` but subtracts counts instead of replacing them. """ if other is not None: if self._same_redis(other, RedisCollection): self._update_helper(other, operator.sub, use_redis=True) elif hasattr(other, 'keys'): self._update_helper(other, operator.sub) else: self._update_helper(collections.Counter(other), operator.sub) if kwargs: self._update_helper(kwargs, operator.sub)
Elements are subtracted from an *iterable* or from another *mapping* (or counter). Like :func:`dict.update` but subtracts counts instead of replacing them.
entailment
def ack(self, msg): """Processes the received message. I don't need to generate an ack message. """ self.log.info("senderID:%s Received: %s " % (self.senderID, msg['body'])) return stomper.NO_REPONSE_NEEDED
Processes the received message. I don't need to generate an ack message.
entailment
def _clear(self, pipe=None): """Helper for clear operations. :param pipe: Redis pipe in case update is performed as a part of transaction. :type pipe: :class:`redis.client.StrictPipeline` or :class:`redis.client.StrictRedis` """ redis = self.redis if pipe is None else pipe redis.delete(self.key)
Helper for clear operations. :param pipe: Redis pipe in case update is performed as a part of transaction. :type pipe: :class:`redis.client.StrictPipeline` or :class:`redis.client.StrictRedis`
entailment
def _normalize_index(self, index, pipe=None): """Convert negative indexes into their positive equivalents.""" pipe = self.redis if pipe is None else pipe len_self = self.__len__(pipe) positive_index = index if index >= 0 else len_self + index return len_self, positive_index
Convert negative indexes into their positive equivalents.
entailment
def _normalize_slice(self, index, pipe=None):
    """Given a :obj:`slice` *index*, return a 5-tuple
    ``(start, stop, step, forward, len_self)``. The first three items can
    be used with the ``range`` function to retrieve the values associated
    with the slice; *forward* indicates the direction and *len_self* is
    the length of the collection.
    """
    if index.step == 0:
        raise ValueError
    pipe = self.redis if pipe is None else pipe
    len_self = self.__len__(pipe)

    step = index.step or 1
    forward = step > 0
    step = abs(step)

    if index.start is None:
        start = 0 if forward else len_self - 1
    elif index.start < 0:
        start = max(len_self + index.start, 0)
    else:
        start = min(index.start, len_self)

    if index.stop is None:
        stop = len_self if forward else -1
    elif index.stop < 0:
        stop = max(len_self + index.stop, 0)
    else:
        stop = min(index.stop, len_self)

    if not forward:
        start, stop = min(stop + 1, len_self), min(start + 1, len_self)

    return start, stop, step, forward, len_self
Given a :obj:`slice` *index*, return a 5-tuple ``(start, stop, step, forward, len_self)``. The first three items can be used with the ``range`` function to retrieve the values associated with the slice; *forward* indicates the direction and *len_self* is the length of the collection.
entailment
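A worked trace of the normalization, derived by stepping through the code above for a collection of length 5:

# slice(1, 4)           -> (1, 4, 1, True, 5)   walk range(1, 4, 1) forward
# slice(-2, None)       -> (3, 5, 1, True, 5)   negative start becomes len + start
# slice(None, None, -1) -> (0, 5, 1, False, 5)  walk range(0, 5, 1), then reverse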
def _transaction(self, fn, *extra_keys): """Helper simplifying code within watched transaction. Takes *fn*, function treated as a transaction. Returns whatever *fn* returns. ``self.key`` is watched. *fn* takes *pipe* as the only argument. :param fn: Closure treated as a transaction. :type fn: function *fn(pipe)* :param extra_keys: Optional list of additional keys to watch. :type extra_keys: list :rtype: whatever *fn* returns """ results = [] def trans(pipe): results.append(fn(pipe)) self.redis.transaction(trans, self.key, *extra_keys) return results[0]
Helper simplifying code within watched transaction. Takes *fn*, function treated as a transaction. Returns whatever *fn* returns. ``self.key`` is watched. *fn* takes *pipe* as the only argument. :param fn: Closure treated as a transaction. :type fn: function *fn(pipe)* :param extra_keys: Optional list of additional keys to watch. :type extra_keys: list :rtype: whatever *fn* returns
entailment
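The pattern used by pop and popitem above, sketched: reads run while self.key is WATCHed, and writes queued after pipe.multi() apply atomically; `self` and `pickled_key` are assumed to be as in those methods.

def move_trans(pipe):
    # read under WATCH; a concurrent write to self.key retries the closure
    pickled_value = pipe.hget(self.key, pickled_key)
    # queue the write; executed atomically when the transaction commits
    pipe.multi()
    pipe.hdel(self.key, pickled_key)
    return pickled_value

pickled_value = self._transaction(move_trans)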
def recursive_path(pack, path): """Find paths recursively""" matches = [] for root, _, filenames in os.walk(os.path.join(pack, path)): for filename in filenames: matches.append(os.path.join(root, filename)[len(pack) + 1:]) return matches
Find paths recursively
entailment
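This helper is shaped for setup.py package_data; a sketch with an illustrative package name:

from setuptools import setup

setup(
    name='mypack',
    version='0.1',
    packages=['mypack'],
    # include every file under mypack/templates, paths relative to the package
    package_data={'mypack': recursive_path('mypack', 'templates')},
)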
def nack(messageid, subscriptionid, transactionid=None):
    """STOMP negative acknowledge command.

    NACK is the opposite of ACK. It is used to tell the server that the
    client did not consume the message. The server can then either send
    the message to a different client, discard it, or put it in a dead
    letter queue. The exact behavior is server specific.

    messageid:
        This is the id of the message we are acknowledging,
        what else could it be? ;)

    subscriptionid:
        This is the id of the subscription that applies to the message.

    transactionid:
        This is the id that all actions in this transaction will have.
        If this is not given then no transaction header is added to
        the frame.

    """
    header = 'subscription:%s\nmessage-id:%s' % (subscriptionid, messageid)
    if transactionid:
        header += '\ntransaction:%s' % transactionid
    return "NACK\n%s\n\n\x00\n" % header
STOMP negative acknowledge command. NACK is the opposite of ACK. It is used to tell the server that the client did not consume the message. The server can then either send the message to a different client, discard it, or put it in a dead letter queue. The exact behavior is server specific. messageid: This is the id of the message we are acknowledging, what else could it be? ;) subscriptionid: This is the id of the subscription that applies to the message. transactionid: This is the id that all actions in this transaction will have. If this is not given then no transaction header is added to the frame.
entailment
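Reading the format string directly, a call produces a frame like this (ids illustrative):

frame = nack('msg-001', 'sub-0')
# 'NACK\nsubscription:sub-0\nmessage-id:msg-001\n\n\x00\n'
frame = nack('msg-001', 'sub-0', transactionid='tx-1')
# 'NACK\nsubscription:sub-0\nmessage-id:msg-001\ntransaction:tx-1\n\n\x00\n'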
def connect(username, password, host, heartbeats=(0,0)):
    """STOMP connect command.

    username, password:
        These are the needed auth details to connect to the
        message server.

    host:
        The virtual host name sent in the host header.

    heartbeats:
        A (cx, cy) pair of heart-beat intervals, in milliseconds,
        sent in the heart-beat header.

    After sending this we will receive a CONNECTED
    message which will contain our session id.

    """
    if len(heartbeats) != 2:
        raise ValueError('Invalid heartbeat %r' % heartbeats)
    cx, cy = heartbeats

    return "CONNECT\naccept-version:1.1\nhost:%s\nheart-beat:%i,%i\nlogin:%s\npasscode:%s\n\n\x00\n" % (host, cx, cy, username, password)
STOMP connect command. username, password: These are the needed auth details to connect to the message server. host: The virtual host name sent in the host header. heartbeats: A (cx, cy) pair of heart-beat intervals, in milliseconds, sent in the heart-beat header. After sending this we will receive a CONNECTED message which will contain our session id.
entailment
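Again reading the format string, a call yields the following frame (credentials illustrative):

frame = connect('bob', 'secret', 'localhost')
# 'CONNECT\naccept-version:1.1\nhost:localhost\n'
# 'heart-beat:0,0\nlogin:bob\npasscode:secret\n\n\x00\n'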
def ack(self, msg): """Called when a MESSAGE has been received. Override this method to handle received messages. This function will generate an acknowledge message for the given message and transaction (if present). """ message_id = msg['headers']['message-id'] subscription = msg['headers']['subscription'] transaction_id = None if 'transaction-id' in msg['headers']: transaction_id = msg['headers']['transaction-id'] # print "acknowledging message id <%s>." % message_id return ack(message_id, subscription, transaction_id)
Called when a MESSAGE has been received. Override this method to handle received messages. This function will generate an acknowledge message for the given message and transaction (if present).
entailment
def getOneMessage ( self ):
    """
    I pull one complete message off the buffer and return it decoded
    as a dict. If there is no complete message in the buffer, I
    return None.

    Note that the buffer can contain more than one message. You
    should therefore call me in a loop until I return None.
    """
    ( mbytes, hbytes ) = self._findMessageBytes ( self.buffer )
    if not mbytes:
        return None

    msgdata = self.buffer[:mbytes]
    self.buffer = self.buffer[mbytes:]
    hdata = msgdata[:hbytes]
    elems = hdata.split ( '\n' )
    cmd = elems.pop ( 0 )
    headers = {}

    # We can't use a simple split because the value can legally contain
    # colon characters (for example, the session returned by ActiveMQ).
    # str.index() raises ValueError when ':' is missing (str.find() would
    # silently return -1), so lines without a colon are skipped here.
    for e in elems:
        try:
            i = e.index ( ':' )
        except ValueError:
            continue
        k = e[:i].strip()
        v = e[i+1:].strip()
        headers [ k ] = v
    # hbytes points to the start of the '\n\n' at the end of the header,
    # so 2 bytes beyond this is the start of the body. The body EXCLUDES
    # the final two bytes, which are '\x00\n'. Note that these 2 bytes
    # are UNRELATED to the 2-byte '\n\n' that Frame.pack() used to insert
    # into the data stream.
    body = msgdata[hbytes+2:-2]
    msg = { 'cmd'     : cmd,
            'headers' : headers,
            'body'    : body,
            }
    return msg
I pull one complete message off the buffer and return it decoded as a dict. If there is no complete message in the buffer, I return None. Note that the buffer can contain more than one message. You should therefore call me in a loop until I return None.
entailment
def _findMessageBytes ( self, data ):
    """
    I examine the data passed to me and return a 2-tuple of the form:

       ( message_length, header_length )

    where message_length is the length in bytes of the first complete
    message, if it contains at least one message, or 0 if it
    contains no message.

    If message_length is non-zero, header_length contains the length in
    bytes of the header. If message_length is zero, header_length should
    be ignored.

    You should probably not call me directly. Call getOneMessage instead.
    """

    # Sanity check. See the docstring for the method to see what it
    # does and why we need it.
    self.syncBuffer()

    # If the string '\n\n' does not exist, we don't even have the complete
    # header yet and we MUST exit.
    try:
        i = data.index ( '\n\n' )
    except ValueError:
        return ( 0, 0 )

    # If the string '\n\n' exists, then we have the entire header and can
    # check for the content-length header. If it exists, we can check
    # the length of the buffer for the number of bytes, else we check for
    # the existence of a null byte.

    # Pull out the header before we perform the regexp search. This
    # prevents us from matching (possibly malicious) strings in the
    # body.
    _hdr = self.buffer[:i]
    match = content_length_re.search ( _hdr )
    if match:

        # There was a content-length header, so read out the value.
        content_length = int ( match.groups()[0] )

        # THIS IS NO LONGER THE CASE IF WE REMOVE THE '\n\n' in
        # Frame.pack()
        # This is the content length of the body up until the null
        # byte, not the entire message. Note that this INCLUDES the 2
        # '\n\n' bytes inserted by the STOMP encoder after the body
        # (see the calculation of content_length in
        # StompEngine.callRemote()), so we only need to add 2 final bytes
        # for the footer.
        #
        # The message looks like:
        #
        #   <header>\n\n<body>\n\n\x00\n
        #            ^         ^^^^
        #            (i)       included in content_length!
        #
        # We have the location of the end of the header (i), so we
        # need to ensure that the message contains at least:
        #
        #   i + len ( '\n\n' ) + content_length + len ( '\x00\n' )
        #
        # Note that i is also the count of bytes in the header, because
        # of the fact that str.index() returns a 0-indexed value.
        req_len = i + len_sep + content_length + len_footer
        # log.msg ( "We have [%s] bytes and need [%s] bytes" %
        #           ( len ( data ), req_len, ) )
        if len ( data ) < req_len:
            # We don't have enough bytes in the buffer.
            return ( 0, 0 )
        else:
            # We have enough bytes in the buffer
            return ( req_len, i )
    else:
        # There was no content-length header, so just look for the
        # message terminator ('\x00\n' ).
        try:
            j = data.index ( '\x00\n' )
        except ValueError:
            return ( 0, 0 )
        # j points to the 0-indexed location of the null byte. However,
        # we need to add 1 (to turn it into a byte count) and 1 to take
        # account of the final '\n' character after the null byte.
        return ( j + 2, i )
I examine the data passed to me and return a 2-tuple of the form: ( message_length, header_length ) where message_length is the length in bytes of the first complete message, if it contains at least one message, or 0 if it contains no message. If message_length is non-zero, header_length contains the length in bytes of the header. If message_length is zero, header_length should be ignored. You should probably not call me directly. Call getOneMessage instead.
entailment