sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def _update_style(self):
    """Resolve ``self._pygments_style`` to a Pygments style class.

    Falls back to the bundled QtStyle/DarculaStyle when Pygments cannot
    find the style (e.g. plugin styles from a frozen app), and ultimately
    to the ``'default'`` style.
    """
    name = self._pygments_style
    try:
        self._style = get_style_by_name(name)
    except ClassNotFound:
        # Style not registered with Pygments: try the styles shipped
        # with pyqode, otherwise revert to the default style.
        if name == 'qt':
            from pyqode.core.styles import QtStyle
            self._style = QtStyle
        elif name == 'darcula':
            from pyqode.core.styles import DarculaStyle
            self._style = DarculaStyle
        else:
            self._style = get_style_by_name('default')
            self._pygments_style = 'default'
    self._clear_caches()
Sets the style to the specified Pygments style.
entailment
def _get_format(self, token):
    """Return the QTextCharFormat to use for *token* (or None).

    Whitespace tokens use the editor's whitespace foreground; every
    other token is resolved from the current style and memoized in
    ``self._formats``.
    """
    if token == Whitespace:
        return self.editor.whitespaces_foreground
    try:
        return self._formats[token]
    except KeyError:
        fmt = self._get_format_from_style(token, self._style)
        self._formats[token] = fmt
        return fmt
Returns a QTextCharFormat for token or None.
entailment
def goto_line(self, line, column=0, move=True):
    """Move the text cursor to the specified position.

    :param line: Number of the line to go to (0 based)
    :param column: Optional column number. Default is 0 (start of line).
    :param move: True to move the cursor. False will return the cursor
        without setting it on the editor.
    :return: The new text cursor
    :rtype: QtGui.QTextCursor
    """
    cursor = self.move_cursor_to(line)
    if column:
        cursor.movePosition(cursor.Right, cursor.MoveAnchor, column)
    if move:
        block = cursor.block()
        try:
            folding_panel = self._editor.panels.get('FoldingPanel')
        except KeyError:
            # No folding panel installed; nothing to unfold.
            pass
        else:
            from pyqode.core.api.folding import FoldScope
            if not block.isVisible():
                # The target block is hidden in a collapsed scope:
                # expand its parent fold trigger first.
                block = FoldScope.find_parent_scope(block)
                if TextBlockHelper.is_collapsed(block):
                    folding_panel.toggle_fold_trigger(block)
        self._editor.setTextCursor(cursor)
    return cursor
Moves the text cursor to the specified position.. :param line: Number of the line to go to (0 based) :param column: Optional column number. Default is 0 (start of line). :param move: True to move the cursor. False will return the cursor without setting it on the editor. :return: The new text cursor :rtype: QtGui.QTextCursor
entailment
def select_lines(self, start=0, end=-1, apply_selection=True):
    """Select whole lines from *start* to *end* (0 based, inclusive).

    Returns the QTextCursor that holds the selection; when
    ``apply_selection`` is True the cursor is also applied to the
    editor widget. Use ``end=-1`` to select up to the last line of the
    document.

    :param start: Start line number (0 based)
    :param end: End line number (0 based), -1 for end of document
    :param apply_selection: True to apply the selection on the editor
    :returns: A QTextCursor that holds the requested selection
    """
    if end == -1:
        end = self.line_count() - 1
    if start < 0:
        start = 0
    cursor = self.move_cursor_to(start)
    if end > start:
        # Extend downwards, then to the end of the last line.
        cursor.movePosition(cursor.Down, cursor.KeepAnchor, end - start)
        cursor.movePosition(cursor.EndOfLine, cursor.KeepAnchor)
    elif end < start:
        # Extend upwards; anchor at the end of the start line so that
        # line is fully included in the selection.
        cursor.movePosition(cursor.EndOfLine, cursor.MoveAnchor)
        cursor.movePosition(cursor.Up, cursor.KeepAnchor, start - end)
        cursor.movePosition(cursor.StartOfLine, cursor.KeepAnchor)
    else:
        # Single line: just select to its end.
        cursor.movePosition(cursor.EndOfLine, cursor.KeepAnchor)
    if apply_selection:
        self._editor.setTextCursor(cursor)
    return cursor
Selects entire lines between start and end line numbers. This functions apply the selection and returns the text cursor that contains the selection. Optionally it is possible to prevent the selection from being applied on the code editor widget by setting ``apply_selection`` to False. :param start: Start line number (0 based) :param end: End line number (0 based). Use -1 to select up to the end of the document :param apply_selection: True to apply the selection before returning the QTextCursor. :returns: A QTextCursor that holds the requested selection
entailment
def line_indent(self, line_nbr=None):
    """Return the indentation level (leading spaces) of a line.

    :param line_nbr: Line number to inspect; ``None`` means the current
        line. A QTextBlock instance may be passed instead of an int.
    :return: Number of leading whitespace characters on that line.
    """
    if line_nbr is None:
        line_nbr = self.current_line_nbr()
    elif isinstance(line_nbr, QtGui.QTextBlock):
        line_nbr = line_nbr.blockNumber()
    text = self.line_text(line_nbr)
    return len(text) - len(text.lstrip())
Returns the indent level of the specified line :param line_nbr: Number of the line to get indentation (1 base). Pass None to use the current line number. Note that you can also pass a QTextBlock instance instead of an int. :return: Number of spaces that makes the indentation level of the current line
entailment
def get_right_word(self, cursor=None):
    """Return the word immediately to the right of the text cursor.

    :param cursor: QTextCursor to start from; defaults to the editor's
        current cursor.
    :return: The word on the right of the text cursor, stripped.
    """
    if cursor is None:
        cursor = self._editor.textCursor()
    cursor.movePosition(QtGui.QTextCursor.WordRight,
                        QtGui.QTextCursor.KeepAnchor)
    return cursor.selectedText().strip()
Gets the character on the right of the text cursor. :param cursor: QTextCursor where the search will start. :return: The word that is on the right of the text cursor.
entailment
def search_text(self, text_cursor, search_txt, search_flags):
    """Search ``search_txt`` in the editor's document.

    :param text_cursor: Current text cursor
    :param search_txt: Text to search
    :param search_flags: QTextDocument.FindFlags
    :returns: (list of (selectionStart, selectionEnd) tuples, index of
        the occurrence containing *text_cursor* or -1)
    :rtype: tuple([], int)
    """
    def contains(outer, inner):
        # True when inner's selection lies within outer's selection.
        return (inner.selectionStart() >= outer.selectionStart() and
                inner.selectionEnd() <= outer.selectionEnd())

    document = self._editor.document()
    occurrences = []
    current_index = -1
    match = document.find(search_txt, 0, search_flags)
    while not match.isNull():
        if contains(match, text_cursor):
            current_index = len(occurrences)
        occurrences.append((match.selectionStart(), match.selectionEnd()))
        # Restart one character further so overlapping matches are
        # found as well.
        match.setPosition(match.position() + 1)
        match = document.find(search_txt, match, search_flags)
    return occurrences, current_index
Searches a text in a text document. :param text_cursor: Current text cursor :param search_txt: Text to search :param search_flags: QTextDocument.FindFlags :returns: the list of occurrences, the current occurrence index :rtype: tuple([], int)
entailment
def is_comment_or_string(self, cursor_or_block, formats=None):
    """Tell whether a block/cursor position lies in a comment or string.

    :param cursor_or_block: QTextCursor or QTextBlock
    :param formats: color scheme format keys to match against; defaults
        to ``['comment', 'string', 'docstring']``.
    """
    if formats is None:
        formats = ["comment", "string", "docstring"]
    layout = None
    pos = 0
    if isinstance(cursor_or_block, QtGui.QTextBlock):
        # For a block, probe the position of its last character.
        pos = len(cursor_or_block.text()) - 1
        layout = cursor_or_block.layout()
    elif isinstance(cursor_or_block, QtGui.QTextCursor):
        block = cursor_or_block.block()
        pos = cursor_or_block.position() - block.position()
        layout = block.layout()
    if layout is None:
        return False
    highlighter = self._editor.syntax_highlighter
    if highlighter:
        ref_formats = highlighter.color_scheme.formats
        for rng in layout.additionalFormats():
            # Only consider the format range that covers pos.
            if rng.start <= pos < (rng.start + rng.length):
                for key in formats:
                    if (ref_formats[key] == rng.format and
                            rng.format.objectType() ==
                            rng.format.UserObject):
                        return True
    return False
Checks if a block/cursor is a string or a comment. :param cursor_or_block: QTextCursor or QTextBlock :param formats: the list of color scheme formats to consider. By default, it will consider the following keys: 'comment', 'string', 'docstring'.
entailment
def match_select(self, ignored_symbols=None):
    """Select the text between the matching quotes or parentheses that
    surround the current cursor position.

    :param ignored_symbols: iterable of opening symbols to ignore.
    :return: True if a matched selection was applied, False otherwise.
    """
    def filter_matching(ignored_symbols, matching):
        """Remove every ignored symbol from the match dict."""
        if ignored_symbols is not None:
            # Iterate over a snapshot of the keys: popping while
            # iterating the live dict view raises
            # "RuntimeError: dictionary changed size during iteration"
            # on Python 3.
            for symbol in list(matching.keys()):
                if symbol in ignored_symbols:
                    matching.pop(symbol)
        return matching

    def find_opening_symbol(cursor, matching):
        """Find the position of the opening symbol.

        :param cursor: Current text cursor
        :param matching: symbol matches map
        """
        start_pos = None
        opening_char = None
        # Counters of closing symbols seen while scanning left
        # (quotes excluded: they are their own opposite).
        closed = {k: 0 for k in matching.values() if k not in ['"', "'"]}
        # go left
        stop = False
        while not stop and not cursor.atStart():
            cursor.clearSelection()
            cursor.movePosition(cursor.Left, cursor.KeepAnchor)
            char = cursor.selectedText()
            if char in closed.keys():
                closed[char] += 1
            elif char in matching.keys():
                opposite = matching[char]
                if opposite in closed.keys() and closed[opposite]:
                    # This opener belongs to a nested pair we already
                    # passed; skip it.
                    closed[opposite] -= 1
                    continue
                else:
                    # found opening quote or parenthesis
                    start_pos = cursor.position() + 1
                    stop = True
                    opening_char = char
        return opening_char, start_pos

    def find_closing_symbol(cursor, matching, opening_char, original_pos):
        """Find the position of the closing symbol.

        :param cursor: current text cursor
        :param matching: symbol matching dict
        :param opening_char: the opening character
        :param original_pos: position of the opening character.
        """
        end_pos = None
        cursor.setPosition(original_pos)
        rev_matching = {v: k for k, v in matching.items()}
        # Counters of opening symbols seen while scanning right.
        opened = {k: 0 for k in rev_matching.values()
                  if k not in ['"', "'"]}
        stop = False
        while not stop and not cursor.atEnd():
            cursor.clearSelection()
            cursor.movePosition(cursor.Right, cursor.KeepAnchor)
            char = cursor.selectedText()
            if char in opened.keys():
                opened[char] += 1
            elif char in rev_matching.keys():
                opposite = rev_matching[char]
                if opposite in opened.keys() and opened[opposite]:
                    # Closer of a nested pair; skip it.
                    opened[opposite] -= 1
                    continue
                elif matching[opening_char] == char:
                    # found the matching closing quote or parenthesis
                    end_pos = cursor.position() - 1
                    stop = True
        return end_pos

    matching = {'(': ')', '{': '}', '[': ']', '"': '"', "'": "'"}
    filter_matching(ignored_symbols, matching)
    cursor = self._editor.textCursor()
    original_pos = cursor.position()
    end_pos = None
    opening_char, start_pos = find_opening_symbol(cursor, matching)
    if opening_char:
        end_pos = find_closing_symbol(
            cursor, matching, opening_char, original_pos)
    if start_pos and end_pos:
        cursor.setPosition(start_pos)
        cursor.movePosition(cursor.Right, cursor.KeepAnchor,
                            end_pos - start_pos)
        self._editor.setTextCursor(cursor)
        return True
    else:
        return False
Performs matched selection, selects text between matching quotes or parentheses. :param ignored_symbols; matching symbols to ignore.
entailment
def main():
    """Entrypoint for the console script gcp-devrel-py-tools.

    Builds the argument parser, registers every tool's sub-commands and
    dispatches to the selected command's handler.
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    appengine.register_commands(subparsers)
    requirements.register_commands(subparsers)
    pylint.register_commands(subparsers)
    args = parser.parse_args()
    # On Python 3, sub-commands are optional by default: invoking the
    # script with no command would leave ``args`` without a ``func``
    # attribute and crash with an AttributeError. Show usage instead.
    if not hasattr(args, 'func'):
        parser.print_help()
        parser.exit(1)
    args.func(args)
Entrypoint for the console script gcp-devrel-py-tools.
entailment
async def run(websession):
    """Run the pollen.com demo queries and print their results."""
    try:
        client = Client('17015', websession)
        print('Client instantiated for ZIP "{0}"'.format(client.zip_code))
        # (title, coroutine factory) pairs, printed in order.
        sections = [
            ('CURRENT ALLERGENS', client.allergens.current),
            ('EXTENDED ALLERGENS', client.allergens.extended),
            ('HISTORIC ALLERGENS', client.allergens.historic),
            ('ALLERGY OUTLOOK', client.allergens.outlook),
            ('EXTENDED DISEASE INFO', client.disease.extended),
            ('CURRENT ASTHMA INFO', client.asthma.current),
            ('EXTENDED ASTHMA INFO', client.asthma.extended),
            ('HISTORIC ASTHMA INFO', client.asthma.historic),
        ]
        for title, query in sections:
            print()
            print(title)
            print(await query())
    except PollenComError as err:
        print(err)
Run.
entailment
async def outlook(self) -> dict:
    """Get the allergen outlook forecast.

    :raises InvalidZipError: when the service has no data for the ZIP.
    :raises RequestError: for any other request failure.
    """
    url = 'https://www.pollen.com/api/forecast/outlook'
    try:
        return await self._request('get', url)
    except RequestError as err:
        # A 404 from this endpoint means the ZIP code is unknown.
        if '404' in str(err):
            raise InvalidZipError('No data returned for ZIP code')
        raise RequestError(err)
Get allergen outlook.
entailment
def dev():
    """Define dev stage (local vagrant target at 192.168.1.2)."""
    # Web and load-balancer roles share the same vagrant box.
    env.roledefs = {
        'web': ['192.168.1.2'],
        'lb': ['192.168.1.2'],
    }
    env.user = 'vagrant'
    # Backends for the load balancer are the web hosts themselves.
    env.backends = env.roledefs['web']
    env.server_name = 'django_search_model-dev.net'
    env.short_server_name = 'django_search_model-dev'
    env.static_folder = '/site_media/'
    env.server_ip = '192.168.1.2'
    env.no_shared_sessions = False
    env.server_ssl_on = False
    env.goal = 'dev'
    env.socket_port = '8001'
    # No settings keys remapped on this stage (empty map).
    env.map_settings = {}
    execute(build_env)
Define dev stage
entailment
def install_postgres(user=None, dbname=None, password=None):
    """Install a Postgres server on the remote host.

    Thin fabric wrapper around
    ``pydiploy.django.install_postgres_server``.

    :param user: database user to create (None keeps pydiploy default)
    :param dbname: name of the database to create
    :param password: password for the database user
    """
    execute(pydiploy.django.install_postgres_server, user=user,
            dbname=dbname, password=password)
Install Postgres on remote
entailment
def field_to_dict(fields):
    """Build a nested dict describing the dependency of each lookup path.

    fields = ["toto", "toto__tata", "titi__tutu"] gives::

        {"toto": {..EMPTY_DICT.., "tata": EMPTY_DICT},
         "titi": {"tutu": EMPTY_DICT}}

    The EMPTY_DICT entries keep leaf fields from being lost: without
    them dico["toto"] would only contain "tata".
    Inspired by django.db.models.sql.add_select_related.
    """
    tree = {}
    for field in fields:
        parts = field.split(LOOKUP_SEP)
        node = tree
        # Descend (creating intermediate dicts) down to the leaf's parent.
        for part in parts[:-1]:
            node = node.setdefault(part, {})
        # Mark the leaf: create it with a copy of EMPTY_DICT, or merge
        # the marker into an already-existing sub-dict.
        node.setdefault(parts[-1], deepcopy(EMPTY_DICT)).update(
            deepcopy(EMPTY_DICT))
    return tree
Build dictionnary which dependancy for each field related to "root" fields = ["toto", "toto__tata", "titi__tutu"] dico = { "toto": { EMPTY_DICT, "tata": EMPTY_DICT }, "titi" : { "tutu": EMPTY_DICT } } EMPTY_DICT is useful because we don't lose field without it dico["toto"] would only contains "tata" inspired from django.db.models.sql.add_select_related
entailment
def alias_field(model, field):
    """Return the prefixed name ("ModelName-attr") of a lookup path."""
    parts = field.split(LOOKUP_SEP)
    # Walk the relations to reach the model that owns the final attribute.
    for part in parts[:-1]:
        model = associate_model(model, part)
    return model.__name__ + "-" + parts[-1]
Return the prefix name of a field
entailment
def associate_model(model, field):
    """Return the model targeted by a ForeignKey/ManyToMany relation."""
    class_field = model._meta.get_field(field)
    if hasattr(class_field, "field"):
        # Reverse relation object: go through the originating field.
        return class_field.field.related.related_model
    return class_field.related_model
Return the model associate to the ForeignKey or ManyToMany relation
entailment
def get_formfield(model, field):
    """Return the form field associated with *field* of *model*."""
    class_field = model._meta.get_field(field)
    if hasattr(class_field, "field"):
        # Reverse relation: build the form field from the original
        # field, otherwise the formfield would contain the reverse
        # relation.
        formfield = class_field.field.formfield()
    else:
        formfield = class_field.formfield()
    if isinstance(formfield, ChoiceField):
        formfield.choices = class_field.get_choices()
    return formfield
Return the formfied associate to the field of the model
entailment
def get_q_object(self):
    """Build the Q object filtering the queryset from the GET params."""
    q_object = Q()
    for field in self.searchable_fields:
        values = self.request.GET.getlist(
            alias_field(self.model, field), None)
        # Lookup expression: field name plus its optional specification
        # suffix (e.g. "__icontains").
        lookup = "{0}{1}".format(field, self.specifications.get(field, ''))
        # OR together every value supplied for this field ...
        field_q = Q()
        for value in values:
            if value:
                field_q |= Q(**{lookup: value})
        # ... then AND it with the other fields.
        q_object &= field_q
    return q_object
Build Q object to filter the queryset
entailment
def get_search_form(self):
    """Return the list of search forms, one per model involved.

    Builds a plain ``Form`` for each entry of ``get_dict_for_forms()``,
    populating it with the form fields of the searchable fields and
    initial values taken from the current GET parameters.
    """
    magic_dico_form = self.get_dict_for_forms()
    forms = []
    # Snapshot of the GET parameters as (key, [values]) pairs.
    initial = list(self.request.GET.lists())
    for key, value in magic_dico_form.items():
        form = Form()
        model = value["model"]
        if not value["fields"]:
            # No searchable field on this model: nothing to build.
            continue
        for field in value["fields"]:
            formfield = get_formfield(model, field)
            formfield.widget.attrs.update({'class': self.css_class})
            form.fields.update({
                field: formfield
            })
        initial_tmp = {}
        for k, vals in initial:
            # GET keys are prefixed "ModelName-field" (see alias_field);
            # keep only the ones belonging to this form's model.
            tmp_list = k.split(model.__name__ + "-")
            if len(tmp_list) == 2:
                # Single value: unwrap it; multiple values: keep the
                # non-empty ones as a list.
                list_val_tmp = vals[0] if len(vals) == 1 else \
                    [val for val in vals if val != '']
                initial_tmp[tmp_list[-1]] = list_val_tmp
        form.initial = initial_tmp
        form.prefix = model.__name__
        forms.append(form)
    return sorted(forms, key=lambda form: form.prefix)
Return list of form based on model
entailment
def get_dict_for_forms(self):
    """Map each model to the searchable fields it owns.

    Returns::

        {
            "str(model)": {
                "model": Model,
                "fields": []  # searchable_fields that are attributes
            }                 # of Model
        }

    built recursively from ``field_to_dict(self.searchable_fields)``.
    """
    magic_dico = field_to_dict(self.searchable_fields)
    dico = {}

    def dict_from_fields_r(mini_dict, dico, model):
        """ Create the dico recursively from the magic_dico """
        dico[str(model)] = {}
        dico[str(model)]["model"] = model
        dico[str(model)]["fields"] = []
        for key, value in mini_dict.items():
            if isinstance(value, bool):
                # Presumably a marker entry coming from EMPTY_DICT, not
                # a field name — TODO confirm against EMPTY_DICT.
                continue
            if value == EMPTY_DICT:
                # Leaf: plain attribute of the current model.
                dico[str(model)]["fields"].append(key)
            elif EMPTY_DICT.items() <= value.items():
                # Contains the EMPTY_DICT marker AND nested entries:
                # searched directly and also used as a relation.
                dico[str(model)]["fields"].append(key)
                model_tmp = associate_model(model, key)
                dict_from_fields_r(value, dico, model_tmp)
            else:
                # Pure relation: recurse into the related model.
                model_tmp = associate_model(model, key)
                dict_from_fields_r(value, dico, model_tmp)
    if magic_dico:
        dict_from_fields_r(magic_dico, dico, self.model)
    return dico
Build a dictionnary where searchable_fields are next to their model to be use in modelform_factory dico = { "str(model)" : { "model" : Model, "fields" = [] #searchable_fields which are attribute of Model } }
entailment
def raise_on_invalid_zip(func: Callable) -> Callable:
    """Raise an exception when there's no data (via a bad ZIP code).

    Decorator for async API calls returning the usual payload shape;
    raises :class:`InvalidZipError` when ``data['Location']['periods']``
    is empty.
    """
    from functools import wraps

    # Preserve the wrapped coroutine's __name__/__doc__ so tracebacks
    # and introspection stay meaningful.
    @wraps(func)
    async def decorator(*args: list, **kwargs: dict) -> dict:
        """Decorate."""
        data = await func(*args, **kwargs)
        if not data['Location']['periods']:
            raise InvalidZipError('No data returned for ZIP code')
        return data
    return decorator
Raise an exception when there's no data (via a bad ZIP code).
entailment
def parse(self):
    """Parse a git URL and return a :class:`Parsed` object.

    :returns: Parsed object
    :raise: :class:`.ParserError` on invalid URL
    """
    parsed = {
        'pathname': None,
        'protocols': self._get_protocols(),
        'protocol': 'ssh',
        'href': self._url,
        'resource': None,
        'user': None,
        'port': None,
        'name': None,
        'owner': None,
    }
    # Try every known URL shape; the first regex that matches wins.
    for regex in POSSIBLE_REGEXES:
        match = regex.search(self._url)
        if match:
            parsed.update(match.groupdict())
            break
    else:
        raise ParserError("Invalid URL '{}'".format(self._url))
    return Parsed(**parsed)
Parses a GIT URL and returns an object. Raises an exception on invalid URL. :returns: Parsed object :raise: :class:`.ParserError`
entailment
async def _request(
        self, method: str, url: str, *, headers: dict = None,
        params: dict = None, json: dict = None) -> dict:
    """Make an HTTP request and return the decoded JSON payload.

    The ZIP code is appended to *url*; a Referer matching the target
    host is always set.

    :raises RequestError: when the HTTP request fails.
    """
    full_url = '{0}/{1}'.format(url, self.zip_code)
    pieces = urlparse(url)
    headers = headers or {}
    headers.update({
        'Content-Type': 'application/json',
        'Referer': '{0}://{1}'.format(pieces.scheme, pieces.netloc),
        'User-Agent': API_USER_AGENT
    })
    async with self._websession.request(
            method, full_url, headers=headers, params=params,
            json=json) as resp:
        try:
            resp.raise_for_status()
            return await resp.json(content_type=None)
        except client_exceptions.ClientError as err:
            raise RequestError(
                'Error requesting data from {0}: {1}'.format(url, err))
Make a request against AirVisual.
entailment
def setup_errors(app, error_template="error.html"):
    """Register a handler rendering *error_template* for every default
    HTTP error response."""
    def error_handler(error):
        # Werkzeug HTTP errors carry their own code/name/description;
        # anything else is reported as a plain 500.
        if isinstance(error, HTTPException):
            description = error.get_description(request.environ)
            code = error.code
            name = error.name
        else:
            description = error
            code = 500
            name = "Internal Server Error"
        return render_template(error_template,
                               error=error,
                               code=code,
                               name=Markup(name),
                               description=Markup(description)), code

    for exception in default_exceptions:
        app.register_error_handler(exception, error_handler)
Add a handler for each of the available HTTP error responses.
entailment
def domain_search(self, domain=None, company=None, limit=None, offset=None,
                  emails_type=None, raw=False):
    """Return all the email addresses found for a given domain.

    :param domain: Domain on which to search for emails. Must be
        defined if *company* is not.
    :param company: Name of the company on which to search for emails.
        Must be defined if *domain* is not.
    :param limit: Maximum number of emails to give back (API default 10).
    :param offset: Number of emails to skip (API default 0).
    :param emails_type: 'personal' or 'generic'.
    :param raw: Gives back the entire response instead of just 'data'.
    :return: Full payload of the query as a dict.
    :raises MissingCompanyError: if neither domain nor company is given.
    """
    if not domain and not company:
        raise MissingCompanyError(
            'You must supply at least a domain name or a company name'
        )
    # 'domain' wins over 'company' when both are supplied.
    if domain:
        params = {'domain': domain, 'api_key': self.api_key}
    elif company:
        params = {'company': company, 'api_key': self.api_key}
    for key, value in (('limit', limit), ('offset', offset),
                       ('type', emails_type)):
        if value:
            params[key] = value
    endpoint = self.base_endpoint.format('domain-search')
    return self._query_hunter(endpoint, params, raw=raw)
Return all the email addresses found for a given domain. :param domain: The domain on which to search for emails. Must be defined if company is not. :param company: The name of the company on which to search for emails. Must be defined if domain is not. :param limit: The maximum number of emails to give back. Default is 10. :param offset: The number of emails to skip. Default is 0. :param emails_type: The type of emails to give back. Can be one of 'personal' or 'generic'. :param raw: Gives back the entire response instead of just the 'data'. :return: Full payload of the query as a dict, with email addresses found.
entailment
def email_finder(self, domain=None, company=None, first_name=None,
                 last_name=None, full_name=None, raw=False):
    """Find the email address of a person given its name and company.

    :param domain: Domain of the company where the person works. Must
        be defined if *company* is not.
    :param company: Name of the company where the person works. Must be
        defined if *domain* is not.
    :param first_name: First name; required (with *last_name*) unless
        *full_name* is given.
    :param last_name: Last name; required (with *first_name*) unless
        *full_name* is given.
    :param full_name: Full name; required unless both first and last
        names are given.
    :param raw: Gives back the entire response instead of (email, score).
    :return: (email, score) tuple, or the raw response when *raw*.
    :raises MissingCompanyError: if neither domain nor company is given.
    :raises MissingNameError: if no usable name combination is given.
    """
    params = self.base_params
    if not domain and not company:
        raise MissingCompanyError(
            'You must supply at least a domain name or a company name'
        )
    if domain:
        params['domain'] = domain
    elif company:
        params['company'] = company
    if not (first_name and last_name) and not full_name:
        raise MissingNameError(
            'You must supply a first name AND a last name OR a full name'
        )
    if first_name and last_name:
        params['first_name'] = first_name
        params['last_name'] = last_name
    elif full_name:
        params['full_name'] = full_name
    endpoint = self.base_endpoint.format('email-finder')
    res = self._query_hunter(endpoint, params, raw=raw)
    if raw:
        return res
    return res['email'], res['score']
Find the email address of a person given its name and company's domain. :param domain: The domain of the company where the person works. Must be defined if company is not. :param company: The name of the company where the person works. Must be defined if domain is not. :param first_name: The first name of the person. Must be defined if full_name is not. :param last_name: The last name of the person. Must be defined if full_name is not. :param full_name: The full name of the person. Must be defined if first_name AND last_name are not. :param raw: Gives back the entire response instead of just email and score. :return: email and score as a tuple.
entailment
def email_verifier(self, email, raw=False):
    """Verify the deliverability of a given email address.

    :param email: The email address to check.
    :param raw: Gives back the entire response instead of just 'data'.
    :return: Full payload of the query as a dict.
    """
    endpoint = self.base_endpoint.format('email-verifier')
    payload = {'email': email, 'api_key': self.api_key}
    return self._query_hunter(endpoint, payload, raw=raw)
Verify the deliverability of a given email adress.abs :param email: The email adress to check. :param raw: Gives back the entire response instead of just the 'data'. :return: Full payload of the query as a dict.
entailment
def email_count(self, domain=None, company=None, raw=False):
    """Return the number of email addresses Hunter has for a domain.

    :param domain: Domain of the company. Takes precedence over
        *company* when both are given.
    :param company: Name of the company. Must be defined if *domain*
        is not.
    :param raw: Gives back the entire response instead of just 'data'.
    :return: Full payload of the query as a dict.
    :raises MissingCompanyError: if neither domain nor company is given.
    """
    params = self.base_params
    if not domain and not company:
        raise MissingCompanyError(
            'You must supply at least a domain name or a company name'
        )
    if domain:
        params['domain'] = domain
    elif company:
        params['company'] = company
    endpoint = self.base_endpoint.format('email-count')
    return self._query_hunter(endpoint, params, raw=raw)
Give back the number of email adresses Hunter has for this domain/company. :param domain: The domain of the company where the person works. Must be defined if company is not. If both 'domain' and 'company' are given, the 'domain' will be used. :param company: The name of the company where the person works. Must be defined if domain is not. :param raw: Gives back the entire response instead of just the 'data'. :return: Full payload of the query as a dict.
entailment
def account_information(self, raw=False):
    """Return information about the account tied to the api_key.

    A computed ``calls.left`` entry (available - used) is added to the
    parsed response.

    :param raw: Gives back the entire response instead of just 'data'.
    :return: Full payload of the query as a dict.
    """
    endpoint = self.base_endpoint.format('account')
    res = self._query_hunter(endpoint, self.base_params, raw=raw)
    if raw:
        return res
    calls = res['calls']
    calls['left'] = calls['available'] - calls['used']
    return res
Gives the information about the account associated with the api_key. :param raw: Gives back the entire response instead of just the 'data'. :return: Full payload of the query as a dict.
entailment
def get_leads(self, offset=None, limit=None, lead_list_id=None,
              first_name=None, last_name=None, email=None, company=None,
              phone_number=None, twitter=None):
    """Return all the leads saved in your account.

    :param offset: Number of leads to skip.
    :param limit: Maximum number of leads to return.
    :param lead_list_id: Id of a lead list to query leads on.
    :param first_name: First name to filter on.
    :param last_name: Last name to filter on.
    :param email: Email to filter on.
    :param company: Company to filter on.
    :param phone_number: Phone number to filter on.
    :param twitter: Twitter account to filter on.
    :return: All leads found as a dict.
    """
    # Snapshot the arguments before any other local name is created so
    # the filter below only ever sees the parameters.
    captured = locals()
    filters = {name: value for name, value in captured.items()
               if value is not None}
    del filters['self']
    params = self.base_params
    params.update(filters)
    endpoint = self.base_endpoint.format('leads')
    return self._query_hunter(endpoint, params)
Gives back all the leads saved in your account. :param offset: Number of leads to skip. :param limit: Maximum number of leads to return. :param lead_list_id: Id of a lead list to query leads on. :param first_name: First name to filter on. :param last_name: Last name to filter on. :param email: Email to filter on. :param company: Company to filter on. :param phone_number: Phone number to filter on. :param twitter: Twitter account to filter on. :return: All leads found as a dict.
entailment
def get_lead(self, lead_id):
    """Get a specific lead saved on your account.

    :param lead_id: Id of the lead to fetch. Must be defined.
    :return: Lead found as a dict.
    """
    endpoint = self.base_endpoint.format('leads/' + str(lead_id))
    return self._query_hunter(endpoint, self.base_params)
Get a specific lead saved on your account. :param lead_id: Id of the lead to search. Must be defined. :return: Lead found as a dict.
entailment
def create_lead(self, first_name, last_name, email=None, position=None,
                company=None, company_industry=None, company_size=None,
                confidence_score=None, website=None, country_code=None,
                postal_code=None, source=None, linkedin_url=None,
                phone_number=None, twitter=None, leads_list_id=None):
    """Create a lead on your account.

    :param first_name: First name of the lead. Must be defined.
    :param last_name: Last name of the lead. Must be defined.
    :param email: Email of the lead.
    :param position: Professional position of the lead.
    :param company: Company of the lead.
    :param company_industry: Industry of the lead's company.
    :param company_size: Size of the lead's company.
    :param confidence_score: Confidence score of the lead's email.
    :param website: Website of the lead's company.
    :param country_code: Country code of the lead's company.
    :param postal_code: Postal code of the lead's company.
    :param source: Source of the lead's email.
    :param linkedin_url: URL of the lead's LinkedIn profile.
    :param phone_number: Phone number of the lead.
    :param twitter: The lead's Twitter account.
    :param leads_list_id: Id of the leads list to save the lead in.
    :return: The newly created lead as a dict.
    """
    # Snapshot the arguments before creating any other local name.
    captured = locals()
    payload = {name: value for name, value in captured.items()
               if value is not None}
    del payload['self']
    endpoint = self.base_endpoint.format('leads')
    return self._query_hunter(endpoint, self.base_params, 'post', payload)
Create a lead on your account. :param first_name: The first name of the lead to create. Must be defined. :param last_name: The last name of the lead to create. Must be defined. :param email: The email of the lead to create. :param position: The professional position of the lead to create. :param company: The company of the lead to create. :param company_industry: The type of industry of the company where the lead works. :param company_size: The size of the company where the lead works. :param confidence_score: The confidence score of the lead's email. :param website: The website of the lead's company. :param country_code: The country code of the lead's company. :param postal_code: The postal code of the lead's company. :param source: The source of the lead's email. :param linkedin_url: The URL of the lead's LinkedIn profile. :param phone_number: The phone number of the lead to create. :param twitter: The lead's Twitter account. :param leads_list_id: The id of the leads list where to save the new lead. :return: The newly created lead as a dict.
entailment
def get_leads_lists(self, offset=None, limit=None): """ Gives back all the leads lists saved on your account. :param offset: Number of lists to skip. :param limit: Maximum number of lists to return. :return: Leads lists found as a dict. """ params = self.base_params if offset: params['offset'] = offset if limit: params['limit'] = limit endpoint = self.base_endpoint.format('leads_lists') return self._query_hunter(endpoint, params)
Gives back all the leads lists saved on your account. :param offset: Number of lists to skip. :param limit: Maximum number of lists to return. :return: Leads lists found as a dict.
entailment
def create_leads_list(self, name, team_id=None): """ Create a leads list. :param name: Name of the list to create. Must be defined. :param team_id: The id of the list to share this list with. :return: The created leads list as a dict. """ params = self.base_params payload = {'name': name} if team_id: payload['team_id'] = team_id endpoint = self.base_endpoint.format('leads_lists') return self._query_hunter(endpoint, params, 'post', payload)
Create a leads list. :param name: Name of the list to create. Must be defined. :param team_id: The id of the list to share this list with. :return: The created leads list as a dict.
entailment
def update_leads_list(self, leads_list_id, name, team_id=None): """ Update a leads list. :param name: Name of the list to update. Must be defined. :param team_id: The id of the list to share this list with. :return: 204 Response. """ params = self.base_params payload = {'name': name} if team_id: payload['team_id'] = team_id endpoint = self.base_endpoint.format('leads_lists/' + str(leads_list_id)) return self._query_hunter(endpoint, params, 'put', payload)
Update a leads list. :param name: Name of the list to update. Must be defined. :param team_id: The id of the list to share this list with. :return: 204 Response.
entailment
def delete_leads_list(self, leads_list_id): """ Delete a leads list. :param leads_list_id: The id of the list to delete. :return: 204 Response. """ params = self.base_params endpoint = self.base_endpoint.format( 'leads_lists/' + str(leads_list_id) ) return self._query_hunter(endpoint, params, 'delete')
Delete a leads list. :param leads_list_id: The id of the list to delete. :return: 204 Response.
entailment
def to_int(s): """ converts a string to an integer >>> to_int('1_000_000') 1000000 >>> to_int('1e6') 1000000 >>> to_int('1000') 1000 """ try: return int(s.replace('_', '')) except ValueError: return int(ast.literal_eval(s))
converts a string to an integer >>> to_int('1_000_000') 1000000 >>> to_int('1e6') 1000000 >>> to_int('1000') 1000
entailment
def dicts_from_lines(lines): """ returns a generator producing dicts from json lines 1 JSON object per line is supported: {"name": "n1"} {"name": "n2"} Or 1 JSON object: { "name": "n1" } Or a list of JSON objects: [ {"name": "n1"}, {"name": "n2"}, ] """ lines = iter(lines) for line in lines: line = line.strip() if not line: continue # skip empty lines try: yield json.loads(line, object_pairs_hook=OrderedDict) except json.decoder.JSONDecodeError: content = line + ''.join(lines) dicts = json.loads(content, object_pairs_hook=OrderedDict) if isinstance(dicts, list): yield from dicts else: yield dicts
returns a generator producing dicts from json lines 1 JSON object per line is supported: {"name": "n1"} {"name": "n2"} Or 1 JSON object: { "name": "n1" } Or a list of JSON objects: [ {"name": "n1"}, {"name": "n2"}, ]
entailment
def compose(*funcs): """ Compose any number of unary functions into a single unary function. >>> import textwrap >>> from six import text_type >>> stripped = text_type.strip(textwrap.dedent(compose.__doc__)) >>> compose(text_type.strip, textwrap.dedent)(compose.__doc__) == stripped True Compose also allows the innermost function to take arbitrary arguments. >>> round_three = lambda x: round(x, ndigits=3) >>> f = compose(round_three, int.__truediv__) >>> [f(3*x, x+1) for x in range(1,10)] [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7] """ def compose_two(f1, f2): return lambda *args, **kwargs: f1(f2(*args, **kwargs)) return functools.reduce(compose_two, funcs)
Compose any number of unary functions into a single unary function. >>> import textwrap >>> from six import text_type >>> stripped = text_type.strip(textwrap.dedent(compose.__doc__)) >>> compose(text_type.strip, textwrap.dedent)(compose.__doc__) == stripped True Compose also allows the innermost function to take arbitrary arguments. >>> round_three = lambda x: round(x, ndigits=3) >>> f = compose(round_three, int.__truediv__) >>> [f(3*x, x+1) for x in range(1,10)] [1.5, 2.0, 2.25, 2.4, 2.5, 2.571, 2.625, 2.667, 2.7]
entailment
def method_caller(method_name, *args, **kwargs): """ Return a function that will call a named method on the target object with optional positional and keyword arguments. >>> lower = method_caller('lower') >>> lower('MyString') 'mystring' """ def call_method(target): func = getattr(target, method_name) return func(*args, **kwargs) return call_method
Return a function that will call a named method on the target object with optional positional and keyword arguments. >>> lower = method_caller('lower') >>> lower('MyString') 'mystring'
entailment
def once(func): """ Decorate func so it's only ever called the first time. This decorator can ensure that an expensive or non-idempotent function will not be expensive on subsequent calls and is idempotent. >>> add_three = once(lambda a: a+3) >>> add_three(3) 6 >>> add_three(9) 6 >>> add_three('12') 6 To reset the stored value, simply clear the property ``saved_result``. >>> del add_three.saved_result >>> add_three(9) 12 >>> add_three(8) 12 Or invoke 'reset()' on it. >>> add_three.reset() >>> add_three(-3) 0 >>> add_three(0) 0 """ @functools.wraps(func) def wrapper(*args, **kwargs): if not hasattr(wrapper, 'saved_result'): wrapper.saved_result = func(*args, **kwargs) return wrapper.saved_result wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result') return wrapper
Decorate func so it's only ever called the first time. This decorator can ensure that an expensive or non-idempotent function will not be expensive on subsequent calls and is idempotent. >>> add_three = once(lambda a: a+3) >>> add_three(3) 6 >>> add_three(9) 6 >>> add_three('12') 6 To reset the stored value, simply clear the property ``saved_result``. >>> del add_three.saved_result >>> add_three(9) 12 >>> add_three(8) 12 Or invoke 'reset()' on it. >>> add_three.reset() >>> add_three(-3) 0 >>> add_three(0) 0
entailment
def method_cache(method, cache_wrapper=None): """ Wrap lru_cache to support storing the cache data in the object instances. Abstracts the common paradigm where the method explicitly saves an underscore-prefixed protected property on first call and returns that subsequently. >>> class MyClass: ... calls = 0 ... ... @method_cache ... def method(self, value): ... self.calls += 1 ... return value >>> a = MyClass() >>> a.method(3) 3 >>> for x in range(75): ... res = a.method(x) >>> a.calls 75 Note that the apparent behavior will be exactly like that of lru_cache except that the cache is stored on each instance, so values in one instance will not flush values from another, and when an instance is deleted, so are the cached values for that instance. >>> b = MyClass() >>> for x in range(35): ... res = b.method(x) >>> b.calls 35 >>> a.method(0) 0 >>> a.calls 75 Note that if method had been decorated with ``functools.lru_cache()``, a.calls would have been 76 (due to the cached value of 0 having been flushed by the 'b' instance). Clear the cache with ``.cache_clear()`` >>> a.method.cache_clear() Another cache wrapper may be supplied: >>> cache = lru_cache(maxsize=2) >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache) >>> a = MyClass() >>> a.method2() 3 Caution - do not subsequently wrap the method with another decorator, such as ``@property``, which changes the semantics of the function. See also http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ for another implementation and additional justification. """ cache_wrapper = cache_wrapper or lru_cache() def wrapper(self, *args, **kwargs): # it's the first call, replace the method with a cached, bound method bound_method = types.MethodType(method, self) cached_method = cache_wrapper(bound_method) setattr(self, method.__name__, cached_method) return cached_method(*args, **kwargs) return _special_method_cache(method, cache_wrapper) or wrapper
Wrap lru_cache to support storing the cache data in the object instances. Abstracts the common paradigm where the method explicitly saves an underscore-prefixed protected property on first call and returns that subsequently. >>> class MyClass: ... calls = 0 ... ... @method_cache ... def method(self, value): ... self.calls += 1 ... return value >>> a = MyClass() >>> a.method(3) 3 >>> for x in range(75): ... res = a.method(x) >>> a.calls 75 Note that the apparent behavior will be exactly like that of lru_cache except that the cache is stored on each instance, so values in one instance will not flush values from another, and when an instance is deleted, so are the cached values for that instance. >>> b = MyClass() >>> for x in range(35): ... res = b.method(x) >>> b.calls 35 >>> a.method(0) 0 >>> a.calls 75 Note that if method had been decorated with ``functools.lru_cache()``, a.calls would have been 76 (due to the cached value of 0 having been flushed by the 'b' instance). Clear the cache with ``.cache_clear()`` >>> a.method.cache_clear() Another cache wrapper may be supplied: >>> cache = lru_cache(maxsize=2) >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache) >>> a = MyClass() >>> a.method2() 3 Caution - do not subsequently wrap the method with another decorator, such as ``@property``, which changes the semantics of the function. See also http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ for another implementation and additional justification.
entailment
def _special_method_cache(method, cache_wrapper): """ Because Python treats special methods differently, it's not possible to use instance attributes to implement the cached methods. Instead, install the wrapper method under a different name and return a simple proxy to that wrapper. https://github.com/jaraco/jaraco.functools/issues/5 """ name = method.__name__ special_names = '__getattr__', '__getitem__' if name not in special_names: return wrapper_name = '__cached' + name def proxy(self, *args, **kwargs): if wrapper_name not in vars(self): bound = types.MethodType(method, self) cache = cache_wrapper(bound) setattr(self, wrapper_name, cache) else: cache = getattr(self, wrapper_name) return cache(*args, **kwargs) return proxy
Because Python treats special methods differently, it's not possible to use instance attributes to implement the cached methods. Instead, install the wrapper method under a different name and return a simple proxy to that wrapper. https://github.com/jaraco/jaraco.functools/issues/5
entailment
def result_invoke(action): r""" Decorate a function with an action function that is invoked on the results returned from the decorated function (for its side-effect), then return the original result. >>> @result_invoke(print) ... def add_two(a, b): ... return a + b >>> x = add_two(2, 3) 5 """ def wrap(func): @functools.wraps(func) def wrapper(*args, **kwargs): result = func(*args, **kwargs) action(result) return result return wrapper return wrap
r""" Decorate a function with an action function that is invoked on the results returned from the decorated function (for its side-effect), then return the original result. >>> @result_invoke(print) ... def add_two(a, b): ... return a + b >>> x = add_two(2, 3) 5
entailment
def first_invoke(func1, func2): """ Return a function that when invoked will invoke func1 without any parameters (for its side-effect) and then invoke func2 with whatever parameters were passed, returning its result. """ def wrapper(*args, **kwargs): func1() return func2(*args, **kwargs) return wrapper
Return a function that when invoked will invoke func1 without any parameters (for its side-effect) and then invoke func2 with whatever parameters were passed, returning its result.
entailment
def retry_call(func, cleanup=lambda: None, retries=0, trap=()): """ Given a callable func, trap the indicated exceptions for up to 'retries' times, invoking cleanup on the exception. On the final attempt, allow any exceptions to propagate. """ attempts = count() if retries == float('inf') else range(retries) for attempt in attempts: try: return func() except trap: cleanup() return func()
Given a callable func, trap the indicated exceptions for up to 'retries' times, invoking cleanup on the exception. On the final attempt, allow any exceptions to propagate.
entailment
def retry(*r_args, **r_kwargs): """ Decorator wrapper for retry_call. Accepts arguments to retry_call except func and then returns a decorator for the decorated function. Ex: >>> @retry(retries=3) ... def my_func(a, b): ... "this is my funk" ... print(a, b) >>> my_func.__doc__ 'this is my funk' """ def decorate(func): @functools.wraps(func) def wrapper(*f_args, **f_kwargs): bound = functools.partial(func, *f_args, **f_kwargs) return retry_call(bound, *r_args, **r_kwargs) return wrapper return decorate
Decorator wrapper for retry_call. Accepts arguments to retry_call except func and then returns a decorator for the decorated function. Ex: >>> @retry(retries=3) ... def my_func(a, b): ... "this is my funk" ... print(a, b) >>> my_func.__doc__ 'this is my funk'
entailment
def print_yielded(func): """ Convert a generator into a function that prints all yielded elements >>> @print_yielded ... def x(): ... yield 3; yield None >>> x() 3 None """ print_all = functools.partial(map, print) print_results = compose(more_itertools.recipes.consume, print_all, func) return functools.wraps(func)(print_results)
Convert a generator into a function that prints all yielded elements >>> @print_yielded ... def x(): ... yield 3; yield None >>> x() 3 None
entailment
def pass_none(func): """ Wrap func so it's not called if its first param is None >>> print_text = pass_none(print) >>> print_text('text') text >>> print_text(None) """ @functools.wraps(func) def wrapper(param, *args, **kwargs): if param is not None: return func(param, *args, **kwargs) return wrapper
Wrap func so it's not called if its first param is None >>> print_text = pass_none(print) >>> print_text('text') text >>> print_text(None)
entailment
def assign_params(func, namespace): """ Assign parameters from namespace where func solicits. >>> def func(x, y=3): ... print(x, y) >>> assigned = assign_params(func, dict(x=2, z=4)) >>> assigned() 2 3 The usual errors are raised if a function doesn't receive its required parameters: >>> assigned = assign_params(func, dict(y=3, z=4)) >>> assigned() Traceback (most recent call last): TypeError: func() ...argument... It even works on methods: >>> class Handler: ... def meth(self, arg): ... print(arg) >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))() crystal """ try: sig = inspect.signature(func) params = sig.parameters.keys() except AttributeError: spec = inspect.getargspec(func) params = spec.args call_ns = { k: namespace[k] for k in params if k in namespace } return functools.partial(func, **call_ns)
Assign parameters from namespace where func solicits. >>> def func(x, y=3): ... print(x, y) >>> assigned = assign_params(func, dict(x=2, z=4)) >>> assigned() 2 3 The usual errors are raised if a function doesn't receive its required parameters: >>> assigned = assign_params(func, dict(y=3, z=4)) >>> assigned() Traceback (most recent call last): TypeError: func() ...argument... It even works on methods: >>> class Handler: ... def meth(self, arg): ... print(arg) >>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))() crystal
entailment
def save_method_args(method): """ Wrap a method such that when it is called, the args and kwargs are saved on the method. >>> class MyClass: ... @save_method_args ... def method(self, a, b): ... print(a, b) >>> my_ob = MyClass() >>> my_ob.method(1, 2) 1 2 >>> my_ob._saved_method.args (1, 2) >>> my_ob._saved_method.kwargs {} >>> my_ob.method(a=3, b='foo') 3 foo >>> my_ob._saved_method.args () >>> my_ob._saved_method.kwargs == dict(a=3, b='foo') True The arguments are stored on the instance, allowing for different instance to save different args. >>> your_ob = MyClass() >>> your_ob.method({str('x'): 3}, b=[4]) {'x': 3} [4] >>> your_ob._saved_method.args ({'x': 3},) >>> my_ob._saved_method.args () """ args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs') @functools.wraps(method) def wrapper(self, *args, **kwargs): attr_name = '_saved_' + method.__name__ attr = args_and_kwargs(args, kwargs) setattr(self, attr_name, attr) return method(self, *args, **kwargs) return wrapper
Wrap a method such that when it is called, the args and kwargs are saved on the method. >>> class MyClass: ... @save_method_args ... def method(self, a, b): ... print(a, b) >>> my_ob = MyClass() >>> my_ob.method(1, 2) 1 2 >>> my_ob._saved_method.args (1, 2) >>> my_ob._saved_method.kwargs {} >>> my_ob.method(a=3, b='foo') 3 foo >>> my_ob._saved_method.args () >>> my_ob._saved_method.kwargs == dict(a=3, b='foo') True The arguments are stored on the instance, allowing for different instance to save different args. >>> your_ob = MyClass() >>> your_ob.method({str('x'): 3}, b=[4]) {'x': 3} [4] >>> your_ob._saved_method.args ({'x': 3},) >>> my_ob._saved_method.args ()
entailment
def _wait(self): "ensure at least 1/max_rate seconds from last call" elapsed = time.time() - self.last_called must_wait = 1 / self.max_rate - elapsed time.sleep(max(0, must_wait)) self.last_called = time.time()
ensure at least 1/max_rate seconds from last call
entailment
def row_col_maker(app, fromdocname, all_needs, need_info, need_key, make_ref=False, ref_lookup=False, prefix=''): """ Creates and returns a column. :param app: current sphinx app :param fromdocname: current document :param all_needs: Dictionary of all need objects :param need_info: need_info object, which stores all related need data :param need_key: The key to access the needed data from need_info :param make_ref: If true, creates a reference for the given data in need_key :param ref_lookup: If true, it uses the data to lookup for a related need and uses its data to create the reference :param prefix: string, which is used as prefix for the text output :return: column object (nodes.entry) """ row_col = nodes.entry() para_col = nodes.paragraph() if need_key in need_info and need_info[need_key] is not None: if not isinstance(need_info[need_key], (list, set)): data = [need_info[need_key]] else: data = need_info[need_key] for index, datum in enumerate(data): link_id = datum link_part = None if need_key in ['links', 'back_links']: if '.' in datum: link_id = datum.split('.')[0] link_part = datum.split('.')[1] datum_text = prefix + datum text_col = nodes.Text(datum_text, datum_text) if make_ref or ref_lookup: try: ref_col = nodes.reference("", "") if not ref_lookup: ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, need_info['docname']) ref_col['refuri'] += "#" + datum else: temp_need = all_needs[link_id] ref_col['refuri'] = app.builder.get_relative_uri(fromdocname, temp_need['docname']) ref_col['refuri'] += "#" + temp_need["id"] if link_part is not None: ref_col['refuri'] += '.' + link_part except KeyError: para_col += text_col else: ref_col.append(text_col) para_col += ref_col else: para_col += text_col if index + 1 < len(data): para_col += nodes.emphasis("; ", "; ") row_col += para_col return row_col
Creates and returns a column. :param app: current sphinx app :param fromdocname: current document :param all_needs: Dictionary of all need objects :param need_info: need_info object, which stores all related need data :param need_key: The key to access the needed data from need_info :param make_ref: If true, creates a reference for the given data in need_key :param ref_lookup: If true, it uses the data to lookup for a related need and uses its data to create the reference :param prefix: string, which is used as prefix for the text output :return: column object (nodes.entry)
entailment
def insert_blob(filename, hosts=None, table=None): """Upload a file into a blob table """ conn = connect(hosts) container = conn.get_blob_container(table) with open(filename, 'rb') as f: digest = container.put(f) return '{server}/_blobs/{table}/{digest}'.format( server=conn.client.active_servers[0], table=table, digest=digest )
Upload a file into a blob table
entailment
def update_dois(csv_source, write_file=True): """ Get DOI publication info for a batch of DOIs. This is LiPD-independent and only requires a CSV file with all DOIs listed in a single column. The output is LiPD-formatted publication data for each entry. :param str csv_source: Local path to CSV file :param bool write_file: Write output data to JSON file (True), OR pretty print output to console (False) :return none: """ _dois_arr = [] _dois_raw = [] # open the CSV file with open(csv_source, "r") as f: reader = csv.reader(f) for row in reader: # sort the DOIs as an array of DOI strings _dois_arr.append(row[0]) # run the DOI resolver once for each DOI string. for _doi in _dois_arr: _dois_raw.append(_update_doi(_doi)) if write_file: # Write the file new_filename = os.path.splitext(csv_source)[0] write_json_to_file(_dois_raw, new_filename) else: print(json.dumps(_dois_raw, indent=2)) return
Get DOI publication info for a batch of DOIs. This is LiPD-independent and only requires a CSV file with all DOIs listed in a single column. The output is LiPD-formatted publication data for each entry. :param str csv_source: Local path to CSV file :param bool write_file: Write output data to JSON file (True), OR pretty print output to console (False) :return none:
entailment
def write_json_to_file(json_data, filename="metadata"): """ Write all JSON in python dictionary to a new json file. :param any json_data: JSON data :param str filename: Target filename (defaults to 'metadata.jsonld') :return None: """ # Use demjson to maintain unicode characters in output json_bin = demjson.encode(json_data, encoding='utf-8', compactly=False) # Write json to file try: open("{}.json".format(filename), "wb").write(json_bin) except FileNotFoundError as e: print("Error: Writing json to file: {}".format(filename)) return
Write all JSON in python dictionary to a new json file. :param any json_data: JSON data :param str filename: Target filename (defaults to 'metadata.jsonld') :return None:
entailment
def compile_authors(authors): """ Compiles authors "Last, First" into a single list :param list authors: Raw author data retrieved from doi.org :return list: Author objects """ author_list = [] for person in authors: author_list.append({'name': person['family'] + ", " + person['given']}) return author_list
Compiles authors "Last, First" into a single list :param list authors: Raw author data retrieved from doi.org :return list: Author objects
entailment
def compile_fetch(raw, doi_id): """ Loop over Raw and add selected items to Fetch with proper formatting :param dict raw: JSON data from doi.org :param str doi_id: :return dict: """ fetch_dict = OrderedDict() order = {'author': 'author', 'type': 'type', 'identifier': '', 'title': 'title', 'journal': 'container-title', 'pubYear': '', 'volume': 'volume', 'publisher': 'publisher', 'page':'page', 'issue': 'issue'} for k, v in order.items(): try: if k == 'identifier': fetch_dict[k] = [{"type": "doi", "id": doi_id, "url": "http://dx.doi.org/" + doi_id}] elif k == 'author': fetch_dict[k] = compile_authors(raw[v]) elif k == 'pubYear': fetch_dict[k] = compile_date(raw['issued']['date-parts']) else: fetch_dict[k] = raw[v] except KeyError as e: # If we try to add a key that doesn't exist in the raw dict, then just keep going. pass return fetch_dict
Loop over Raw and add selected items to Fetch with proper formatting :param dict raw: JSON data from doi.org :param str doi_id: :return dict:
entailment
def get_data(doi_id): """ Resolve DOI and compile all attributes into one dictionary :param str doi_id: :param int idx: Publication index :return dict: Updated publication dictionary """ fetch_dict = {} try: # Send request to grab metadata at URL print("Requesting : {}".format(doi_id)) url = "http://dx.doi.org/" + doi_id headers = {"accept": "application/rdf+xml;q=0.5, application/citeproc+json;q=1.0"} r = requests.get(url, headers=headers) # DOI 404. Data not retrieved. Log and return original pub if r.status_code == 400 or r.status_code == 404: print("HTTP 404: DOI not found, {}".format(doi_id)) # Ignore other status codes. Run when status is 200 (good response) elif r.status_code == 200: # Load data from http response raw = json.loads(r.text) # Create a new pub dictionary with metadata received fetch_dict = compile_fetch(raw, doi_id) fetch_dict['pubDataUrl'] = 'doi.org' except urllib.error.URLError as e: fetch_dict["ERROR"] = e fetch_dict["doi"] = doi_id print("get_data: URLError: malformed doi: {}, {}".format(doi_id, e)) except Exception as e: fetch_dict["ERROR"] = e fetch_dict["doi"] = doi_id print("get_data: ValueError: cannot resolve dois from this publisher: {}, {}".format(doi_id, e)) return fetch_dict
Resolve DOI and compile all attributes into one dictionary :param str doi_id: :param int idx: Publication index :return dict: Updated publication dictionary
entailment
def clean_doi(doi_string): """ Use regex to extract all DOI ids from string (i.e. 10.1029/2005pa001215) :param str doi_string: Raw DOI string value from input file. Often not properly formatted. :return list: DOI ids. May contain 0, 1, or multiple ids. """ regex = re.compile(r'\b(10[.][0-9]{3,}(?:[.][0-9]+)*/(?:(?!["&\'<>,])\S)+)\b') try: # Returns a list of matching strings m = re.findall(regex, doi_string) except TypeError as e: # If doi_string is None type, return empty list print("TypeError cleaning DOI: {}, {}".format(doi_string, e)) m = [] return m
Use regex to extract all DOI ids from string (i.e. 10.1029/2005pa001215) :param str doi_string: Raw DOI string value from input file. Often not properly formatted. :return list: DOI ids. May contain 0, 1, or multiple ids.
entailment
def log_benchmark(fn, start, end): """ Log a given function and how long the function takes in seconds :param str fn: Function name :param float start: Function start time :param float end: Function end time :return none: """ elapsed = round(end - start, 2) line = ("Benchmark - Function: {} , Time: {} seconds".format(fn, elapsed)) return line
Log a given function and how long the function takes in seconds :param str fn: Function name :param float start: Function start time :param float end: Function end time :return none:
entailment
def update_changelog(): """ Create or update the changelog txt file. Prompt for update description. :return None: """ # description = input("Please enter a short description for this update:\n ") description = 'Placeholder for description here.' # open changelog file for appending. if doesn't exist, creates file. with open('changelog.txt', 'a+') as f: # write update line f.write(str(datetime.datetime.now().strftime("%d %B %Y %I:%M%p")) + '\nDescription: ' + description) return
Create or update the changelog txt file. Prompt for update description. :return None:
entailment
def create_benchmark(name, log_file, level=logging.INFO): """ Creates a logger for function benchmark times :param str name: Name of the logger :param str log_file: Filename :return obj: Logger """ handler = logging.FileHandler(log_file) rtf_handler = RotatingFileHandler(log_file, maxBytes=30000, backupCount=0) formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s') handler.setFormatter(formatter) logger = logging.getLogger(name) logger.setLevel(level) logger.addHandler(handler) logger.addHandler(rtf_handler) return logger
Creates a logger for function benchmark times :param str name: Name of the logger :param str log_file: Filename :return obj: Logger
entailment
def create_logger(name): """ Creates a logger with the below attributes. :param str name: Name of the logger :return obj: Logger """ logging.config.dictConfig({ 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'simple': { 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s' }, 'detailed': { 'format': '%(asctime)s %(module)-17s line:%(lineno)-4d ' '%(levelname)-8s %(message)s' }, 'email': { 'format': 'Timestamp: %(asctime)s\nModule: %(module)s\n' 'Line: %(lineno)d\nMessage: %(message)s' }, }, 'handlers': { # 'stream': { # 'level': 'DEBUG', # 'class': 'logging.StreamHandler', # "formatter": "simple" # }, "file": { "level": "DEBUG", "formatter": "simple", "class": "logging.handlers.RotatingFileHandler", "filename": "debug.log", 'mode': 'a', 'maxBytes': 30000, 'backupCount': 0 } }, 'loggers': { '': { # "handlers": ["stream", "file"], "handlers": ["file"], "level": "DEBUG", 'propagate': True } } }) return logging.getLogger(name)
Creates a logger with the below attributes. :param str name: Name of the logger :return obj: Logger
entailment
def timeit(hosts=None, stmt=None, warmup=30, repeat=None, duration=None, concurrency=1, output_fmt=None, fail_if=None, sample_mode='reservoir'): """Run the given statement a number of times and return the runtime stats Args: fail-if: An expression that causes cr8 to exit with a failure if it evaluates to true. The expression can contain formatting expressions for: - runtime_stats - statement - meta - concurrency - bulk_size For example: --fail-if "{runtime_stats.mean} > 1.34" """ num_lines = 0 log = Logger(output_fmt) with Runner(hosts, concurrency, sample_mode) as runner: version_info = aio.run(runner.client.get_server_version) for line in as_statements(lines_from_stdin(stmt)): runner.warmup(line, warmup) timed_stats = runner.run(line, iterations=repeat, duration=duration) r = Result( version_info=version_info, statement=line, timed_stats=timed_stats, concurrency=concurrency ) log.result(r) if fail_if: eval_fail_if(fail_if, r) num_lines += 1 if num_lines == 0: raise SystemExit( 'No SQL statements provided. Use --stmt or provide statements via stdin')
Run the given statement a number of times and return the runtime stats Args: fail-if: An expression that causes cr8 to exit with a failure if it evaluates to true. The expression can contain formatting expressions for: - runtime_stats - statement - meta - concurrency - bulk_size For example: --fail-if "{runtime_stats.mean} > 1.34"
entailment
def get_download_path(): """ Determine the OS and the associated download folder. :return str Download path: """ if os.name == 'nt': import winreg sub_key = r'SOFTWARE\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders' downloads_guid = '{374DE290-123F-4565-9164-39C4925E467B}' with winreg.OpenKey(winreg.HKEY_CURRENT_USER, sub_key) as key: location = winreg.QueryValueEx(key, downloads_guid)[0] return location else: return os.path.join(os.path.expanduser('~'), 'Downloads')
Determine the OS and the associated download folder. :return str Download path:
entailment
def download_file(src_url, dst_path): """ Use the given URL and destination to download and save a file :param str src_url: Direct URL to lipd file download :param str dst_path: Local path to download file to, including filename and ext. ex. /path/to/filename.lpd :return none: """ if "MD982181" not in src_url: try: print("downloading file from url...") urllib.request.urlretrieve(src_url, dst_path) except Exception as e: print("Error: unable to download from url: {}".format(e)) return
Use the given URL and destination to download and save a file :param str src_url: Direct URL to lipd file download :param str dst_path: Local path to download file to, including filename and ext. ex. /path/to/filename.lpd :return none:
entailment
def wait_until(predicate, timeout=30): """Wait until predicate returns a truthy value or the timeout is reached. >>> wait_until(lambda: True, timeout=10) """ not_expired = Timeout(timeout) while not_expired(): r = predicate() if r: break
Wait until predicate returns a truthy value or the timeout is reached. >>> wait_until(lambda: True, timeout=10)
entailment
def _find_matching_version(versions, version_pattern): """ Return the first matching version >>> _find_matching_version(['1.1.4', '1.0.12', '1.0.5'], '1.0.x') '1.0.12' >>> _find_matching_version(['1.1.4', '1.0.6', '1.0.5'], '2.x.x') """ pattern = fnmatch.translate(version_pattern.replace('x', '*')) return next((v for v in versions if re.match(pattern, v)), None)
Return the first matching version >>> _find_matching_version(['1.1.4', '1.0.12', '1.0.5'], '1.0.x') '1.0.12' >>> _find_matching_version(['1.1.4', '1.0.6', '1.0.5'], '2.x.x')
entailment
def _build_tarball(src_repo) -> str: """ Build a tarball from src and return the path to it """ run = partial(subprocess.run, cwd=src_repo, check=True) run(['git', 'clean', '-xdff']) src_repo = Path(src_repo) if os.path.exists(src_repo / 'es' / 'upstream'): run(['git', 'submodule', 'update', '--init', '--', 'es/upstream']) run(['./gradlew', '--no-daemon', 'clean', 'distTar']) distributions = Path(src_repo) / 'app' / 'build' / 'distributions' return next(distributions.glob('crate-*.tar.gz'))
Build a tarball from src and return the path to it
entailment
def _crates_cache() -> str: """ Return the path to the crates cache folder """ return os.environ.get( 'XDG_CACHE_HOME', os.path.join(os.path.expanduser('~'), '.cache', 'cr8', 'crates'))
Return the path to the crates cache folder
entailment
def get_crate(version, crate_root=None): """Retrieve a Crate tarball, extract it and return the path. Args: version: The Crate version to get. Can be specified in different ways: - A concrete version like '0.55.0' - A version including a `x` as wildcards. Like: '1.1.x' or '1.x.x'. This will use the latest version that matches. - Release branch, like `3.1` - An alias: 'latest-stable' or 'latest-testing' - A URI pointing to a crate tarball crate_root: Where to extract the tarball to. If this isn't specified ``$XDG_CACHE_HOME/.cache/cr8/crates`` will be used. """ if not crate_root: crate_root = _crates_cache() _remove_old_crates(crate_root) if _is_project_repo(version): return _extract_tarball(_build_tarball(version)) m = BRANCH_VERSION_RE.match(version) if m: return _build_from_release_branch(m.group(0), crate_root) uri = _lookup_uri(version) crate_dir = _download_and_extract(uri, crate_root) return crate_dir
Retrieve a Crate tarball, extract it and return the path. Args: version: The Crate version to get. Can be specified in different ways: - A concrete version like '0.55.0' - A version including a `x` as wildcards. Like: '1.1.x' or '1.x.x'. This will use the latest version that matches. - Release branch, like `3.1` - An alias: 'latest-stable' or 'latest-testing' - A URI pointing to a crate tarball crate_root: Where to extract the tarball to. If this isn't specified ``$XDG_CACHE_HOME/.cache/cr8/crates`` will be used.
entailment
def _parse_options(options: List[str]) -> Dict[str, str]: """ Parse repeatable CLI options >>> opts = _parse_options(['cluster.name=foo', 'CRATE_JAVA_OPTS="-Dxy=foo"']) >>> print(json.dumps(opts, sort_keys=True)) {"CRATE_JAVA_OPTS": "\\"-Dxy=foo\\"", "cluster.name": "foo"} """ try: return dict(i.split('=', maxsplit=1) for i in options) except ValueError: raise ArgumentError( f'Option must be in format <key>=<value>, got: {options}')
Parse repeatable CLI options >>> opts = _parse_options(['cluster.name=foo', 'CRATE_JAVA_OPTS="-Dxy=foo"']) >>> print(json.dumps(opts, sort_keys=True)) {"CRATE_JAVA_OPTS": "\\"-Dxy=foo\\"", "cluster.name": "foo"}
entailment
def run_crate( version, env=None, setting=None, crate_root=None, keep_data=False, disable_java_magic=False, ): """Launch a crate instance. Supported version specifications: - Concrete version like "0.55.0" or with wildcard: "1.1.x" - An alias (one of [latest-nightly, latest-stable, latest-testing]) - A URI pointing to a CrateDB tarball (in .tar.gz format) - A URI pointing to a checked out CrateDB repo directory run-crate supports command chaining. To launch a CrateDB node and another sub-command use: cr8 run-crate <ver> -- timeit -s "select 1" --hosts '{node.http_url}' To launch any (blocking) subprocess, prefix the name with '@': cr8 run-crate <version> -- @http '{node.http_url}' If run-crate is invoked using command chaining it will exit once all chained commands finished. The postgres host and port are available as {node.addresses.psql.host} and {node.addresses.psql.port} """ with create_node( version, env, setting, crate_root, keep_data, java_magic=not disable_java_magic, ) as n: try: n.start() n.process.wait() except KeyboardInterrupt: print('Stopping Crate...')
Launch a crate instance. Supported version specifications: - Concrete version like "0.55.0" or with wildcard: "1.1.x" - An alias (one of [latest-nightly, latest-stable, latest-testing]) - A URI pointing to a CrateDB tarball (in .tar.gz format) - A URI pointing to a checked out CrateDB repo directory run-crate supports command chaining. To launch a CrateDB node and another sub-command use: cr8 run-crate <ver> -- timeit -s "select 1" --hosts '{node.http_url}' To launch any (blocking) subprocess, prefix the name with '@': cr8 run-crate <version> -- @http '{node.http_url}' If run-crate is invoked using command chaining it will exit once all chained commands finished. The postgres host and port are available as {node.addresses.psql.host} and {node.addresses.psql.port}
entailment
def start(self): """Start the process. This will block until the Crate cluster is ready to process requests. """ log.info('Starting Crate process') self.process = proc = self.enter_context(subprocess.Popen( self.cmd, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=self.env, universal_newlines=True )) msg = ('CrateDB launching:\n' ' PID: %s\n' ' Logs: %s\n' ' Data: %s') if not self.keep_data: msg += ' (removed on stop)\n' logfile = os.path.join(self.logs_path, self.cluster_name + '.log') log.info( msg, proc.pid, logfile, self.data_path ) self.addresses = DotDict({}) self.monitor.consumers.append(AddrConsumer(self._set_addr)) self.monitor.start(proc) log_lines = [] self.monitor.consumers.append(log_lines.append) spinner = cycle(['/', '-', '\\', '|']) def show_spinner(): if sys.stdout.isatty(): print(next(spinner), end='\r') return True try: wait_until( lambda: show_spinner() and _ensure_running(proc) and self.http_host, timeout=60 ) host = self.addresses.http.host port = self.addresses.http.port wait_until( lambda: _ensure_running(proc) and _is_up(host, port), timeout=30 ) if _has_ssl(host, port): self.http_url = self.http_url.replace('http://', 'https://') wait_until( lambda: show_spinner() and cluster_state_200(self.http_url), timeout=30 ) except (SystemError, TimeoutError): if not log_lines: _try_print_log(logfile) else: for line in log_lines: log.error(line) raise SystemExit("Exiting because CrateDB didn't start correctly") else: self.monitor.consumers.remove(log_lines.append) log.info('Cluster ready to process requests')
Start the process. This will block until the Crate cluster is ready to process requests.
entailment
def _parse(line): """ Parse protocol and bound address from log message >>> AddrConsumer._parse('NONE') (None, None) >>> AddrConsumer._parse('[INFO ][i.c.p.h.CrateNettyHttpServerTransport] [Widderstein] publish_address {127.0.0.1:4200}, bound_addresses {[fe80::1]:4200}, {[::1]:4200}, {127.0.0.1:4200}') ('http', '127.0.0.1:4200') >>> AddrConsumer._parse('[INFO ][o.e.t.TransportService ] [Widderstein] publish_address {127.0.0.1:4300}, bound_addresses {[fe80::1]:4300}, {[::1]:4300}, {127.0.0.1:4300}') ('transport', '127.0.0.1:4300') >>> AddrConsumer._parse('[INFO ][psql ] [Widderstein] publish_address {127.0.0.1:5432}, bound_addresses {127.0.0.1:5432}') ('psql', '127.0.0.1:5432') """ m = AddrConsumer.ADDRESS_RE.match(line) if not m: return None, None protocol = m.group('protocol') protocol = AddrConsumer.PROTOCOL_MAP.get(protocol, protocol) return protocol, m.group('addr')
Parse protocol and bound address from log message >>> AddrConsumer._parse('NONE') (None, None) >>> AddrConsumer._parse('[INFO ][i.c.p.h.CrateNettyHttpServerTransport] [Widderstein] publish_address {127.0.0.1:4200}, bound_addresses {[fe80::1]:4200}, {[::1]:4200}, {127.0.0.1:4200}') ('http', '127.0.0.1:4200') >>> AddrConsumer._parse('[INFO ][o.e.t.TransportService ] [Widderstein] publish_address {127.0.0.1:4300}, bound_addresses {[fe80::1]:4300}, {[::1]:4300}, {127.0.0.1:4300}') ('transport', '127.0.0.1:4300') >>> AddrConsumer._parse('[INFO ][psql ] [Widderstein] publish_address {127.0.0.1:5432}, bound_addresses {127.0.0.1:5432}') ('psql', '127.0.0.1:5432')
entailment
def _calc_block_mean_variance(image, mask, blocksize): """Adaptively determines image background. Args: image: image converted 1-channel image. mask: 1-channel mask, same size as image. blocksize: adaptive algorithm parameter. Returns: image of same size as input with foreground inpainted with background. """ I = image.copy() I_f = I.astype(np.float32) / 255. # Used for mean and std. result = np.zeros( (image.shape[0] / blocksize, image.shape[1] / blocksize), dtype=np.float32) for i in xrange(0, image.shape[0] - blocksize, blocksize): for j in xrange(0, image.shape[1] - blocksize, blocksize): patch = I_f[i:i+blocksize+1, j:j+blocksize+1] mask_patch = mask[i:i+blocksize+1, j:j+blocksize+1] tmp1 = np.zeros((blocksize, blocksize)) tmp2 = np.zeros((blocksize, blocksize)) mean, std_dev = cv2.meanStdDev(patch, tmp1, tmp2, mask_patch) value = 0 if std_dev[0][0] > MEAN_VARIANCE_THRESHOLD: value = mean[0][0] result[i/blocksize, j/blocksize] = value small_image = cv2.resize(I, (image.shape[1] / blocksize, image.shape[0] / blocksize)) res, inpaintmask = cv2.threshold(result, 0.02, 1, cv2.THRESH_BINARY) inpainted = cv2.inpaint(small_image, inpaintmask.astype(np.uint8), 5, cv2.INPAINT_TELEA) res = cv2.resize(inpainted, (image.shape[1], image.shape[0])) return res
Adaptively determines image background. Args: image: image converted 1-channel image. mask: 1-channel mask, same size as image. blocksize: adaptive algorithm parameter. Returns: image of same size as input with foreground inpainted with background.
entailment
def threshold(image, block_size=DEFAULT_BLOCKSIZE, mask=None): """Applies adaptive thresholding to the given image. Args: image: BGRA image. block_size: optional int block_size to use for adaptive thresholding. mask: optional mask. Returns: Thresholded image. """ if mask is None: mask = np.zeros(image.shape[:2], dtype=np.uint8) mask[:] = 255 if len(image.shape) > 2 and image.shape[2] == 4: image = cv2.cvtColor(image, cv2.COLOR_BGRA2GRAY) res = _calc_block_mean_variance(image, mask, block_size) res = image.astype(np.float32) - res.astype(np.float32) + 255 _, res = cv2.threshold(res, 215, 255, cv2.THRESH_BINARY) return res
Applies adaptive thresholding to the given image. Args: image: BGRA image. block_size: optional int block_size to use for adaptive thresholding. mask: optional mask. Returns: Thresholded image.
entailment
def find_version(include_dev_version=True, root='%(pwd)s', version_file='%(root)s/version.txt', version_module_paths=(), git_args=None, vcs_args=None, decrement_dev_version=None, strip_prefix='v', Popen=subprocess.Popen, open=open): """Find an appropriate version number from version control. It's much more convenient to be able to use your version control system's tagging mechanism to derive a version number than to have to duplicate that information all over the place. The default behavior is to write out a ``version.txt`` file which contains the VCS output, for systems where the appropriate VCS is not installed or there is no VCS metadata directory present. ``version.txt`` can (and probably should!) be packaged in release tarballs by way of the ``MANIFEST.in`` file. :param include_dev_version: By default, if there are any commits after the most recent tag (as reported by the VCS), that number will be included in the version number as a ``.post`` suffix. For example, if the most recent tag is ``1.0`` and there have been three commits after that tag, the version number will be ``1.0.post3``. This behavior can be disabled by setting this parameter to ``False``. :param root: The directory of the repository root. The default value is the current working directory, since when running ``setup.py``, this is often (but not always) the same as the current working directory. Standard substitutions are performed on this value. :param version_file: The name of the file where version information will be saved. Reading and writing version files can be disabled altogether by setting this parameter to ``None``. Standard substitutions are performed on this value. :param version_module_paths: A list of python modules which will be automatically generated containing ``__version__`` and ``__sha__`` attributes. For example, with ``package/_version.py`` as a version module path, ``package/__init__.py`` could do ``from package._version import __version__, __sha__``. 
:param git_args: **Deprecated.** Please use *vcs_args* instead. :param vcs_args: The command to run to get a version. By default, this is automatically guessed from directories present in the repository root. Specify this as a list of string arguments including the program to run, e.g. ``['git', 'describe']``. Standard substitutions are performed on each value in the provided list. :param decrement_dev_version: If ``True``, subtract one from the number of commits after the most recent tag. This is primarily for hg, as hg requires a commit to make a tag. If the VCS used is hg (i.e. the revision starts with ``'hg'``) and *decrement_dev_version* is not specified as ``False``, *decrement_dev_version* will be set to ``True``. :param strip_prefix: A string which will be stripped from the start of version number tags. By default this is ``'v'``, but could be ``'debian/'`` for compatibility with ``git-dch``. :param Popen: Defaults to ``subprocess.Popen``. This is for testing. :param open: Defaults to ``open``. This is for testing. *root*, *version_file*, and *git_args* each support some substitutions: ``%(root)s`` The value provided for *root*. This is not available for the *root* parameter itself. ``%(pwd)s`` The current working directory. ``/`` will automatically be translated into the correct path separator for the current platform, such as ``:`` or ``\``. ``vcversioner`` will perform automatic VCS detection with the following directories, in order, and run the specified commands. ``%(root)s/.git`` ``git --git-dir %(root)s/.git describe --tags --long``. ``--git-dir`` is used to prevent contamination from git repositories which aren't the git repository of your project. ``%(root)s/.hg`` ``hg log -R %(root)s -r . --template '{latesttag}-{latesttagdistance}-hg{node|short}'``. ``-R`` is similarly used to prevent contamination. 
""" substitutions = {'pwd': os.getcwd()} substitutions['root'] = root % substitutions def substitute(val): return _fix_path(val % substitutions) if version_file is not None: version_file = substitute(version_file) if git_args is not None: warnings.warn( 'passing `git_args is deprecated; please use vcs_args', DeprecationWarning) vcs_args = git_args if vcs_args is None: for path, args in _vcs_args_by_path: if os.path.exists(substitute(path)): vcs_args = args break raw_version = None vcs_output = [] if vcs_args is not None: vcs_args = [substitute(arg) for arg in vcs_args] # try to pull the version from some VCS, or (perhaps) fall back on a # previously-saved version. try: proc = Popen(vcs_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError: pass else: stdout, stderr = proc.communicate() raw_version = stdout.strip().decode() vcs_output = stderr.decode().splitlines() version_source = 'VCS' failure = '%r failed' % (vcs_args,) else: failure = 'no VCS could be detected in %(root)r' % substitutions def show_vcs_output(): if not vcs_output: return print('-- VCS output follows --') for line in vcs_output: print(line) # VCS failed if the string is empty if not raw_version: if version_file is None: print('%s.' % (failure,)) show_vcs_output() raise SystemExit(2) elif not os.path.exists(version_file): print("%s and %r isn't present." % (failure, version_file)) print("are you installing from a github tarball?") show_vcs_output() raise SystemExit(2) with open(version_file, 'rb') as infile: raw_version = infile.read().decode() version_source = repr(version_file) # try to parse the version into something usable. try: tag_version, commits, sha = raw_version.rsplit('-', 2) except ValueError: print("%r (from %s) couldn't be parsed into a version." 
% ( raw_version, version_source)) show_vcs_output() raise SystemExit(2) # remove leading prefix if tag_version.startswith(strip_prefix): tag_version = tag_version[len(strip_prefix):] if version_file is not None: with open(version_file, 'w') as outfile: outfile.write(raw_version) if sha.startswith('hg') and decrement_dev_version is None: decrement_dev_version = True if decrement_dev_version: commits = str(int(commits) - 1) if commits == '0' or not include_dev_version: version = tag_version else: version = '%s.post%s' % (tag_version, commits) for path in version_module_paths: with open(path, 'w') as outfile: outfile.write(""" # This file is automatically generated by setup.py. __version__ = {0} __sha__ = {1} __revision__ = {1} """.format(repr(version).lstrip('u'), repr(sha).lstrip('u'))) return Version(version, commits, sha)
Find an appropriate version number from version control. It's much more convenient to be able to use your version control system's tagging mechanism to derive a version number than to have to duplicate that information all over the place. The default behavior is to write out a ``version.txt`` file which contains the VCS output, for systems where the appropriate VCS is not installed or there is no VCS metadata directory present. ``version.txt`` can (and probably should!) be packaged in release tarballs by way of the ``MANIFEST.in`` file. :param include_dev_version: By default, if there are any commits after the most recent tag (as reported by the VCS), that number will be included in the version number as a ``.post`` suffix. For example, if the most recent tag is ``1.0`` and there have been three commits after that tag, the version number will be ``1.0.post3``. This behavior can be disabled by setting this parameter to ``False``. :param root: The directory of the repository root. The default value is the current working directory, since when running ``setup.py``, this is often (but not always) the same as the current working directory. Standard substitutions are performed on this value. :param version_file: The name of the file where version information will be saved. Reading and writing version files can be disabled altogether by setting this parameter to ``None``. Standard substitutions are performed on this value. :param version_module_paths: A list of python modules which will be automatically generated containing ``__version__`` and ``__sha__`` attributes. For example, with ``package/_version.py`` as a version module path, ``package/__init__.py`` could do ``from package._version import __version__, __sha__``. :param git_args: **Deprecated.** Please use *vcs_args* instead. :param vcs_args: The command to run to get a version. By default, this is automatically guessed from directories present in the repository root. 
Specify this as a list of string arguments including the program to run, e.g. ``['git', 'describe']``. Standard substitutions are performed on each value in the provided list. :param decrement_dev_version: If ``True``, subtract one from the number of commits after the most recent tag. This is primarily for hg, as hg requires a commit to make a tag. If the VCS used is hg (i.e. the revision starts with ``'hg'``) and *decrement_dev_version* is not specified as ``False``, *decrement_dev_version* will be set to ``True``. :param strip_prefix: A string which will be stripped from the start of version number tags. By default this is ``'v'``, but could be ``'debian/'`` for compatibility with ``git-dch``. :param Popen: Defaults to ``subprocess.Popen``. This is for testing. :param open: Defaults to ``open``. This is for testing. *root*, *version_file*, and *git_args* each support some substitutions: ``%(root)s`` The value provided for *root*. This is not available for the *root* parameter itself. ``%(pwd)s`` The current working directory. ``/`` will automatically be translated into the correct path separator for the current platform, such as ``:`` or ``\``. ``vcversioner`` will perform automatic VCS detection with the following directories, in order, and run the specified commands. ``%(root)s/.git`` ``git --git-dir %(root)s/.git describe --tags --long``. ``--git-dir`` is used to prevent contamination from git repositories which aren't the git repository of your project. ``%(root)s/.hg`` ``hg log -R %(root)s -r . --template '{latesttag}-{latesttagdistance}-hg{node|short}'``. ``-R`` is similarly used to prevent contamination.
entailment
def setup(dist, attr, value): """A hook for simplifying ``vcversioner`` use from distutils. This hook, when installed properly, allows vcversioner to automatically run when specifying a ``vcversioner`` argument to ``setup``. For example:: from setuptools import setup setup( setup_requires=['vcversioner'], vcversioner={}, ) The parameter to the ``vcversioner`` argument is a dict of keyword arguments which :func:`find_version` will be called with. """ dist.metadata.version = find_version(**value).version
A hook for simplifying ``vcversioner`` use from distutils. This hook, when installed properly, allows vcversioner to automatically run when specifying a ``vcversioner`` argument to ``setup``. For example:: from setuptools import setup setup( setup_requires=['vcversioner'], vcversioner={}, ) The parameter to the ``vcversioner`` argument is a dict of keyword arguments which :func:`find_version` will be called with.
entailment
def to_insert(table, d): """Generate an insert statement using the given table and dictionary. Args: table (str): table name d (dict): dictionary with column names as keys and values as values. Returns: tuple of statement and arguments >>> to_insert('doc.foobar', {'name': 'Marvin'}) ('insert into doc.foobar ("name") values (?)', ['Marvin']) """ columns = [] args = [] for key, val in d.items(): columns.append('"{}"'.format(key)) args.append(val) stmt = 'insert into {table} ({columns}) values ({params})'.format( table=table, columns=', '.join(columns), params=', '.join(['?'] * len(columns))) return (stmt, args)
Generate an insert statement using the given table and dictionary. Args: table (str): table name d (dict): dictionary with column names as keys and values as values. Returns: tuple of statement and arguments >>> to_insert('doc.foobar', {'name': 'Marvin'}) ('insert into doc.foobar ("name") values (?)', ['Marvin'])
entailment
def insert_json(table=None, bulk_size=1000, concurrency=25, hosts=None, output_fmt=None): """Insert JSON lines fed into stdin into a Crate cluster. If no hosts are specified the statements will be printed. Args: table: Target table name. bulk_size: Bulk size of the insert statements. concurrency: Number of operations to run concurrently. hosts: hostname:port pairs of the Crate nodes """ if not hosts: return print_only(table) queries = (to_insert(table, d) for d in dicts_from_stdin()) bulk_queries = as_bulk_queries(queries, bulk_size) print('Executing inserts: bulk_size={} concurrency={}'.format( bulk_size, concurrency), file=sys.stderr) stats = Stats() with clients.client(hosts, concurrency=concurrency) as client: f = partial(aio.measure, stats, client.execute_many) try: aio.run_many(f, bulk_queries, concurrency) except clients.SqlException as e: raise SystemExit(str(e)) try: print(format_stats(stats.get(), output_fmt)) except KeyError: if not stats.sampler.values: raise SystemExit('No data received via stdin') raise
Insert JSON lines fed into stdin into a Crate cluster. If no hosts are specified the statements will be printed. Args: table: Target table name. bulk_size: Bulk size of the insert statements. concurrency: Number of operations to run concurrently. hosts: hostname:port pairs of the Crate nodes
entailment
def _get_dominant_angle(lines, domination_type=MEDIAN): """Picks dominant angle of a set of lines. Args: lines: iterable of (x1, y1, x2, y2) tuples that define lines. domination_type: either MEDIAN or MEAN. Returns: Dominant angle value in radians. Raises: ValueError: on unknown domination_type. """ if domination_type == MEDIAN: return _get_median_angle(lines) elif domination_type == MEAN: return _get_mean_angle(lines) else: raise ValueError('Unknown domination type provided: %s' % ( domination_type))
Picks dominant angle of a set of lines. Args: lines: iterable of (x1, y1, x2, y2) tuples that define lines. domination_type: either MEDIAN or MEAN. Returns: Dominant angle value in radians. Raises: ValueError: on unknown domination_type.
entailment
def _normalize_angle(angle, range, step): """Finds an angle that matches the given one modulo step. Increments and decrements the given value with a given step. Args: range: a 2-tuple of min and max target values. step: tuning step. Returns: Normalized value within a given range. """ while angle <= range[0]: angle += step while angle >= range[1]: angle -= step return angle
Finds an angle that matches the given one modulo step. Increments and decrements the given value with a given step. Args: range: a 2-tuple of min and max target values. step: tuning step. Returns: Normalized value within a given range.
entailment
def get_collectors(self, limit=1000, offset=0): """Returns a dict of collectors. Args: limit (int): number of collectors to return offset (int): the offset of where the list of collectors should begin from """ options = { 'limit': limit, 'offset': offset, } request = requests.get(self.url, params=options, auth=self.auth) try: results = request.json()['collectors'] except KeyError: results = request.json() except json.decoder.JSONDecodeError: results = [] return results
Returns a dict of collectors. Args: limit (int): number of collectors to return offset (int): the offset of where the list of collectors should begin from
entailment
def find(self, name): """Returns a dict of collector's details if found. Args: name (str): name of collector searching for """ collectors = self.get_collectors() for collector in collectors: if name.lower() == collector['name'].lower(): self.collector_id = collector['id'] return collector return {'status': 'No results found.'}
Returns a dict of collector's details if found. Args: name (str): name of collector searching for
entailment
def delete(self, collector_id=None): """Delete a collector from inventory. Args: collector_id (int): id of collector (optional) """ cid = self.collector_id if collector_id: cid = collector_id # param to delete id url = '{0}/{1}'.format(self.url, cid) request = requests.delete(url, auth=self.auth) try: # unable to delete collector response = request.json() except ValueError: # returns when collector is deleted # apparently, the request does not return # a json response response = { u'message': u'The request completed successfully.', u'status': 200, } return response
Delete a collector from inventory. Args: collector_id (int): id of collector (optional)
entailment
def info(self, collector_id): """Return a dict of collector. Args: collector_id (int): id of collector (optional) """ cid = self.collector_id if collector_id: cid = collector_id url = '{0}/{1}'.format(self.url, cid) request = requests.get(url, auth=self.auth) return request.json()
Return a dict of collector. Args: collector_id (int): id of collector (optional)
entailment
def _dotnotation_for_nested_dictionary(d, key, dots): """ Flattens nested data structures using dot notation. :param dict d: Original or nested dictionary :param str key: :param dict dots: Dotted dictionary so far :return dict: Dotted dictionary so far """ if key == 'chronData': # Not interested in expanding chronData in dot notation. Keep it as a chunk. dots[key] = d elif isinstance(d, dict): for k in d: _dotnotation_for_nested_dictionary(d[k], key + '.' + k if key else k, dots) elif isinstance(d, list) and \ not all(isinstance(item, (int, float, complex, list)) for item in d): for n, d in enumerate(d): _dotnotation_for_nested_dictionary(d, key + '.' + str(n) if key != "" else key, dots) else: dots[key] = d return dots
Flattens nested data structures using dot notation. :param dict d: Original or nested dictionary :param str key: :param dict dots: Dotted dictionary so far :return dict: Dotted dictionary so far
entailment
def create_dataframe(ensemble): """ Create a data frame from given nested lists of ensemble data :param list ensemble: Ensemble data :return obj: Dataframe """ logger_dataframes.info("enter ens_to_df") # "Flatten" the nested lists. Bring all nested lists up to top-level. Output looks like [ [1,2], [1,2], ... ] ll = unwrap_arrays(ensemble) # Check that list lengths are all equal valid = match_arr_lengths(ll) if valid: # Lists are equal lengths, create the dataframe df = pd.DataFrame(ll) else: # Lists are unequal. Print error and return nothing. df = "empty" print("Error: Numpy Array lengths do not match. Cannot create data frame") logger_dataframes.info("exit ens_to_df") return df
Create a data frame from given nested lists of ensemble data :param list ensemble: Ensemble data :return obj: Dataframe
entailment
def lipd_to_df(metadata, csvs): """ Create an organized collection of data frames from LiPD data :param dict metadata: LiPD data :param dict csvs: Csv data :return dict: One data frame per table, organized in a dictionary by name """ dfs = {} logger_dataframes.info("enter lipd_to_df") # Flatten the dictionary, but ignore the chron data items dict_in_dotted = {} logger_dataframes.info("enter dot_notation") _dotnotation_for_nested_dictionary(metadata, '', dict_in_dotted) dict_in_dotted = collections.OrderedDict(sorted(dict_in_dotted.items())) # Create one data frame for metadata items dfs["metadata"] = pd.DataFrame(list(dict_in_dotted.items()), columns=["Key", "Value"]) # Create data frames for paleo data and chron data items. This does not use LiPD data, it uses the csv data dfs.update(_get_dfs(csvs)) return dfs
Create an organized collection of data frames from LiPD data :param dict metadata: LiPD data :param dict csvs: Csv data :return dict: One data frame per table, organized in a dictionary by name
entailment
def ts_to_df(metadata): """ Create a data frame from one TimeSeries object :param dict metadata: Time Series dictionary :return dict: One data frame per table, organized in a dictionary by name """ logger_dataframes.info("enter ts_to_df") dfs = {} # Plot the variable + values vs year, age, depth (whichever are available) dfs["paleoData"] = pd.DataFrame(_plot_ts_cols(metadata)) # Plot the chronology variables + values in a data frame dfs["chronData"] = _get_key_data(metadata, "chronData_df") # Take out the chronData pandas data frame object if it exists in the metadata # Otherwise, the data frame renderer gets crazy and errors out. if "chronData_df" in metadata: del metadata["chronData_df"] s = collections.OrderedDict(sorted(metadata.items())) # Put key-vars in a data frame to make it easier to visualize dfs["metadata"] = pd.DataFrame(list(s.items()), columns=['Key', 'Value']) logger_dataframes.info("exit ts_to_df") return dfs
Create a data frame from one TimeSeries object :param dict metadata: Time Series dictionary :return dict: One data frame per table, organized in a dictionary by name
entailment
def _plot_ts_cols(ts):
    """Get variable + values vs year, age, depth (whichever are available).

    Column headers are formatted "variableName (units)" when units exist,
    and plain "variableName" when they do not.

    :param dict ts: TimeSeries dictionary
    :return dict: Key: variableName (units), Value: column values
    """
    logger_dataframes.info("enter get_ts_cols()")
    d = {}
    # Not entirely necessary, but this makes the column headers look nicer
    # for the data frame. Missing units fall back to an empty suffix.
    try:
        units = " (" + ts["paleoData_units"] + ")"
    except KeyError as e:
        units = ""
        logger_dataframes.warning("get_ts_cols: KeyError: paleoData_units not found, {}".format(e))
    try:
        d[ts["paleoData_variableName"] + units] = ts["paleoData_values"]
    except KeyError as e:
        logger_dataframes.warning("get_ts_cols: KeyError: variableName or values not found, {}".format(e))
    # Start looking for age, year, depth columns
    for k, v in ts.items():
        if re_pandas_x_num.match(k):
            # Bug fix: mirror the units handling above — a missing "<k>Units"
            # entry no longer drops the column; it is added without a suffix.
            try:
                units = " (" + ts[k + "Units"] + ")"
            except KeyError as e:
                units = ""
                logger_dataframes.warning("get_ts_cols: KeyError: Special column units, {}, {}".format(k, e))
            d[k + units] = v
    logger_dataframes.info("exit get_ts_cols: found {}".format(len(d)))
    return d
Get variable + values vs year, age, depth (whichever are available) :param dict ts: TimeSeries dictionary :return dict: Key: variableName, Value: Panda Series object
entailment
def _get_dfs(csvs):
    """LiPD Version 1.2
    Create a data frame for each csv table, grouped into chron and paleo.

    :param dict csvs: LiPD csv data, keyed by filename then variable name
    :return dict: "chronData" and "paleoData" dictionaries of data frames
    """
    logger_dataframes.info("enter get_lipd_cols")
    # placeholders for the incoming data frames
    dfs = {"chronData": {}, "paleoData": {}}
    try:
        for filename, cols in csvs.items():
            tmp = {var: pd.Series(data["values"]) for var, data in cols.items()}
            # Filename decides which group the table belongs to.
            if "chron" in filename.lower():
                dfs["chronData"][filename] = pd.DataFrame(tmp)
            elif "paleo" in filename.lower():
                dfs["paleoData"][filename] = pd.DataFrame(tmp)
    except (KeyError, AttributeError):
        # Bug fix: also catch AttributeError — a non-dict `csvs` (or `cols`)
        # previously escaped the KeyError-only handler despite the message.
        logger_dataframes.warning("get_lipd_cols: AttributeError: expected type dict, given type {}".format(type(csvs)))
    logger_dataframes.info("exit get_lipd_cols")
    return dfs
LiPD Version 1.2 Create a data frame for each table for the given key :param dict csvs: LiPD metadata dictionary :return dict: paleo data data frames
entailment
def _get_key_data(d, key): """ Generic function to grab dictionary data by key with error handling :return: """ d2 = "" try: d2 = d[key] except KeyError: logger_dataframes.info("get_key_data: KeyError: {}".format(key)) return d2
Generic function to grab dictionary data by key with error handling :return:
entailment
def get_filtered_dfs(lib, expr):
    """Main: Get all data frames that match the given expression.

    :param dict lib: LiPD objects keyed by filename
    :param str expr: Search expression (e.g. "paleo measurement tables")
    :return dict: Filenames and data frames (filtered)
    """
    logger_dataframes.info("enter get_filtered_dfs")
    out = {}

    def _collect(lipd_obj):
        # Pull matching data frames from a single LiPD object into `out`.
        lo_meta = lipd_obj.get_metadata()
        lo_dfs = lipd_obj.get_dfs()
        # Only start a search if this lipds file has data frames available.
        if lo_dfs:
            matches = _match_dfs_expr(lo_meta, expr, table_type)
            out.update(_match_filenames_w_dfs(matches, lo_dfs))

    # Did the user name specific .lpd files to process?
    target_files = _check_expr_filename(expr)
    # Determine the table type wanted.
    table_type = "chron" if "chron" in expr else "paleo" if "paleo" in expr else None
    if table_type:
        if target_files:
            # Only the named LiPD files are searched.
            for name in target_files:
                if name in lib:
                    _collect(lib[name])
                else:
                    print("Unable to find LiPD file in Library: {}".format(name))
        else:
            # No file specified: search every LiPD object in the library.
            for lipd_obj in lib.values():
                _collect(lipd_obj)
    logger_dataframes.info("exit get_filtered_dfs")
    return out
Main: Get all data frames that match the given expression :return dict: Filenames and data frames (filtered)
entailment
def _match_dfs_expr(lo_meta, expr, tt):
    """Use the given expression to get all data frames that match the criteria
    (i.e. "paleo measurement tables").

    :param dict lo_meta: Lipd object metadata
    :param str expr: Search expression
    :param str tt: Table type (chron or paleo)
    :return list: All filenames that match the expression
    """
    logger_dataframes.info("enter match_dfs_expr")
    filenames = []
    # Fix: the original computed this section key but never used it; all the
    # per-type keys are now built once up front.
    section = "{}Data".format(tt)
    measurement_key = "{}MeasurementTable".format(tt)
    model_key = "{}Model".format(tt)
    # Top table level. Going through all tables of certain type (chron/paleo).
    # Bug fix: .get(..., {}) guards every outer key access — partially
    # populated metadata previously raised an uncaught KeyError here.
    for k, v in lo_meta.get(section, {}).items():
        # Inner table level. Get data from one specific table
        if "measurement" in expr:
            for k1, v1 in v.get(measurement_key, {}).items():
                try:
                    f = v1["filename"]
                    if f.endswith(".csv"):
                        filenames.append(f)
                except KeyError:
                    # Not concerned if the key wasn't found.
                    logger_dataframes.info("match_dfs_expr: KeyError: filename not found in: {} {}".format(tt, "measurement"))
        elif "ensemble" in expr:
            for k1, v1 in v.get(model_key, {}).items():
                try:
                    f = v1["ensembleTable"]["filename"]
                    if f.endswith(".csv"):
                        filenames.append(f)
                except KeyError:
                    # Not concerned if the key wasn't found.
                    logger_dataframes.info("match_dfs_expr: KeyError: filename not found in: {} {}".format(tt, "ensemble"))
        elif "model" in expr:
            for k1, v1 in v.get(model_key, {}).items():
                try:
                    f = v1["{}ModelTable".format(tt)]["filename"]
                    if f.endswith(".csv"):
                        filenames.append(f)
                except KeyError:
                    # Not concerned if the key wasn't found.
                    logger_dataframes.info("match_dfs_expr: KeyError: filename not found in: {} {}".format(tt, "model"))
        elif "dist" in expr:
            for k1, v1 in v.get(model_key, {}).items():
                for k2, v2 in v1.get("distribution", {}).items():
                    try:
                        f = v2["filename"]
                        if f.endswith(".csv"):
                            filenames.append(f)
                    except KeyError:
                        # Not concerned if the key wasn't found.
                        logger_dataframes.info(
                            "match_dfs_expr: KeyError: filename not found in: {} {}".format(tt, "dist"))
    logger_dataframes.info("exit match_dfs_expr")
    return filenames
Use the given expression to get all data frames that match the criteria (i.e. "paleo measurement tables") :param dict lo_meta: Lipd object metadata :param str expr: Search expression :param str tt: Table type (chron or paleo) :return list: All filenames that match the expression
entailment
def _match_filenames_w_dfs(filenames, lo_dfs):
    """Match a list of filenames to their data frame counterparts.

    :param list filenames: Filenames of data frames to retrieve
    :param dict lo_dfs: All data frames, grouped under "chronData"/"paleoData"
    :return dict: Filenames and data frames (filtered)
    """
    logger_dataframes.info("enter match_filenames_w_dfs")
    matched = {}
    for name in filenames:
        try:
            # Each filename lives in at most one group; stop at the first hit.
            for group in ("chronData", "paleoData"):
                if name in lo_dfs[group]:
                    matched[name] = lo_dfs[group][name]
                    break
        except KeyError:
            # A group key itself is missing from lo_dfs; skip this filename.
            logger_dataframes.info("filter_dfs: KeyError: missing data frames keys")
    logger_dataframes.info("exit match_filenames_w_dfs")
    return matched
Match a list of filenames to their data frame counterparts. Return data frames :param list filenames: Filenames of data frames to retrieve :param dict lo_dfs: All data frames :return dict: Filenames and data frames (filtered)
entailment
def _check_expr_filename(expr): """ Split the expression and look to see if there's a specific filename that the user wants to process. :param str expr: Search expression :return str: Filename or None """ expr_lst = expr.split() f = [x for x in expr_lst if x not in DATA_FRAMES and x.endswith(".lpd")] return f
Split the expression and look to see if there's a specific filename that the user wants to process. :param str expr: Search expression :return str: Filename or None
entailment