Dataset schema: _id (string, 2-7 chars), title (string, 1-88 chars), partition (string, 3 values), text (string, 75-19.8k chars), language (string, 1 value: python), meta_information (dict).
q11300
wget
train
def wget(url):
    """ Download the page into a string """
    import urllib.request
    request = urllib.request.urlopen(url)
    filestring = request.read()
    return filestring
python
{ "resource": "" }
q11301
autodecode
train
def autodecode(b):
    """ Try to decode ``bytes`` to text - try default encoding first,
    otherwise try to autodetect

    Args:
        b (bytes): byte string

    Returns:
        str: decoded text string
    """
    import warnings
    import chardet

    try:
        return b.decode()
    except UnicodeError:
        result = chardet.detect(b)
        if result['confidence'] < 0.95:
            warnings.warn('autodecode failed with utf-8; guessing %s'
                          % result['encoding'])
        return b.decode(result['encoding'])
python
{ "resource": "" }
q11302
remove_directories
train
def remove_directories(list_of_paths):
    """ Removes non-leafs from a list of directory paths """
    found_dirs = set('/')
    for path in list_of_paths:
        dirs = path.strip().split('/')
        for i in range(2, len(dirs)):
            found_dirs.add('/'.join(dirs[:i]))
    paths = [path for path in list_of_paths
             if (path.strip() not in found_dirs) and path.strip()[-1] != '/']
    return paths
python
{ "resource": "" }
q11303
Subprocess._check_file_is_under_workingdir
train
def _check_file_is_under_workingdir(filename, wdir):
    """ Raise error if input is being staged to a location not underneath
    the working dir
    """
    p = filename
    if not os.path.isabs(p):
        p = os.path.join(wdir, p)
    targetpath = os.path.realpath(p)
    wdir = os.path.realpath(wdir)
    common = os.path.commonprefix([wdir, targetpath])
    if len(common) < len(wdir):
        raise exceptions.PathError(
            "The subprocess engine does not support input files with absolute paths")
    return p
python
{ "resource": "" }
q11304
FileReferenceBase._get_access_type
train
def _get_access_type(self, mode):
    """ Make sure mode is appropriate; return 'b' for binary access and
    't' for text
    """
    access_type = None
    for char in mode:
        # figure out whether it's binary or text access
        if char in 'bt':
            if access_type is not None:
                raise IOError('File mode "%s" contains contradictory flags'
                              % mode)
            access_type = char
        elif char not in 'rbt':
            raise NotImplementedError(
                '%s objects are read-only; unsupported mode "%s"'
                % (type(self), mode))
    if access_type is None:
        access_type = 't'
    return access_type
python
{ "resource": "" }
q11305
getclosurevars
train
def getclosurevars(func):
    """
    Get the mapping of free variables to their current values.

    Returns a dict of dicts mapping the current nonlocal, global and
    builtin references as seen by the body of the function. A final set
    of unbound names that could not be resolved is also provided.

    Note: Modified function from the Python 3.5 inspect standard library
    module.

    Copyright (c) 2001-2017 Python Software Foundation; All Rights
    Reserved. See also py-cloud-compute-cannon/NOTICES.
    """
    if inspect.ismethod(func):
        func = func.__func__
    elif not inspect.isroutine(func):
        raise TypeError("'{!r}' is not a Python function".format(func))

    # AMVMOD: deal with python 2 builtins that don't define these
    code = getattr(func, '__code__', None)
    closure = getattr(func, '__closure__', None)
    co_names = getattr(code, 'co_names', ())
    glb = getattr(func, '__globals__', {})

    # Nonlocal references are named in co_freevars and resolved
    # by looking them up in __closure__ by positional index
    if closure is None:
        nonlocal_vars = {}
    else:
        nonlocal_vars = {var: cell.cell_contents
                         for var, cell in zip(code.co_freevars, closure)}

    # Global and builtin references are named in co_names and resolved
    # by looking them up in __globals__ or __builtins__
    global_ns = glb
    builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
    if inspect.ismodule(builtin_ns):
        builtin_ns = builtin_ns.__dict__
    global_vars = {}
    builtin_vars = {}
    unbound_names = set()
    for name in co_names:
        if name in ("None", "True", "False"):
            # Because these used to be builtins instead of keywords, they
            # may still show up as name references. We ignore them.
            continue
        try:
            global_vars[name] = global_ns[name]
        except KeyError:
            try:
                builtin_vars[name] = builtin_ns[name]
            except KeyError:
                unbound_names.add(name)

    return {'nonlocal': nonlocal_vars,
            'global': global_vars,
            'builtin': builtin_vars,
            'unbound': unbound_names}
python
{ "resource": "" }
q11306
EngineBase.dump_all_outputs
train
def dump_all_outputs(self, job, target, abspaths=None):
    """ Default dumping strategy - potentially slow for large numbers of
    files. Subclasses should offer faster implementations, if available.
    """
    from pathlib import Path

    root = Path(native_str(target))
    for outputpath, outputfile in job.get_output().items():
        path = Path(native_str(outputpath))

        # redirect absolute paths into the appropriate subdirectory
        if path.is_absolute():
            if abspaths:
                path = Path(native_str(abspaths), *path.parts[1:])
            else:
                continue

        dest = root / path
        if not dest.parent.is_dir():
            dest.parent.mkdir(parents=True)
        if dest.is_file():
            dest.unlink()
        try:
            outputfile.put(str(dest))
        except IsADirectoryError:
            if not dest.is_dir():
                dest.mkdir(parents=True)
python
{ "resource": "" }
q11307
EngineBase.launch
train
def launch(self, image, command, **kwargs):
    """ Create a job on this engine

    Args:
        image (str): name of the docker image to launch
        command (str): shell command to run
    """
    if isinstance(command, PythonCall):
        return PythonJob(self, image, command, **kwargs)
    else:
        return Job(self, image, command, **kwargs)
python
{ "resource": "" }
q11308
char_width
train
def char_width(char):
    """ Get the display length of a unicode character. """
    if ord(char) < 128:
        return 1
    elif unicodedata.east_asian_width(char) in ('F', 'W'):
        return 2
    elif unicodedata.category(char) in ('Mn',):
        return 0
    else:
        return 1
python
{ "resource": "" }
q11309
display_len
train
def display_len(text):
    """ Get the display length of a string. This can differ from the
    character length if the string contains wide characters. """
    text = unicodedata.normalize('NFD', text)
    return sum(char_width(char) for char in text)
python
{ "resource": "" }
q11310
filter_regex
train
def filter_regex(names, regex):
    """ Return a tuple of strings that match the regular expression
    pattern. """
    return tuple(name for name in names if regex.search(name) is not None)
python
{ "resource": "" }
q11311
filter_wildcard
train
def filter_wildcard(names, pattern):
    """ Return a tuple of strings that match a shell-style wildcard
    pattern. """
    return tuple(name for name in names if fnmatch.fnmatch(name, pattern))
python
{ "resource": "" }
q11312
HelpFeature.match
train
def match(self, obj, attrs):
    """ Only match if the object contains a non-empty docstring. """
    if '__doc__' in attrs:
        lstrip = getattr(obj.__doc__, 'lstrip', False)
        return lstrip and any(lstrip())
python
{ "resource": "" }
q11313
PackagedFunction.prepare_namespace
train
def prepare_namespace(self, func):
    """
    Prepares the function to be run after deserializing it.
    Re-associates any previously bound variables and modules from the
    closure.

    Returns:
        callable: ready-to-call function
    """
    if self.is_imethod:
        to_run = getattr(self.obj, self.imethod_name)
    else:
        to_run = func
    for varname, modulename in self.global_modules.items():
        to_run.__globals__[varname] = __import__(modulename)
    if self.global_closure:
        to_run.__globals__.update(self.global_closure)
    if self.global_functions:
        to_run.__globals__.update(self.global_functions)
    return to_run
python
{ "resource": "" }
q11314
decimal_format
train
def decimal_format(value, TWOPLACES=Decimal(10) ** -2):
    'Format a decimal.Decimal value to 2 decimal places.'
    if not isinstance(value, Decimal):
        value = Decimal(str(value))
    return value.quantize(TWOPLACES)
python
{ "resource": "" }
q11315
notification_preference
train
def notification_preference(obj_type, profile):
    '''Display two radio buttons for turning notifications on or off.
    The default is alerts_on = True.
    '''
    default_alert_value = True
    if not profile:
        alerts_on = default_alert_value
    else:
        notifications = profile.get('notifications', {})
        alerts_on = notifications.get(obj_type, default_alert_value)
    return dict(alerts_on=alerts_on, obj_type=obj_type)
python
{ "resource": "" }
q11316
OldRole.committee_object
train
def committee_object(self):
    '''If the committee id no longer exists in mongo for some reason,
    this function returns None.
    '''
    if 'committee_id' in self:
        _id = self['committee_id']
        return self.document._old_roles_committees.get(_id)
    else:
        return self
python
{ "resource": "" }
q11317
Legislator.context_role
train
def context_role(self, bill=None, vote=None, session=None, term=None):
    '''Tell this legislator object which session to use when calculating
    the legislator's context_role for a given bill or vote.
    '''
    # If no hints were given about the context, look for a related bill,
    # then for a related vote.
    if not any([bill, vote, session, term]):
        try:
            bill = self.bill
        except AttributeError:
            # A vote?
            try:
                vote = self.vote
            except AttributeError:
                # If we're here, this method was called on a Legislator
                # that doesn't have a related bill or vote.
                return ''

    # If we still have no historical point of reference, figuring
    # out the context role is impossible. Return empty string.
    if not any([bill, vote, session, term]):
        return ''

    # First figure out the term.
    if bill is not None:
        term = bill['_term']
    elif vote is not None:
        try:
            _bill = vote.bill
        except AttributeError:
            _bill = BillVote(vote).bill
        if callable(_bill):
            _bill = _bill()
        term = _bill['_term']

    if term is None and session is not None:
        term = term_for_session(self[settings.LEVEL_FIELD], session)

    # Use the term to get the related roles. First look in the current
    # roles list, then fail over to the old_roles list.
    roles = [r for r in self['roles']
             if r.get('type') == 'member' and r.get('term') == term]
    roles = list(filter(None, roles))

    if not roles:
        roles = [r for r in self.get('old_roles', {}).get(term, [])
                 if r.get('type') == 'member']
        roles = list(filter(None, roles))

    if not roles:
        # Legislator had no roles for this term. If there is a related
        # bill or vote, this shouldn't happen, but could if the
        # legislator's roles got deleted.
        return ''

    # If there's only one applicable role, we're done.
    if len(roles) == 1:
        role = roles.pop()
        self['context_role'] = role
        return role

    # If only one of term or session is given and there are multiple roles:
    if not list(filter(None, [bill, vote])):
        if term is not None:
            role = roles[0]
            self['context_role'] = role
            return role

    # Below, use the date of the related bill or vote to determine
    # which (of multiple) roles applies.

    # Get the context date.
    if session is not None:
        # If we're here, we have multiple roles for a single session.
        # Try to find the correct one in self.metadata, else give up.
        session_data = self.metadata['session_details'][session]
        for role in roles:
            role_start = role.get('start_date')
            role_end = role.get('end_date')

            # Return the first role that overlaps at all with the
            # session.
            session_start = session_data.get('start_date')
            session_end = session_data.get('end_date')
            if session_start and session_end:
                started_during = (role_start < session_start < role_end)
                ended_during = (role_start < session_end < role_end)
                if started_during or ended_during:
                    self['context_role'] = role
                    return role
            else:
                continue

        # Return first role from the session?
        role = roles[0]
        self['context_role'] = role
        return role

    if vote is not None:
        date = vote['date']
    if bill is not None:
        date = bill['action_dates']['first']

    dates_exist = False
    for role in roles:
        start_date = role.get('start_date')
        end_date = role.get('end_date')
        if start_date and end_date:
            dates_exist = True
            if start_date < date < end_date:
                self['context_role'] = role
                return role

    if dates_exist:
        # If we're here, the context date didn't fall into any of the
        # legislator's role date ranges.
        return ''
    else:
        # Here the roles didn't have date ranges. Return the last one?
        role = roles.pop()
        self['context_role'] = role
        return role

    return ''
python
{ "resource": "" }
q11318
Legislator.old_roles_manager
train
def old_roles_manager(self):
    '''Return old roles, grouped first by term, then by chamber, then
    by type.'''
    wrapper = self._old_role_wrapper
    chamber_getter = operator.methodcaller('get', 'chamber')
    for term, roles in self.get('old_roles', {}).items():
        chamber_roles = defaultdict(lambda: defaultdict(list))
        for chamber, roles in itertools.groupby(roles, chamber_getter):
            for role in roles:
                role = wrapper(role)
                typeslug = role['type'].lower().replace(' ', '_')
                chamber_roles[chamber][typeslug].append(role)
        yield term, chamber_roles
python
{ "resource": "" }
q11319
NameMatcher._normalize
train
def _normalize(self, name):
    """
    Normalizes a legislator name by stripping titles from the front,
    converting to lowercase and removing punctuation.
    """
    name = re.sub(
        r'^(Senator|Representative|Sen\.?|Rep\.?|'
        r'Hon\.?|Right Hon\.?|Mr\.?|Mrs\.?|Ms\.?|L\'hon\.?|'
        r'Assembly(member|man|woman)) ',
        '', name)
    return name.strip().lower().replace('.', '')
python
{ "resource": "" }
q11320
NameMatcher.match
train
def match(self, name, chamber=None):
    """
    If this matcher has uniquely seen a matching name, return its value.
    Otherwise, return None.

    If chamber is set then the search will be limited to legislators with
    matching chamber. If chamber is None then the search will be
    cross-chamber.
    """
    try:
        return self._manual[chamber][name]
    except KeyError:
        pass

    if chamber == 'joint':
        chamber = None

    try:
        return self._codes[chamber][name]
    except KeyError:
        pass

    if chamber not in self._names:
        logger.warning("Chamber %s is invalid for a legislator." % chamber)
        return None

    name = self._normalize(name)
    return self._names[chamber].get(name, None)
python
{ "resource": "" }
q11321
get_scraper
train
def get_scraper(mod_path, scraper_type):
    """ import a scraper from the scraper registry """
    # act of importing puts it into the registry
    try:
        module = importlib.import_module(mod_path)
    except ImportError as e:
        raise ScrapeError("could not import %s" % mod_path, e)

    # now find the class within the module
    ScraperClass = None
    for k, v in module.__dict__.items():
        if k.startswith('_'):
            continue
        if getattr(v, 'scraper_type', None) == scraper_type:
            if ScraperClass:
                raise ScrapeError("two %s scrapers found in module %s: %s %s"
                                  % (scraper_type, mod_path, ScraperClass, k))
            ScraperClass = v

    if not ScraperClass:
        raise ScrapeError("no %s scraper found in module %s"
                          % (scraper_type, mod_path))

    return ScraperClass
python
{ "resource": "" }
q11322
Scraper._load_schemas
train
def _load_schemas(self):
    """ load all schemas into schema dict """
    types = ('bill', 'committee', 'person', 'vote', 'event')

    for type in types:
        schema_path = os.path.join(os.path.split(__file__)[0],
                                   '../schemas/%s.json' % type)
        self._schema[type] = json.load(open(schema_path))
        self._schema[type]['properties'][settings.LEVEL_FIELD] = {
            'minLength': 2, 'type': 'string'}

    # bills & votes
    self._schema['bill']['properties']['session']['enum'] = \
        self.all_sessions()
    self._schema['vote']['properties']['session']['enum'] = \
        self.all_sessions()

    # legislators
    terms = [t['name'] for t in self.metadata['terms']]
    # ugly break here b/c this line is nearly impossible to split
    self._schema['person']['properties']['roles'][
        'items']['properties']['term']['enum'] = terms
python
{ "resource": "" }
q11323
Scraper.validate_session
train
def validate_session(self, session, latest_only=False):
    """ Check that a session is present in the metadata dictionary.

    raises :exc:`~billy.scrape.NoDataForPeriod` if session is invalid

    :param session: string representing session to check
    """
    if latest_only:
        if session != self.metadata['terms'][-1]['sessions'][-1]:
            raise NoDataForPeriod(session)

    for t in self.metadata['terms']:
        if session in t['sessions']:
            return True
    raise NoDataForPeriod(session)
python
{ "resource": "" }
q11324
Scraper.validate_term
train
def validate_term(self, term, latest_only=False):
    """ Check that a term is present in the metadata dictionary.

    raises :exc:`~billy.scrape.NoDataForPeriod` if term is invalid

    :param term: string representing term to check
    :param latest_only: if True, will raise exception if term is not
        the current term (default: False)
    """
    if latest_only:
        if term == self.metadata['terms'][-1]['name']:
            return True
        else:
            raise NoDataForPeriod(term)
    for t in self.metadata['terms']:
        if term == t['name']:
            return True
    raise NoDataForPeriod(term)
python
{ "resource": "" }
q11325
SourcedObject.add_source
train
def add_source(self, url, **kwargs):
    """
    Add a source URL from which data related to this object was scraped.

    :param url: the location of the source
    """
    self['sources'].append(dict(url=url, **kwargs))
python
{ "resource": "" }
q11326
PlaintextColumns._get_column_ends
train
def _get_column_ends(self):
    '''Guess where the ends of the columns lie.
    '''
    ends = collections.Counter()
    for line in self.text.splitlines():
        for matchobj in re.finditer(r'\s{2,}', line.lstrip()):
            ends[matchobj.end()] += 1
    return ends
python
{ "resource": "" }
q11327
PlaintextColumns._get_column_boundaries
train
def _get_column_boundaries(self):
    '''Use the guessed ends to guess the boundaries of the plain text
    columns.
    '''
    # Try to figure out the most common column boundaries.
    ends = self._get_column_ends()
    if not ends:
        # If there aren't even any nontrivial sequences of whitespace
        # dividing text, there may be just one column. In that case,
        # return a single span, effectively the whole line.
        return [slice(None, None)]

    most_common = []
    threshold = self.threshold
    for k, v in collections.Counter(ends.values()).most_common():
        if k >= threshold:
            most_common.append(k)

    if most_common:
        boundaries = []
        for k, v in ends.items():
            if v in most_common:
                boundaries.append(k)
    else:
        # Here there weren't enough boundaries to guess the most common
        # ones, so just use the apparent boundaries. In other words, we
        # have only 1 row. Potentially a source of inaccuracy.
        boundaries = list(ends.keys())

    # Convert the boundaries into a list of span slices.
    boundaries.sort()
    last_boundary = boundaries[-1]
    boundaries = zip([0] + boundaries, boundaries)
    boundaries = list(itertools.starmap(slice, boundaries))

    # And get from the last boundary to the line ending.
    boundaries.append(slice(last_boundary, None))
    return boundaries
python
{ "resource": "" }
q11328
PlaintextColumns.getcells
train
def getcells(self, line):
    '''Using self.boundaries, extract cells from the given line.
    '''
    for boundary in self.boundaries:
        cell = line.lstrip()[boundary].strip()
        if cell:
            for cell in re.split(r'\s{3,}', cell):
                yield cell
        else:
            yield None
python
{ "resource": "" }
q11329
PlaintextColumns.rows
train
def rows(self):
    '''Returns an iterator of row tuples.
    '''
    for line in self.text.splitlines():
        yield tuple(self.getcells(line))
python
{ "resource": "" }
q11330
PlaintextColumns.cells
train
def cells(self):
    '''Returns an iterator of all cells in the table.
    '''
    for line in self.text.splitlines():
        for cell in self.getcells(line):
            yield cell
python
{ "resource": "" }
q11331
PaginatorBase.range_end
train
def range_end(self):
    '''The last result number shown on this page, e.g. the 50 in
    "Showing 40 - 50 of 234 results".
    '''
    count = self.count
    range_end = self.range_start + self.limit - 1
    if count < range_end:
        range_end = count
    return range_end
python
{ "resource": "" }
q11332
GenericIDMatcher.learn_ids
train
def learn_ids(self, item_list):
    """ read in already set ids on objects """
    self._reset_sequence()
    for item in item_list:
        key = self.nondup_key_for_item(item)
        self.ids[key] = item[self.id_key]
python
{ "resource": "" }
q11333
GenericIDMatcher.set_ids
train
def set_ids(self, item_list):
    """ set ids on an object, using internal mapping then new ids """
    self._reset_sequence()
    for item in item_list:
        key = self.nondup_key_for_item(item)
        item[self.id_key] = self.ids.get(key) or self._get_next_id()
python
{ "resource": "" }
q11334
import_committees_from_legislators
train
def import_committees_from_legislators(current_term, abbr):
    """ create committees from legislators that have committee roles """
    # for all current legislators
    for legislator in db.legislators.find({'roles': {'$elemMatch': {
            'term': current_term, settings.LEVEL_FIELD: abbr}}}):

        # for all committee roles
        for role in legislator['roles']:
            if (role['type'] == 'committee member' and
                    'committee_id' not in role):

                spec = {settings.LEVEL_FIELD: abbr,
                        'chamber': role['chamber'],
                        'committee': role['committee']}
                if 'subcommittee' in role:
                    spec['subcommittee'] = role['subcommittee']

                committee = db.committees.find_one(spec)

                if not committee:
                    committee = spec
                    committee['_type'] = 'committee'
                    # copy LEVEL_FIELD from legislator to committee
                    committee[settings.LEVEL_FIELD] = \
                        legislator[settings.LEVEL_FIELD]
                    committee['members'] = []
                    committee['sources'] = []
                    if 'subcommittee' not in committee:
                        committee['subcommittee'] = None
                    insert_with_id(committee)

                for member in committee['members']:
                    if member['leg_id'] == legislator['leg_id']:
                        break
                else:
                    committee['members'].append(
                        {'name': legislator['full_name'],
                         'leg_id': legislator['leg_id'],
                         'role': role.get('position') or 'member'})
                    for source in legislator['sources']:
                        if source not in committee['sources']:
                            committee['sources'].append(source)
                    db.committees.save(committee, safe=True)

                role['committee_id'] = committee['_id']

        db.legislators.save(legislator, safe=True)
python
{ "resource": "" }
q11335
Bill.add_sponsor
train
def add_sponsor(self, type, name, **kwargs):
    """
    Associate a sponsor with this bill.

    :param type: the type of sponsorship, e.g. 'primary', 'cosponsor'
    :param name: the name of the sponsor as provided by the official
        source
    """
    self['sponsors'].append(dict(type=type, name=name, **kwargs))
python
{ "resource": "" }
q11336
Bill.add_version
train
def add_version(self, name, url, mimetype=None, on_duplicate='error',
                **kwargs):
    """
    Add a version of the text of this bill.

    :param name: a name given to this version of the text, e.g.
        'As Introduced', 'Version 2', 'As amended', 'Enrolled'
    :param url: the location of this version on the legislative website.
    :param mimetype: MIME type of the document
    :param on_duplicate: What to do if a duplicate is seen:
        error - default option, raises a ValueError
        ignore - add the document twice (rarely the right choice)
        use_new - use the new name, removing the old document
        use_old - use the old name, not adding the new document

    If multiple formats are provided, a good rule of thumb is to prefer
    text, followed by html, followed by pdf/word/etc.
    """
    if not mimetype:
        raise ValueError('mimetype parameter to add_version is required')

    if on_duplicate != 'ignore':
        if url in self._seen_versions:
            if on_duplicate == 'error':
                raise ValueError('duplicate version url %s' % url)
            elif on_duplicate == 'use_new':
                # delete the old version
                self['versions'] = [v for v in self['versions']
                                    if v['url'] != url]
            elif on_duplicate == 'use_old':
                return  # do nothing
        self._seen_versions.add(url)

    d = dict(name=name, url=url, mimetype=mimetype, **kwargs)
    self['versions'].append(d)
python
{ "resource": "" }
q11337
Bill.add_action
train
def add_action(self, actor, action, date, type=None, committees=None,
               legislators=None, **kwargs):
    """
    Add an action that was performed on this bill.

    :param actor: a string representing who performed the action.
        If the action is associated with one of the chambers this
        should be 'upper' or 'lower'. Alternatively, this could be
        the name of a committee, a specific legislator, or an outside
        actor such as 'Governor'.
    :param action: a string representing the action performed, e.g.
        'Introduced', 'Signed by the Governor', 'Amended'
    :param date: the date/time this action was performed.
    :param type: a type classification for this action
    :param committees: a committee or list of committees to associate
        with this action
    """

    def _cleanup_list(obj, default):
        if not obj:
            obj = default
        elif isinstance(obj, string_types):
            obj = [obj]
        elif not isinstance(obj, list):
            obj = list(obj)
        return obj

    type = _cleanup_list(type, ['other'])
    committees = _cleanup_list(committees, [])
    legislators = _cleanup_list(legislators, [])

    if 'committee' in kwargs:
        raise ValueError("invalid param 'committee' passed to add_action, "
                         "must use committees")

    if isinstance(committees, string_types):
        committees = [committees]

    related_entities = []
    # OK, let's work some magic.
    for committee in committees:
        related_entities.append({
            "type": "committee",
            "name": committee
        })
    for legislator in legislators:
        related_entities.append({
            "type": "legislator",
            "name": legislator
        })

    self['actions'].append(dict(actor=actor, action=action, date=date,
                                type=type,
                                related_entities=related_entities,
                                **kwargs))
python
{ "resource": "" }
q11338
Bill.add_companion
train
def add_companion(self, bill_id, session=None, chamber=None):
    """
    Associate another bill with this one. If session isn't set it will
    be set to self['session'].
    """
    companion = {'bill_id': bill_id,
                 'session': session or self['session'],
                 'chamber': chamber}
    self['companions'].append(companion)
python
{ "resource": "" }
q11339
metadata
train
def metadata(abbr, __metadata=__metadata):
    """
    Grab the metadata for the given two-letter abbreviation.
    """
    # This data should change very rarely and is queried very often so
    # cache it here
    abbr = abbr.lower()
    if abbr in __metadata:
        return __metadata[abbr]
    rv = db.metadata.find_one({'_id': abbr})
    __metadata[abbr] = rv
    return rv
python
{ "resource": "" }
q11340
cd
train
@contextmanager
def cd(path):
    '''Context manager: create ``path`` if it doesn't exist, chdir into
    it, and restore the original working directory on exit.'''
    old_dir = os.getcwd()
    try:
        os.makedirs(path)
    except OSError:
        pass
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(old_dir)
python
{ "resource": "" }
q11341
_get_property_dict
train
def _get_property_dict(schema):
    """ given a schema object produce a nested dictionary of fields """
    pdict = {}
    for k, v in schema['properties'].items():
        pdict[k] = {}
        if 'items' in v and 'properties' in v['items']:
            pdict[k] = _get_property_dict(v['items'])
    pdict[settings.LEVEL_FIELD] = {}
    return pdict
python
{ "resource": "" }
q11342
update
train
def update(old, new, collection, sneaky_update_filter=None):
    """ update an existing object with a new one, only saving it and
    setting updated_at if something has changed

    old
        old object
    new
        new object
    collection
        collection to save changed object to
    sneaky_update_filter
        a filter for updates to object that should be ignored; format is
        a dict mapping field names to a comparison function that returns
        True iff there is a change
    """
    # need_save = something has changed
    need_save = False

    locked_fields = old.get('_locked_fields', [])

    for key, value in new.items():
        # don't update locked fields
        if key in locked_fields:
            continue

        if old.get(key) != value:
            if sneaky_update_filter and key in sneaky_update_filter:
                if sneaky_update_filter[key](old[key], value):
                    old[key] = value
                    need_save = True
            else:
                old[key] = value
                need_save = True

            # remove old +key field if this field no longer has a +
            plus_key = '+%s' % key
            if plus_key in old:
                del old[plus_key]
                need_save = True

    if need_save:
        old['updated_at'] = datetime.datetime.utcnow()
        collection.save(old, safe=True)

    return need_save
python
{ "resource": "" }
q11343
convert_timestamps
train
def convert_timestamps(obj):
    """
    Convert unix timestamps in the scraper output to python datetimes
    so that they will be saved properly as Mongo datetimes.
    """
    for key in ('date', 'when', 'end', 'start_date', 'end_date'):
        value = obj.get(key)
        if value:
            try:
                obj[key] = _timestamp_to_dt(value)
            except TypeError:
                raise TypeError("expected float for %s, got %s"
                                % (key, value))

    for key in ('sources', 'actions', 'votes', 'roles'):
        for child in obj.get(key, []):
            convert_timestamps(child)

    return obj
python
{ "resource": "" }
q11344
_make_plus_helper
train
def _make_plus_helper(obj, fields):
    """ add a + prefix to any fields in obj that aren't in fields """
    new_obj = {}

    for key, value in obj.items():
        if key in fields or key.startswith('_'):
            # if there's a subschema apply it to a list or subdict
            if fields.get(key):
                if isinstance(value, list):
                    value = [_make_plus_helper(item, fields[key])
                             for item in value]
            # assign the value (modified potentially) to the new_obj
            new_obj[key] = value
        else:
            # values not in the fields dict get a +
            new_obj['+%s' % key] = value

    return new_obj
python
{ "resource": "" }
q11345
make_plus_fields
train
def make_plus_fields(obj):
    """
    Add a '+' to the key of non-standard fields.

    dispatch to recursive _make_plus_helper based on _type field
    """
    fields = standard_fields.get(obj['_type'], dict())
    return _make_plus_helper(obj, fields)
python
{ "resource": "" }
q11346
Metadata.get_object
train
def get_object(cls, abbr):
    '''
    This particular model needs its own constructor in order to take
    advantage of the metadata cache in billy.util, which would otherwise
    return unwrapped objects.
    '''
    obj = get_metadata(abbr)
    if obj is None:
        msg = 'No metadata found for abbreviation %r' % abbr
        raise DoesNotExist(msg)
    return cls(obj)
python
{ "resource": "" }
q11347
Metadata.committees_legislators
train
def committees_legislators(self, *args, **kwargs):
    '''Return an iterable of committees with all the legislators cached
    for reference in the Committee model. So do a "select_related"
    operation on committee members.
    '''
    committees = list(self.committees(*args, **kwargs))
    legislators = self.legislators({'active': True},
                                   fields=['full_name',
                                           settings.LEVEL_FIELD])
    _legislators = {}
    # This will be a cache of legislator objects used in
    # the committees.html template. Includes ids in each
    # legislator's _all_ids field (if it exists.)
    for obj in legislators:
        if '_all_ids' in obj:
            for _id in obj['_all_ids']:
                _legislators[_id] = obj
        else:
            _legislators[obj['_id']] = obj
    del legislators

    for com in committees:
        com._legislators = _legislators

    return committees
python
{ "resource": "" }
q11348
extract_fields
train
def extract_fields(d, fields, delimiter='|'):
    """ get values out of an object ``d`` for saving to a csv """
    rd = {}
    for f in fields:
        v = d.get(f, None)
        if isinstance(v, (str, unicode)):
            v = v.encode('utf8')
        elif isinstance(v, list):
            v = delimiter.join(v)
        rd[f] = v
    return rd
python
{ "resource": "" }
q11349
region_selection
train
def region_selection(request):
    '''Handle submission of the region selection form in the base
    template.
    '''
    form = get_region_select_form(request.GET)
    abbr = form.data.get('abbr')
    if not abbr or len(abbr) != 2:
        return redirect('homepage')
    return redirect('region', abbr=abbr)
python
{ "resource": "" }
q11350
Committee.add_member
train
def add_member(self, legislator, role='member', **kwargs):
    """
    Add a member to the committee object.

    :param legislator: name of the legislator
    :param role: role that legislator holds in the committee
        (e.g. chairman) default: 'member'
    """
    self['members'].append(dict(name=legislator, role=role, **kwargs))
python
{ "resource": "" }
q11351
Action.action_display
train
def action_display(self):
    '''The action text, with any hyperlinked related entities.'''
    action = self['action']
    annotations = []
    abbr = self.bill[settings.LEVEL_FIELD]
    if 'related_entities' in self:
        for entity in self['related_entities']:
            name = entity['name']
            _id = entity['id']

            # If the importer couldn't ID the entity, skip.
            if _id is None:
                continue

            url = mongoid_2_url(abbr, _id)
            link = '<a href="%s">%s</a>' % (url, name)
            if name in action:
                action = action.replace(entity['name'], link)
            else:
                annotations.append(link)

        if annotations:
            action += ' (%s)' % ', '.join(annotations)

    return action
python
{ "resource": "" }
q11352
ActionsManager._bytype
train
def _bytype(self, action_type, action_spec=None):
    '''Yield the bill's actions of the given action_type, most recent
    first. action_spec is a dictionary of key-value attrs to match.'''
    if action_spec is None:
        action_spec = {}
    for action in reversed(self.bill['actions']):
        if action_type in action['type']:
            if all(action.get(k) == v for k, v in action_spec.items()):
                yield action
python
{ "resource": "" }
q11353
BillVote._legislator_objects
train
def _legislator_objects(self):
    '''A cache of dereferenced legislator objects.
    '''
    kwargs = {}
    id_getter = operator.itemgetter('leg_id')
    ids = []
    for k in ('yes', 'no', 'other'):
        ids.extend(map(id_getter, self[k + '_votes']))
    objs = db.legislators.find({'_all_ids': {'$in': ids}}, **kwargs)

    # Handy to keep a reference to the vote on each legislator.
    objs = list(objs)
    id_cache = {}
    for obj in objs:
        obj.vote = self
        for _id in obj['_all_ids']:
            id_cache[_id] = obj

    return id_cache
python
{ "resource": "" }
q11354
BillVote.legislator_vote_value
train
def legislator_vote_value(self):
    '''If this vote was accessed through the legislator.votes_manager,
    return the value of this legislator's vote.
    '''
    if not hasattr(self, 'legislator'):
        msg = ('legislator_vote_value can only be called '
               'from a vote accessed by legislator.votes_manager.')
        raise ValueError(msg)
    leg_id = self.legislator.id
    for k in ('yes', 'no', 'other'):
        for leg in self[k + '_votes']:
            if leg['leg_id'] == leg_id:
                return k
python
{ "resource": "" }
q11355
BillVote.is_probably_a_voice_vote
train
def is_probably_a_voice_vote(self):
    '''Guess whether this vote is a "voice vote".'''
    if '+voice_vote' in self:
        return True
    if '+vote_type' in self:
        if self['+vote_type'] == 'Voice':
            return True
    if 'voice vote' in self['motion'].lower():
        return True
    return False
python
{ "resource": "" }
q11356
Event.host
train
def host(self):
    '''Return the host committee.
    '''
    _id = None
    for participant in self['participants']:
        if participant['type'] == 'host':
            if set(['participant_type', 'id']) < set(participant):
                # This event uses the id keyname "id".
                if participant['participant_type'] == 'committee':
                    _id = participant['id']
                if _id is None:
                    continue
                return self.committees_dict.get(_id)
            else:
                return participant['participant']
python
{ "resource": "" }
q11357
Event.host_chairs
train
def host_chairs(self):
    '''Returns a list of members that chair the host committee,
    including "co-chair" and "chairperson." This could conceivably
    yield a false positive if the person's title is 'dunce chair'.
    '''
    chairs = []
    # Host is guaranteed to be a committee or none.
    host = self.host()
    if host is None:
        return
    for member, full_member in host.members_objects:
        if 'chair' in member.get('role', '').lower():
            chairs.append((member, full_member))
    return chairs
python
{ "resource": "" }
q11358
Event.host_members
train
def host_members(self):
    '''Return the members of the host committee.
    '''
    host = self.host()
    if host is None:
        return
    for member, full_member in host.members_objects:
        yield full_member
python
{ "resource": "" }
q11359
update_common
train
def update_common(obj, report):
    """ do updated_at checks """
    # updated checks
    if obj['updated_at'] >= yesterday:
        report['_updated_today_count'] += 1
    if obj['updated_at'] >= last_month:
        report['_updated_this_month_count'] += 1
    if obj['updated_at'] >= last_year:
        report['_updated_this_year_count'] += 1
python
{ "resource": "" }
q11360
normalize_rank
train
def normalize_rank(rank):
    """Normalize a rank in order to be schema-compliant."""
    normalized_ranks = {
        'BA': 'UNDERGRADUATE',
        'BACHELOR': 'UNDERGRADUATE',
        'BS': 'UNDERGRADUATE',
        'BSC': 'UNDERGRADUATE',
        'JUNIOR': 'JUNIOR',
        'MAS': 'MASTER',
        'MASTER': 'MASTER',
        'MS': 'MASTER',
        'MSC': 'MASTER',
        'PD': 'POSTDOC',
        'PHD': 'PHD',
        'POSTDOC': 'POSTDOC',
        'SENIOR': 'SENIOR',
        'STAFF': 'STAFF',
        'STUDENT': 'PHD',
        'UG': 'UNDERGRADUATE',
        'UNDERGRADUATE': 'UNDERGRADUATE',
        'VISITING SCIENTIST': 'VISITOR',
        'VISITOR': 'VISITOR',
    }
    if not rank:
        return None
    rank = rank.upper().replace('.', '')
    return normalized_ranks.get(rank, 'OTHER')
python
{ "resource": "" }
q11361
get_recid_from_ref
train
def get_recid_from_ref(ref_obj):
    """Retrieve recid from jsonref reference object.

    If no recid can be parsed, returns None.
    """
    if not isinstance(ref_obj, dict):
        return None
    url = ref_obj.get('$ref', '')
    return maybe_int(url.split('/')[-1])
python
{ "resource": "" }
q11362
absolute_url
train
def absolute_url(relative_url):
    """Returns an absolute URL from a URL relative to the server root.

    The base URL is taken from the Flask app config if present, otherwise
    it falls back to ``http://inspirehep.net``.
    """
    default_server = 'http://inspirehep.net'
    server = current_app.config.get('SERVER_NAME', default_server)
    if not re.match('^https?://', server):
        server = u'http://{}'.format(server)
    return urllib.parse.urljoin(server, relative_url)
python
{ "resource": "" }
q11363
afs_url
train
def afs_url(file_path):
    """Convert a file path to a URL pointing to its path on AFS.

    If ``file_path`` doesn't start with ``/opt/cds-invenio/``, and hence
    is not on AFS, it returns it unchanged. The base AFS path is taken
    from the Flask app config if present, otherwise it falls back to
    ``/afs/cern.ch/project/inspire/PROD``.
    """
    default_afs_path = '/afs/cern.ch/project/inspire/PROD'
    afs_path = current_app.config.get('LEGACY_AFS_PATH', default_afs_path)

    if file_path is None:
        return

    if file_path.startswith('/opt/cds-invenio/'):
        file_path = os.path.relpath(file_path, '/opt/cds-invenio/')
        file_path = os.path.join(afs_path, file_path)
        return urllib.parse.urljoin(
            'file://', urllib.request.pathname2url(file_path.encode('utf-8')))

    return file_path
python
{ "resource": "" }
q11364
strip_empty_values
train
def strip_empty_values(obj):
    """Recursively strips empty values."""
    if isinstance(obj, dict):
        new_obj = {}
        for key, val in obj.items():
            new_val = strip_empty_values(val)
            if new_val is not None:
                new_obj[key] = new_val
        return new_obj or None
    elif isinstance(obj, (list, tuple, set)):
        new_obj = []
        for val in obj:
            new_val = strip_empty_values(val)
            if new_val is not None:
                new_obj.append(new_val)
        return type(obj)(new_obj) or None
    elif obj or obj is False or obj == 0:
        return obj
    else:
        return None
python
{ "resource": "" }
q11365
dedupe_all_lists
train
def dedupe_all_lists(obj, exclude_keys=()):
    """Recursively remove duplicates from all lists.

    Args:
        obj: collection to deduplicate
        exclude_keys (Container[str]): key names to ignore for
            deduplication
    """
    squared_dedupe_len = 10
    if isinstance(obj, dict):
        new_obj = {}
        for key, value in obj.items():
            if key in exclude_keys:
                new_obj[key] = value
            else:
                new_obj[key] = dedupe_all_lists(value)
        return new_obj
    elif isinstance(obj, (list, tuple, set)):
        new_elements = [dedupe_all_lists(v) for v in obj]
        if len(new_elements) < squared_dedupe_len:
            new_obj = dedupe_list(new_elements)
        else:
            new_obj = dedupe_list_of_dicts(new_elements)
        return type(obj)(new_obj)
    else:
        return obj
python
{ "resource": "" }
q11366
normalize_date_aggressively
train
def normalize_date_aggressively(date):
    """Normalize date, stripping date parts until a valid date is
    obtained."""
    def _strip_last_part(date):
        parts = date.split('-')
        return '-'.join(parts[:-1])

    fake_dates = {'0000', '9999'}
    if date in fake_dates:
        return None

    try:
        return normalize_date(date)
    except ValueError:
        if '-' not in date:
            raise
        else:
            new_date = _strip_last_part(date)
            return normalize_date_aggressively(new_date)
python
{ "resource": "" }
q11367
match_country_name_to_its_code
train
def match_country_name_to_its_code(country_name, city=''):
    """Try to match country name with its code.

    Name of the city helps when country_name is "Korea".
    """
    if country_name:
        country_name = country_name.upper().replace('.', '').strip()
        if country_to_iso_code.get(country_name):
            return country_to_iso_code.get(country_name)
        elif country_name == 'KOREA':
            if city.upper() in south_korean_cities:
                return 'KR'
        else:
            for c_code, spellings in countries_alternative_spellings.items():
                for spelling in spellings:
                    if country_name == spelling:
                        return c_code
    return None
python
{ "resource": "" }
q11368
match_us_state
train
def match_us_state(state_string):
    """Try to match a string with one of the states in the US."""
    if state_string:
        state_string = state_string.upper().replace('.', '').strip()
        if us_state_to_iso_code.get(state_string):
            return us_state_to_iso_code.get(state_string)
        else:
            for code, state_spellings in \
                    us_states_alternative_spellings.items():
                for spelling in state_spellings:
                    if state_string == spelling:
                        return code
    return None
python
{ "resource": "" }
q11369
parse_conference_address
train
def parse_conference_address(address_string):
    """Parse a conference address.

    This is a pretty dummy address parser. It only extracts country
    and state (for US) and should be replaced with something better,
    like Google Geocoding.
    """
    geo_elements = address_string.split(',')
    city = geo_elements[0]
    country_name = geo_elements[-1].upper().replace('.', '').strip()
    us_state = None
    state = None
    country_code = None

    # Try to match the country
    country_code = match_country_name_to_its_code(country_name, city)

    if country_code == 'US' and len(geo_elements) > 1:
        us_state = match_us_state(geo_elements[-2].upper().strip()
                                  .replace('.', ''))

    if not country_code:
        # Sometimes the country name stores info about U.S. state
        us_state = match_us_state(country_name)

    if us_state:
        state = us_state
        country_code = 'US'

    return {
        'cities': [
            city,
        ],
        'country_code': country_code,
        'postal_code': None,
        'state': state,
    }
python
{ "resource": "" }
q11370
parse_institution_address
train
def parse_institution_address(address, city, state_province,
                              country, postal_code, country_code):
    """Parse an institution address."""
    address_list = force_list(address)
    state_province = match_us_state(state_province) or state_province
    postal_code = force_list(postal_code)
    country = force_list(country)
    country_code = match_country_code(country_code)

    if isinstance(postal_code, (tuple, list)):
        postal_code = ', '.join(postal_code)

    if isinstance(country, (tuple, list)):
        country = ', '.join(set(country))

    if not country_code and country:
        country_code = match_country_name_to_its_code(country)

    if not country_code and state_province and \
            state_province in us_state_to_iso_code.values():
        country_code = 'US'

    return {
        'cities': force_list(city),
        'country_code': country_code,
        'postal_address': address_list,
        'postal_code': postal_code,
        'state': state_province,
    }
python
{ "resource": "" }
q11371
name
train
def name(self, key, value):
    """Populate the ``name`` key.

    Also populates the ``status``, ``birth_date`` and ``death_date`` keys
    through side effects.
    """
    def _get_title(value):
        c_value = force_single_element(value.get('c', ''))
        if c_value != 'title (e.g. Sir)':
            return c_value

    def _get_value(value):
        a_value = force_single_element(value.get('a', ''))
        q_value = force_single_element(value.get('q', ''))
        return a_value or normalize_name(q_value)

    if value.get('d'):
        dates = value['d']
        try:
            self['death_date'] = normalize_date(dates)
        except ValueError:
            dates = dates.split(' - ')
            if len(dates) == 1:
                dates = dates[0].split('-')
            self['birth_date'] = normalize_date(dates[0])
            self['death_date'] = normalize_date(dates[1])

    self['status'] = force_single_element(value.get('g', '')).lower()

    return {
        'numeration': force_single_element(value.get('b', '')),
        'preferred_name': force_single_element(value.get('q', '')),
        'title': _get_title(value),
        'value': _get_value(value),
    }
python
{ "resource": "" }
q11372
name2marc
train
def name2marc(self, key, value):
    """Populates the ``100`` field.

    Also populates the ``400``, ``880``, and ``667`` fields through side
    effects.
    """
    result = self.get('100', {})

    result['a'] = value.get('value')
    result['b'] = value.get('numeration')
    result['c'] = value.get('title')
    result['q'] = value.get('preferred_name')

    if 'name_variants' in value:
        self['400'] = [{'a': el} for el in value['name_variants']]
    if 'native_names' in value:
        self['880'] = [{'a': el} for el in value['native_names']]
    if 'previous_names' in value:
        prev_names = [
            {'a': u'Formerly {}'.format(prev_name)}
            for prev_name in value['previous_names']
        ]
        self['667'] = prev_names

    return result
python
{ "resource": "" }
q11373
positions
train
def positions(self, key, value):
    """Populate the positions field.

    Also populates the email_addresses field by side effect.
    """
    email_addresses = self.get("email_addresses", [])
    current = None
    record = None

    recid_or_status = force_list(value.get('z'))
    for el in recid_or_status:
        if el.lower() == 'current':
            current = True if value.get('a') else None
        else:
            record = get_record_ref(maybe_int(el), 'institutions')

    rank = normalize_rank(value.get('r'))

    current_email_addresses = force_list(value.get('m'))
    non_current_email_addresses = force_list(value.get('o'))

    email_addresses.extend({
        'value': address,
        'current': True,
    } for address in current_email_addresses)

    email_addresses.extend({
        'value': address,
        'current': False,
    } for address in non_current_email_addresses)

    self['email_addresses'] = email_addresses

    if 'a' not in value:
        return None

    return {
        'institution': value['a'],
        'record': record,
        'curated_relation': True if record is not None else None,
        'rank': rank,
        'start_date': normalize_date(value.get('s')),
        'end_date': normalize_date(value.get('t')),
        'current': current,
    }
python
{ "resource": "" }
q11374
email_addresses2marc
train
def email_addresses2marc(self, key, value):
    """Populate the 595 MARCXML field.

    Also populates the 371 field as a side effect.
    """
    m_or_o = 'm' if value.get('current') else 'o'
    element = {m_or_o: value.get('value')}

    if value.get('hidden'):
        return element
    else:
        self.setdefault('371', []).append(element)
        return None
python
{ "resource": "" }
q11375
email_addresses595
train
def email_addresses595(self, key, value):
    """Populates the ``email_addresses`` field using the 595 MARCXML
    field.

    Also populates ``_private_notes`` as a side effect.
    """
    emails = self.get('email_addresses', [])
    if value.get('o'):
        emails.append({
            'value': value.get('o'),
            'current': False,
            'hidden': True,
        })

    if value.get('m'):
        emails.append({
            'value': value.get('m'),
            'current': True,
            'hidden': True,
        })

    notes = self.get('_private_notes', [])
    new_note = (
        {
            'source': value.get('9'),
            'value': _private_note,
        }
        for _private_note in force_list(value.get('a'))
    )
    notes.extend(new_note)
    self['_private_notes'] = notes

    return emails
python
{ "resource": "" }
q11376
arxiv_categories
train
def arxiv_categories(self, key, value):
    """Populate the ``arxiv_categories`` key.

    Also populates the ``inspire_categories`` key through side effects.
    """
    def _is_arxiv(category):
        return category in valid_arxiv_categories()

    def _is_inspire(category):
        schema = load_schema('elements/inspire_field')
        valid_inspire_categories = schema['properties']['term']['enum']
        return category in valid_inspire_categories

    def _normalize(a_value):
        for category in valid_arxiv_categories():
            if a_value.lower() == category.lower():
                return normalize_arxiv_category(category)

        schema = load_schema('elements/inspire_field')
        valid_inspire_categories = schema['properties']['term']['enum']
        for category in valid_inspire_categories:
            if a_value.lower() == category.lower():
                return category

        field_codes_to_inspire_categories = {
            'a': 'Astrophysics',
            'b': 'Accelerators',
            'c': 'Computing',
            'e': 'Experiment-HEP',
            'g': 'Gravitation and Cosmology',
            'i': 'Instrumentation',
            'l': 'Lattice',
            'm': 'Math and Math Physics',
            'n': 'Theory-Nucl',
            'o': 'Other',
            'p': 'Phenomenology-HEP',
            'q': 'General Physics',
            't': 'Theory-HEP',
            'x': 'Experiment-Nucl',
        }
        return field_codes_to_inspire_categories.get(a_value.lower())

    arxiv_categories = self.get('arxiv_categories', [])
    inspire_categories = self.get('inspire_categories', [])

    for value in force_list(value):
        for a_value in force_list(value.get('a')):
            normalized_a_value = _normalize(a_value)

            if _is_arxiv(normalized_a_value):
                arxiv_categories.append(normalized_a_value)
            elif _is_inspire(normalized_a_value):
                inspire_categories.append({'term': normalized_a_value})

    self['inspire_categories'] = inspire_categories
    return arxiv_categories
python
{ "resource": "" }
q11377
urls
train
def urls(self, key, value):
    """Populate the ``url`` key.

    Also populates the ``ids`` key through side effects.
    """
    description = force_single_element(value.get('y'))
    url = value.get('u')

    linkedin_match = LINKEDIN_URL.match(url)
    twitter_match = TWITTER_URL.match(url)
    wikipedia_match = WIKIPEDIA_URL.match(url)

    if linkedin_match:
        self.setdefault('ids', []).append(
            {
                'schema': 'LINKEDIN',
                'value': unquote_url(linkedin_match.group('page')),
            }
        )
    elif twitter_match:
        self.setdefault('ids', []).append(
            {
                'schema': 'TWITTER',
                'value': twitter_match.group('handle'),
            }
        )
    elif wikipedia_match:
        lang = wikipedia_match.group('lang')
        page = unquote_url(wikipedia_match.group('page'))
        if lang != 'en':
            page = ':'.join([lang, page])

        self.setdefault('ids', []).append(
            {
                'schema': 'WIKIPEDIA',
                'value': page,
            }
        )
    else:
        return {
            'description': description,
            'value': url,
        }
python
{ "resource": "" }
q11378
new_record
train
def new_record(self, key, value):
    """Populate the ``new_record`` key.

    Also populates the ``ids`` key through side effects.
    """
    new_record = self.get('new_record', {})
    ids = self.get('ids', [])

    for value in force_list(value):
        for id_ in force_list(value.get('a')):
            ids.append({
                'schema': 'SPIRES',
                'value': id_,
            })

        new_recid = force_single_element(value.get('d', ''))
        if new_recid:
            new_record = get_record_ref(new_recid, 'authors')

    self['ids'] = ids
    return new_record
python
{ "resource": "" }
q11379
isbns
train
def isbns(self, key, value):
    """Populate the ``isbns`` key."""
    def _get_medium(value):
        def _normalize(medium):
            schema = load_schema('hep')
            valid_media = schema['properties']['isbns']['items']['properties']['medium']['enum']

            medium = medium.lower().replace('-', '').replace(' ', '')
            if medium in valid_media:
                return medium
            elif medium == 'ebook':
                return 'online'
            elif medium == 'paperback':
                return 'softcover'
            return ''

        medium = force_single_element(value.get('b', ''))
        normalized_medium = _normalize(medium)

        return normalized_medium

    def _get_isbn(value):
        a_value = force_single_element(value.get('a', ''))
        normalized_a_value = a_value.replace('.', '')
        if normalized_a_value:
            return normalize_isbn(normalized_a_value)

    return {
        'medium': _get_medium(value),
        'value': _get_isbn(value),
    }
python
{ "resource": "" }
q11380
dois
train
def dois(self, key, value):
    """Populate the ``dois`` key.

    Also populates the ``persistent_identifiers`` key through side
    effects.
    """
    def _get_first_non_curator_source(sources):
        sources_without_curator = [el for el in sources
                                   if el.upper() != 'CURATOR']
        return force_single_element(sources_without_curator)

    def _get_material(value):
        MATERIAL_MAP = {
            'ebook': 'publication',
        }

        q_value = force_single_element(value.get('q', ''))
        normalized_q_value = q_value.lower()

        return MATERIAL_MAP.get(normalized_q_value, normalized_q_value)

    def _is_doi(id_, type_):
        return (not type_ or type_.upper() == 'DOI') and is_doi(id_)

    def _is_handle(id_, type_):
        return (not type_ or type_.upper() == 'HDL') and is_handle(id_)

    dois = self.get('dois', [])
    persistent_identifiers = self.get('persistent_identifiers', [])

    values = force_list(value)
    for value in values:
        id_ = force_single_element(value.get('a', ''))
        material = _get_material(value)
        schema = force_single_element(value.get('2', ''))
        sources = force_list(value.get('9'))
        source = _get_first_non_curator_source(sources)

        if _is_doi(id_, schema):
            dois.append({
                'material': material,
                'source': source,
                'value': normalize_doi(id_),
            })
        else:
            schema = 'HDL' if _is_handle(id_, schema) else schema
            persistent_identifiers.append({
                'material': material,
                'schema': schema,
                'source': source,
                'value': id_,
            })

    self['persistent_identifiers'] = persistent_identifiers
    return dois
python
{ "resource": "" }
q11381
texkeys
train
def texkeys(self, key, value):
    """Populate the ``texkeys`` key.

    Also populates the ``external_system_identifiers`` and
    ``_desy_bookkeeping`` keys through side effects.
    """
    def _is_oai(id_, schema):
        return id_.startswith('oai:')

    def _is_desy(id_, schema):
        return id_ and schema in ('DESY',)

    def _is_texkey(id_, schema):
        return id_ and schema in ('INSPIRETeX', 'SPIRESTeX')

    texkeys = self.get('texkeys', [])
    external_system_identifiers = self.get('external_system_identifiers', [])
    _desy_bookkeeping = self.get('_desy_bookkeeping', [])

    values = force_list(value)
    for value in values:
        ids = force_list(value.get('a', ''))
        other_ids = force_list(value.get('z', ''))
        schema = force_single_element(value.get('9', ''))

        for id_ in ids:
            id_ = id_.strip()
            if not id_:
                continue

            if _is_texkey(id_, schema):
                texkeys.insert(0, id_)
            elif _is_oai(id_, schema):
                continue  # XXX: ignored.
            elif _is_desy(id_, schema):
                _desy_bookkeeping.append({'identifier': id_})
            else:
                external_system_identifiers.insert(0, {
                    'schema': schema,
                    'value': id_,
                })

        for id_ in other_ids:
            id_ = id_.strip()
            if not id_:
                continue

            if _is_texkey(id_, schema):
                texkeys.append(id_)
            elif _is_oai(id_, schema):
                continue  # XXX: ignored.
            elif _is_desy(id_, schema):
                _desy_bookkeeping.append({'identifier': id_})
            else:
                external_system_identifiers.append({
                    'schema': schema,
                    'value': id_,
                })

    self['external_system_identifiers'] = external_system_identifiers
    self['_desy_bookkeeping'] = _desy_bookkeeping
    return texkeys
python
{ "resource": "" }
q11382
arxiv_eprints
train
def arxiv_eprints(self, key, value):
    """Populate the ``arxiv_eprints`` key.

    Also populates the ``report_numbers`` key through side effects.
    """
    def _get_clean_arxiv_eprint(id_):
        return id_.split(':')[-1]

    def _is_arxiv_eprint(id_, source):
        return source.lower() == 'arxiv'

    def _is_hidden_report_number(other_id, source):
        return other_id

    def _get_clean_source(source):
        if source == 'arXiv:reportnumber':
            return 'arXiv'
        return source

    arxiv_eprints = self.get('arxiv_eprints', [])
    report_numbers = self.get('report_numbers', [])

    values = force_list(value)
    for value in values:
        id_ = force_single_element(value.get('a', ''))
        other_id = force_single_element(value.get('z', ''))
        categories = [normalize_arxiv_category(category)
                      for category in force_list(value.get('c'))]
        source = force_single_element(value.get('9', ''))

        if _is_arxiv_eprint(id_, source):
            arxiv_eprints.append({
                'categories': categories,
                'value': _get_clean_arxiv_eprint(id_),
            })
        elif _is_hidden_report_number(other_id, source):
            report_numbers.append({
                'hidden': True,
                'source': _get_clean_source(source),
                'value': other_id,
            })
        else:
            report_numbers.append({
                'source': _get_clean_source(source),
                'value': id_,
            })

    self['report_numbers'] = report_numbers
    return arxiv_eprints
python
{ "resource": "" }
q11383
languages
train
def languages(self, key, value):
    """Populate the ``languages`` key."""
    languages = self.get('languages', [])

    values = force_list(value.get('a'))
    for value in values:
        for language in RE_LANGUAGE.split(value):
            try:
                name = language.strip().capitalize()
                languages.append(pycountry.languages.get(name=name).alpha_2)
            except KeyError:
                pass

    return languages
python
{ "resource": "" }
q11384
languages2marc
train
def languages2marc(self, key, value):
    """Populate the ``041`` MARC field."""
    return {'a': pycountry.languages.get(alpha_2=value).name.lower()}
python
{ "resource": "" }
q11385
record_affiliations
train
def record_affiliations(self, key, value):
    """Populate the ``record_affiliations`` key."""
    record = get_record_ref(value.get('z'), 'institutions')

    return {
        'curated_relation': record is not None,
        'record': record,
        'value': value.get('a'),
    }
python
{ "resource": "" }
q11386
document_type
train
def document_type(self, key, value):
    """Populate the ``document_type`` key.

    Also populates the ``_collections``, ``citeable``, ``core``,
    ``deleted``, ``refereed``, ``publication_type``, and ``withdrawn``
    keys through side effects.
    """
    schema = load_schema('hep')
    publication_type_schema = schema['properties']['publication_type']
    valid_publication_types = publication_type_schema['items']['enum']

    document_type = self.get('document_type', [])
    publication_type = self.get('publication_type', [])

    a_values = force_list(value.get('a'))
    for a_value in a_values:
        normalized_a_value = a_value.strip().lower()

        if normalized_a_value == 'arxiv':
            continue  # XXX: ignored.
        elif normalized_a_value == 'citeable':
            self['citeable'] = True
        elif normalized_a_value == 'core':
            self['core'] = True
        elif normalized_a_value == 'noncore':
            self['core'] = False
        elif normalized_a_value == 'published':
            self['refereed'] = True
        elif normalized_a_value == 'withdrawn':
            self['withdrawn'] = True
        elif normalized_a_value == 'deleted':
            self['deleted'] = True
        elif normalized_a_value in COLLECTIONS_MAP:
            self.setdefault('_collections', []).append(
                COLLECTIONS_MAP[normalized_a_value])
        elif normalized_a_value in DOCUMENT_TYPE_MAP:
            document_type.append(DOCUMENT_TYPE_MAP[normalized_a_value])
        elif normalized_a_value in valid_publication_types:
            publication_type.append(normalized_a_value)

    c_value = force_single_element(value.get('c', ''))
    normalized_c_value = c_value.strip().lower()

    if normalized_c_value == 'deleted':
        self['deleted'] = True

    self['publication_type'] = publication_type
    return document_type
python
{ "resource": "" }
q11387
document_type2marc
train
def document_type2marc(self, key, value):
    """Populate the ``980`` MARC field."""
    if value in DOCUMENT_TYPE_REVERSE_MAP and DOCUMENT_TYPE_REVERSE_MAP[value]:
        return {'a': DOCUMENT_TYPE_REVERSE_MAP[value]}
python
{ "resource": "" }
q11388
references
train
def references(self, key, value):
    """Populate the ``references`` key."""
    def _has_curator_flag(value):
        normalized_nine_values = [el.upper()
                                  for el in force_list(value.get('9'))]
        return 'CURATOR' in normalized_nine_values

    def _is_curated(value):
        return (force_single_element(value.get('z')) == '1'
                and _has_curator_flag(value))

    def _set_record(el):
        recid = maybe_int(el)
        record = get_record_ref(recid, 'literature')
        rb.set_record(record)

    rb = ReferenceBuilder()
    mapping = [
        ('0', _set_record),
        ('a', rb.add_uid),
        ('b', rb.add_uid),
        ('c', rb.add_collaboration),
        ('e', partial(rb.add_author, role='ed.')),
        ('h', rb.add_refextract_authors_str),
        ('i', rb.add_uid),
        ('k', rb.set_texkey),
        ('m', rb.add_misc),
        ('o', rb.set_label),
        ('p', rb.set_publisher),
        ('q', rb.add_parent_title),
        ('r', rb.add_report_number),
        ('s', rb.set_pubnote),
        ('t', rb.add_title),
        ('x', rb.add_raw_reference),
        ('y', rb.set_year),
    ]

    for field, method in mapping:
        for el in force_list(value.get(field)):
            if el:
                method(el)

    for el in dedupe_list(force_list(value.get('u'))):
        if el:
            rb.add_url(el)

    if _is_curated(value):
        rb.curate()

    if _has_curator_flag(value):
        rb.obj['legacy_curated'] = True

    return rb.obj
python
{ "resource": "" }
q11389
documents
train
def documents(self, key, value):
    """Populate the ``documents`` key.

    Also populates the ``figures`` key through side effects.
    """
    def _is_hidden(value):
        return 'HIDDEN' in [val.upper() for val in value] or None

    def _is_figure(value):
        figures_extensions = ['.png']
        return value.get('f') in figures_extensions

    def _is_fulltext(value):
        return value.get('d', '').lower() == 'fulltext' or None

    def _get_index_and_caption(value):
        match = re.compile(r'(^\d{5})?\s*(.*)').match(value)
        if match:
            return match.group(1), match.group(2)

    def _get_key(value):
        fname = value.get('n', 'document')
        extension = value.get('f', '')

        if fname.endswith(extension):
            return fname
        return fname + extension

    def _get_source(value):
        source = value.get('t', '')
        if source in ('INSPIRE-PUBLIC', 'Main'):
            source = None
        elif source.lower() == 'arxiv':
            return 'arxiv'
        return source

    figures = self.get('figures', [])
    is_context = value.get('f', '').endswith('context')

    if is_context:
        return

    if _is_figure(value):
        index, caption = _get_index_and_caption(value.get('d', ''))
        figures.append({
            'key': _get_key(value),
            'caption': caption,
            'url': afs_url(value.get('a')),
            'order': index,
            'source': 'arxiv',  # XXX: we don't have any other figures on legacy
        })
        self['figures'] = figures
    else:
        return {
            'description': value.get('d') if not _is_fulltext(value) else None,
            'key': _get_key(value),
            'fulltext': _is_fulltext(value),
            'hidden': _is_hidden(force_list(value.get('o'))),
            'url': afs_url(value.get('a')),
            'source': _get_source(value),
        }
python
{ "resource": "" }
q11390
marcxml2record
train
def marcxml2record(marcxml):
    """Convert a MARCXML string to a JSON record.

    Tries to guess which set of rules to use by inspecting the contents of
    the ``980__a`` MARC field, but falls back to HEP in case nothing matches,
    because records belonging to special collections logically belong to the
    Literature collection but don't have ``980__a:HEP``.

    Args:
        marcxml(str): a string containing MARCXML.

    Returns:
        dict: a JSON record converted from the string.
    """
    marcjson = create_record(marcxml, keep_singletons=False)
    collections = _get_collections(marcjson)

    if 'conferences' in collections:
        return conferences.do(marcjson)
    elif 'data' in collections:
        return data.do(marcjson)
    elif 'experiment' in collections:
        return experiments.do(marcjson)
    elif 'hepnames' in collections:
        return hepnames.do(marcjson)
    elif 'institution' in collections:
        return institutions.do(marcjson)
    elif 'job' in collections or 'jobhidden' in collections:
        return jobs.do(marcjson)
    elif 'journals' in collections or 'journalsnew' in collections:
        return journals.do(marcjson)
    return hep.do(marcjson)
python
{ "resource": "" }
q11391
record2marcxml
train
def record2marcxml(record):
    """Convert a JSON record to a MARCXML string.

    Deduces which set of rules to use by parsing the ``$schema`` key, as it
    unequivocally determines which kind of record we have.

    Args:
        record(dict): a JSON record.

    Returns:
        str: a MARCXML string converted from the record.
    """
    schema_name = _get_schema_name(record)

    if schema_name == 'hep':
        marcjson = hep2marc.do(record)
    elif schema_name == 'authors':
        marcjson = hepnames2marc.do(record)
    else:
        raise NotImplementedError(u'JSON -> MARC rules missing for "{}"'.format(schema_name))

    record = RECORD()

    for key, values in sorted(iteritems(marcjson)):
        tag, ind1, ind2 = _parse_key(key)
        if _is_controlfield(tag, ind1, ind2):
            value = force_single_element(values)
            if not isinstance(value, text_type):
                value = text_type(value)
            record.append(CONTROLFIELD(_strip_invalid_chars_for_xml(value), {'tag': tag}))
        else:
            for value in force_list(values):
                datafield = DATAFIELD({'tag': tag, 'ind1': ind1, 'ind2': ind2})
                for code, els in sorted(iteritems(value)):
                    for el in force_list(els):
                        if not isinstance(el, text_type):
                            el = text_type(el)
                        datafield.append(SUBFIELD(_strip_invalid_chars_for_xml(el), {'code': code}))
                record.append(datafield)

    return tostring(record, encoding='utf8', pretty_print=True)
python
{ "resource": "" }
q11392
number_of_pages
train
def number_of_pages(self, key, value):
    """Populate the ``number_of_pages`` key."""
    result = maybe_int(force_single_element(value.get('a', '')))
    if result and result > 0:
        return result
python
{ "resource": "" }
q11393
nonfirst_authors
train
def nonfirst_authors(self, key, value):
    """Populate the ``700`` MARC field.

    Also populates the ``701`` MARC field through side effects.
    """
    field_700 = self.get('700__', [])
    field_701 = self.get('701__', [])

    is_supervisor = any(el.lower().startswith('dir') for el in force_list(value.get('e', '')))
    if is_supervisor:
        field_701.append(_converted_author(value))
    else:
        field_700.append(_converted_author(value))

    self['701__'] = field_701
    return field_700
python
{ "resource": "" }
q11394
urls
train
def urls(self, key, value):
    """Populate the ``8564`` MARC field.

    Also populates the ``FFT`` field through side effects.
    """
    def _is_preprint(value):
        return value.get('y', '').lower() == 'preprint'

    def _is_fulltext(value):
        return value['u'].endswith('.pdf') and value['u'].startswith('http://cds.cern.ch')

    def _is_local_copy(value):
        return 'local copy' in value.get('y', '')

    def _is_ignored_domain(value):
        ignored_domains = [
            'http://cdsweb.cern.ch',
            'http://cms.cern.ch',
            'http://cmsdoc.cern.ch',
            'http://documents.cern.ch',
            'http://preprints.cern.ch',
            'http://cds.cern.ch',
            'http://arxiv.org',
        ]
        return any(value['u'].startswith(domain) for domain in ignored_domains)

    field_8564 = self.get('8564_', [])
    field_FFT = self.get('FFT__', [])

    if 'u' not in value:
        return field_8564

    url = escape_url(value['u'])

    if _is_fulltext(value) and not _is_preprint(value):
        if _is_local_copy(value):
            description = value.get('y', '').replace('local copy', 'on CERN Document Server')
            field_8564.append({
                'u': url,
                'y': description,
            })
        else:
            _, file_name = os.path.split(urllib.parse.urlparse(value['u']).path)
            _, extension = os.path.splitext(file_name)
            field_FFT.append({
                't': 'CDS',
                'a': url,
                'd': value.get('y', ''),
                'n': file_name,
                'f': extension,
            })
    elif not _is_ignored_domain(value):
        field_8564.append({
            'u': url,
            'y': value.get('y'),
        })

    self['FFT__'] = field_FFT
    return field_8564
python
{ "resource": "" }
q11395
titles
train
def titles(self, key, value):
    """Populate the ``titles`` key."""
    if not key.startswith('245'):
        return {
            'source': value.get('9'),
            'subtitle': value.get('b'),
            'title': value.get('a'),
        }

    # The ``245`` title is the main one, so it goes to the front of the list.
    self.setdefault('titles', []).insert(0, {
        'source': value.get('9'),
        'subtitle': value.get('b'),
        'title': value.get('a'),
    })
python
{ "resource": "" }
q11396
title_translations
train
def title_translations(self, key, value):
    """Populate the ``title_translations`` key."""
    return {
        'language': langdetect.detect(value.get('a')),
        'source': value.get('9'),
        'subtitle': value.get('b'),
        'title': value.get('a'),
    }
python
{ "resource": "" }
q11397
titles2marc
train
def titles2marc(self, key, values):
    """Populate the ``246`` MARC field.

    Also populates the ``245`` MARC field through side effects.
    """
    first, rest = values[0], values[1:]

    self.setdefault('245', []).append({
        'a': first.get('title'),
        'b': first.get('subtitle'),
        '9': first.get('source'),
    })

    return [
        {
            'a': value.get('title'),
            'b': value.get('subtitle'),
            '9': value.get('source'),
        } for value in rest
    ]
python
{ "resource": "" }
q11398
title_translations2marc
train
def title_translations2marc(self, key, value):
    """Populate the ``242`` MARC field."""
    return {
        'a': value.get('title'),
        'b': value.get('subtitle'),
        '9': value.get('source'),
    }
python
{ "resource": "" }
q11399
imprints
train
def imprints(self, key, value):
    """Populate the ``imprints`` key."""
    return {
        'place': value.get('a'),
        'publisher': value.get('b'),
        'date': normalize_date_aggressively(value.get('c')),
    }
python
{ "resource": "" }