code
string | signature
string | docstring
string | loss_without_docstring
float64 | loss_with_docstring
float64 | factor
float64 |
|---|---|---|---|---|---|
def _strip_last_part(date):
parts = date.split('-')
return '-'.join(parts[:-1])
fake_dates = {'0000', '9999'}
if date in fake_dates:
return None
try:
return normalize_date(date)
except ValueError:
if '-' not in date:
raise
else:
new_date = _strip_last_part(date)
return normalize_date_aggressively(new_date)
|
def normalize_date_aggressively(date)
|
Normalize date, stripping date parts until a valid date is obtained.
| 3.236566
| 3.020528
| 1.071523
|
if country_name:
country_name = country_name.upper().replace('.', '').strip()
if country_to_iso_code.get(country_name):
return country_to_iso_code.get(country_name)
elif country_name == 'KOREA':
if city.upper() in south_korean_cities:
return 'KR'
else:
for c_code, spellings in countries_alternative_spellings.items():
for spelling in spellings:
if country_name == spelling:
return c_code
return None
|
def match_country_name_to_its_code(country_name, city='')
|
Try to match country name with its code.
Name of the city helps when country_name is "Korea".
| 3.050684
| 3.073332
| 0.992631
|
if state_string:
state_string = state_string.upper().replace('.', '').strip()
if us_state_to_iso_code.get(state_string):
return us_state_to_iso_code.get(state_string)
else:
for code, state_spellings in us_states_alternative_spellings.items():
for spelling in state_spellings:
if state_string == spelling:
return code
return None
|
def match_us_state(state_string)
|
Try to match a string with one of the states in the US.
| 2.633873
| 2.666413
| 0.987796
|
geo_elements = address_string.split(',')
city = geo_elements[0]
country_name = geo_elements[-1].upper().replace('.', '').strip()
us_state = None
state = None
country_code = None
# Try to match the country
country_code = match_country_name_to_its_code(country_name, city)
if country_code == 'US' and len(geo_elements) > 1:
us_state = match_us_state(geo_elements[-2].upper().strip()
.replace('.', ''))
if not country_code:
# Sometimes the country name stores info about U.S. state
us_state = match_us_state(country_name)
if us_state:
state = us_state
country_code = 'US'
return {
'cities': [
city,
],
'country_code': country_code,
'postal_code': None,
'state': state,
}
|
def parse_conference_address(address_string)
|
Parse a conference address.
This is a pretty dummy address parser. It only extracts country
and state (for US) and should be replaced with something better,
like Google Geocoding.
| 3.347648
| 3.24589
| 1.03135
|
address_list = force_list(address)
state_province = match_us_state(state_province) or state_province
postal_code = force_list(postal_code)
country = force_list(country)
country_code = match_country_code(country_code)
if isinstance(postal_code, (tuple, list)):
postal_code = ', '.join(postal_code)
if isinstance(country, (tuple, list)):
country = ', '.join(set(country))
if not country_code and country:
country_code = match_country_name_to_its_code(country)
if not country_code and state_province and state_province in us_state_to_iso_code.values():
country_code = 'US'
return {
'cities': force_list(city),
'country_code': country_code,
'postal_address': address_list,
'postal_code': postal_code,
'state': state_province,
}
|
def parse_institution_address(address, city, state_province,
country, postal_code, country_code)
|
Parse an institution address.
| 2.556016
| 2.5166
| 1.015662
|
def _is_schema_inspire_bai(id_, schema):
return schema == 'INSPIRE BAI'
def _is_schema_inspire_id(id_, schema):
return schema == 'INSPIRE ID'
def _is_schema_spires(id_, schema):
return schema == 'SPIRES'
def _is_schema_linkedin(id, schema):
return schema == 'LINKEDIN'
def _is_schema_twitter(id, schema):
return schema == 'TWITTER'
id_ = value.get('value')
schema = value.get('schema')
if _is_schema_spires(id_, schema):
self.setdefault('970', []).append({'a': id_})
elif _is_schema_linkedin(id_, schema):
self.setdefault('8564', []).append(
{
'u': u'https://www.linkedin.com/in/{id}'.format(id=quote_url(id_)),
'y': 'LINKEDIN',
}
)
elif _is_schema_twitter(id_, schema):
self.setdefault('8564', []).append(
{
'u': u'https://twitter.com/{id}'.format(id=id_),
'y': 'TWITTER',
}
)
elif _is_schema_inspire_id(id_, schema):
return {
'a': id_,
'9': 'INSPIRE',
}
elif _is_schema_inspire_bai(id_, schema):
return {
'a': id_,
'9': 'BAI',
}
else:
return {
'a': id_,
'9': schema,
}
|
def ids2marc(self, key, value)
|
Populate the ``035`` MARC field.
Also populates the ``8564`` and ``970`` MARC field through side effects.
| 2.385497
| 2.210515
| 1.079159
|
def _get_title(value):
c_value = force_single_element(value.get('c', ''))
if c_value != 'title (e.g. Sir)':
return c_value
def _get_value(value):
a_value = force_single_element(value.get('a', ''))
q_value = force_single_element(value.get('q', ''))
return a_value or normalize_name(q_value)
if value.get('d'):
dates = value['d']
try:
self['death_date'] = normalize_date(dates)
except ValueError:
dates = dates.split(' - ')
if len(dates) == 1:
dates = dates[0].split('-')
self['birth_date'] = normalize_date(dates[0])
self['death_date'] = normalize_date(dates[1])
self['status'] = force_single_element(value.get('g', '')).lower()
return {
'numeration': force_single_element(value.get('b', '')),
'preferred_name': force_single_element(value.get('q', '')),
'title': _get_title(value),
'value': _get_value(value),
}
|
def name(self, key, value)
|
Populate the ``name`` key.
Also populates the ``status``, ``birth_date`` and ``death_date`` keys through side effects.
| 3.160582
| 3.004469
| 1.05196
|
result = self.get('100', {})
result['a'] = value.get('value')
result['b'] = value.get('numeration')
result['c'] = value.get('title')
result['q'] = value.get('preferred_name')
if 'name_variants' in value:
self['400'] = [{'a': el} for el in value['name_variants']]
if 'native_names' in value:
self['880'] = [{'a': el} for el in value['native_names']]
if 'previous_names' in value:
prev_names = [
{'a': u'Formerly {}'.format(prev_name)}
for prev_name in value['previous_names']
]
self['667'] = prev_names
return result
|
def name2marc(self, key, value)
|
Populates the ``100`` field.
Also populates the ``400``, ``880``, and ``667`` fields through side
effects.
| 3.806011
| 3.227695
| 1.179173
|
email_addresses = self.get("email_addresses", [])
current = None
record = None
recid_or_status = force_list(value.get('z'))
for el in recid_or_status:
if el.lower() == 'current':
current = True if value.get('a') else None
else:
record = get_record_ref(maybe_int(el), 'institutions')
rank = normalize_rank(value.get('r'))
current_email_addresses = force_list(value.get('m'))
non_current_email_addresses = force_list(value.get('o'))
email_addresses.extend({
'value': address,
'current': True,
} for address in current_email_addresses)
email_addresses.extend({
'value': address,
'current': False,
} for address in non_current_email_addresses)
self['email_addresses'] = email_addresses
if 'a' not in value:
return None
return {
'institution': value['a'],
'record': record,
'curated_relation': True if record is not None else None,
'rank': rank,
'start_date': normalize_date(value.get('s')),
'end_date': normalize_date(value.get('t')),
'current': current,
}
|
def positions(self, key, value)
|
Populate the positions field.
Also populates the email_addresses field by side effect.
| 3.678184
| 3.530628
| 1.041793
|
m_or_o = 'm' if value.get('current') else 'o'
element = {
m_or_o: value.get('value')
}
if value.get('hidden'):
return element
else:
self.setdefault('371', []).append(element)
return None
|
def email_addresses2marc(self, key, value)
|
Populate the 595 MARCXML field.
Also populates the 371 field as a side effect.
| 6.524986
| 5.58241
| 1.168847
|
emails = self.get('email_addresses', [])
if value.get('o'):
emails.append({
'value': value.get('o'),
'current': False,
'hidden': True,
})
if value.get('m'):
emails.append({
'value': value.get('m'),
'current': True,
'hidden': True,
})
notes = self.get('_private_notes', [])
new_note = (
{
'source': value.get('9'),
'value': _private_note,
} for _private_note in force_list(value.get('a'))
)
notes.extend(new_note)
self['_private_notes'] = notes
return emails
|
def email_addresses595(self, key, value)
|
Populates the ``email_addresses`` field using the 595 MARCXML field.
Also populates ``_private_notes`` as a side effect.
| 3.694863
| 3.129433
| 1.180681
|
def _is_arxiv(category):
return category in valid_arxiv_categories()
def _is_inspire(category):
schema = load_schema('elements/inspire_field')
valid_inspire_categories = schema['properties']['term']['enum']
return category in valid_inspire_categories
def _normalize(a_value):
for category in valid_arxiv_categories():
if a_value.lower() == category.lower():
return normalize_arxiv_category(category)
schema = load_schema('elements/inspire_field')
valid_inspire_categories = schema['properties']['term']['enum']
for category in valid_inspire_categories:
if a_value.lower() == category.lower():
return category
field_codes_to_inspire_categories = {
'a': 'Astrophysics',
'b': 'Accelerators',
'c': 'Computing',
'e': 'Experiment-HEP',
'g': 'Gravitation and Cosmology',
'i': 'Instrumentation',
'l': 'Lattice',
'm': 'Math and Math Physics',
'n': 'Theory-Nucl',
'o': 'Other',
'p': 'Phenomenology-HEP',
'q': 'General Physics',
't': 'Theory-HEP',
'x': 'Experiment-Nucl',
}
return field_codes_to_inspire_categories.get(a_value.lower())
arxiv_categories = self.get('arxiv_categories', [])
inspire_categories = self.get('inspire_categories', [])
for value in force_list(value):
for a_value in force_list(value.get('a')):
normalized_a_value = _normalize(a_value)
if _is_arxiv(normalized_a_value):
arxiv_categories.append(normalized_a_value)
elif _is_inspire(normalized_a_value):
inspire_categories.append({'term': normalized_a_value})
self['inspire_categories'] = inspire_categories
return arxiv_categories
|
def arxiv_categories(self, key, value)
|
Populate the ``arxiv_categories`` key.
Also populates the ``inspire_categories`` key through side effects.
| 2.859147
| 2.716483
| 1.052518
|
name_field = self.get('100', {})
if 'd' in name_field:
if int(name_field['d'].split('-')[0]) > int(value.split('-')[0]):
dates_field = ' - '.join([value, name_field['d']])
else:
dates_field = ' - '.join([name_field['d'], value])
else:
dates_field = value
name_field['d'] = dates_field
return name_field
|
def birth_and_death_date2marc(self, key, value)
|
Populate the ``100__d`` MARC field, which includes the birth and the death date.
By not using the decorator ```for_each_value```, the values of the fields
```birth_date``` and ```death_date``` are both added to ```values``` as a list.
| 3.089775
| 2.974053
| 1.03891
|
description = force_single_element(value.get('y'))
url = value.get('u')
linkedin_match = LINKEDIN_URL.match(url)
twitter_match = TWITTER_URL.match(url)
wikipedia_match = WIKIPEDIA_URL.match(url)
if linkedin_match:
self.setdefault('ids', []).append(
{
'schema': 'LINKEDIN',
'value': unquote_url(linkedin_match.group('page')),
}
)
elif twitter_match:
self.setdefault('ids', []).append(
{
'schema': 'TWITTER',
'value': twitter_match.group('handle'),
}
)
elif wikipedia_match:
lang = wikipedia_match.group('lang')
page = unquote_url(wikipedia_match.group('page'))
if lang != 'en':
page = ':'.join([lang, page])
self.setdefault('ids', []).append(
{
'schema': 'WIKIPEDIA',
'value': page,
}
)
else:
return {
'description': description,
'value': url,
}
|
def urls(self, key, value)
|
Populate the ``url`` key.
Also populates the ``ids`` key through side effects.
| 2.562108
| 2.506226
| 1.022297
|
new_record = self.get('new_record', {})
ids = self.get('ids', [])
for value in force_list(value):
for id_ in force_list(value.get('a')):
ids.append({
'schema': 'SPIRES',
'value': id_,
})
new_recid = force_single_element(value.get('d', ''))
if new_recid:
new_record = get_record_ref(new_recid, 'authors')
self['ids'] = ids
return new_record
|
def new_record(self, key, value)
|
Populate the ``new_record`` key.
Also populates the ``ids`` key through side effects.
| 6.566931
| 5.552187
| 1.182765
|
def _is_deleted(value):
return force_single_element(value.get('c', '')).upper() == 'DELETED'
def _is_stub(value):
return not (force_single_element(value.get('a', '')).upper() == 'USEFUL')
deleted = self.get('deleted')
stub = self.get('stub')
for value in force_list(value):
deleted = not deleted and _is_deleted(value)
stub = not stub and _is_stub(value)
self['stub'] = stub
return deleted
|
def deleted(self, key, value)
|
Populate the ``deleted`` key.
Also populates the ``stub`` key through side effects.
| 4.938163
| 4.135978
| 1.193953
|
value = force_list(value)
def _get_ids(value):
ids = {
'i': [],
'j': [],
}
if value.get('ids'):
for _id in value.get('ids'):
if _id.get('schema') == 'INSPIRE ID':
ids['i'].append(_id.get('value'))
elif _id.get('schema') == 'ORCID':
ids['j'].append('ORCID:' + _id.get('value'))
elif _id.get('schema') == 'JACOW':
ids['j'].append(_id.get('value'))
elif _id.get('schema') == 'CERN':
ids['j'].append('CCID-' + _id.get('value')[5:])
return ids
def _get_affiliations(value):
return [
aff.get('value') for aff in value.get('affiliations', [])
]
def _get_affiliations_identifiers(value):
return [
u'{}:{}'.format(aff.get('schema'), aff.get('value')) for aff in value.get('affiliations_identifiers', [])
]
def _get_inspire_roles(value):
values = force_list(value.get('inspire_roles'))
return ['ed.' for role in values if role == 'editor']
def _get_raw_affiliations(value):
return [
aff.get('value') for aff in value.get('raw_affiliations', [])
]
def get_value_100_700(value):
ids = _get_ids(value)
return {
'a': value.get('full_name'),
'e': _get_inspire_roles(value),
'q': value.get('alternative_names'),
'i': ids.get('i'),
'j': ids.get('j'),
'm': value.get('emails'),
't': _get_affiliations_identifiers(value),
'u': _get_affiliations(value),
'v': _get_raw_affiliations(value),
}
def get_value_701(value):
ids = _get_ids(value)
return {
'a': value.get('full_name'),
'q': value.get('alternative_names'),
'i': ids.get('i'),
'j': ids.get('j'),
'u': _get_affiliations(value),
'v': _get_raw_affiliations(value),
}
if len(value) > 1:
self["700"] = []
self["701"] = []
for author in value[1:]:
is_supervisor = 'supervisor' in author.get('inspire_roles', [])
if is_supervisor:
self["701"].append(get_value_701(author))
else:
self["700"].append(get_value_100_700(author))
return get_value_100_700(value[0])
|
def authors2marc(self, key, value)
|
Populate the ``100`` MARC field.
Also populates the ``700`` and the ``701`` MARC fields through side effects.
| 2.234745
| 2.21596
| 1.008477
|
def _get_medium(value):
def _normalize(medium):
schema = load_schema('hep')
valid_media = schema['properties']['isbns']['items']['properties']['medium']['enum']
medium = medium.lower().replace('-', '').replace(' ', '')
if medium in valid_media:
return medium
elif medium == 'ebook':
return 'online'
elif medium == 'paperback':
return 'softcover'
return ''
medium = force_single_element(value.get('b', ''))
normalized_medium = _normalize(medium)
return normalized_medium
def _get_isbn(value):
a_value = force_single_element(value.get('a', ''))
normalized_a_value = a_value.replace('.', '')
if normalized_a_value:
return normalize_isbn(normalized_a_value)
return {
'medium': _get_medium(value),
'value': _get_isbn(value),
}
|
def isbns(self, key, value)
|
Populate the ``isbns`` key.
| 4.111602
| 3.986311
| 1.03143
|
def _get_first_non_curator_source(sources):
sources_without_curator = [el for el in sources if el.upper() != 'CURATOR']
return force_single_element(sources_without_curator)
def _get_material(value):
MATERIAL_MAP = {
'ebook': 'publication',
}
q_value = force_single_element(value.get('q', ''))
normalized_q_value = q_value.lower()
return MATERIAL_MAP.get(normalized_q_value, normalized_q_value)
def _is_doi(id_, type_):
return (not type_ or type_.upper() == 'DOI') and is_doi(id_)
def _is_handle(id_, type_):
return (not type_ or type_.upper() == 'HDL') and is_handle(id_)
dois = self.get('dois', [])
persistent_identifiers = self.get('persistent_identifiers', [])
values = force_list(value)
for value in values:
id_ = force_single_element(value.get('a', ''))
material = _get_material(value)
schema = force_single_element(value.get('2', ''))
sources = force_list(value.get('9'))
source = _get_first_non_curator_source(sources)
if _is_doi(id_, schema):
dois.append({
'material': material,
'source': source,
'value': normalize_doi(id_),
})
else:
schema = 'HDL' if _is_handle(id_, schema) else schema
persistent_identifiers.append({
'material': material,
'schema': schema,
'source': source,
'value': id_,
})
self['persistent_identifiers'] = persistent_identifiers
return dois
|
def dois(self, key, value)
|
Populate the ``dois`` key.
Also populates the ``persistent_identifiers`` key through side effects.
| 2.995548
| 2.7402
| 1.093186
|
return {
'2': 'DOI',
'9': value.get('source'),
'a': value.get('value'),
'q': value.get('material'),
}
|
def dois2marc(self, key, value)
|
Populate the ``0247`` MARC field.
| 7.240866
| 6.925584
| 1.045524
|
return {
'2': value.get('schema'),
'9': value.get('source'),
'a': value.get('value'),
'q': value.get('material'),
}
|
def persistent_identifiers2marc(self, key, value)
|
Populate the ``0247`` MARC field.
| 6.188312
| 6.139181
| 1.008003
|
def _is_oai(id_, schema):
return id_.startswith('oai:')
def _is_desy(id_, schema):
return id_ and schema in ('DESY',)
def _is_texkey(id_, schema):
return id_ and schema in ('INSPIRETeX', 'SPIRESTeX')
texkeys = self.get('texkeys', [])
external_system_identifiers = self.get('external_system_identifiers', [])
_desy_bookkeeping = self.get('_desy_bookkeeping', [])
values = force_list(value)
for value in values:
ids = force_list(value.get('a', ''))
other_ids = force_list(value.get('z', ''))
schema = force_single_element(value.get('9', ''))
for id_ in ids:
id_ = id_.strip()
if not id_:
continue
if _is_texkey(id_, schema):
texkeys.insert(0, id_)
elif _is_oai(id_, schema):
continue # XXX: ignored.
elif _is_desy(id_, schema):
_desy_bookkeeping.append({'identifier': id_})
else:
external_system_identifiers.insert(0, {
'schema': schema,
'value': id_,
})
for id_ in other_ids:
id_ = id_.strip()
if not id_:
continue
if _is_texkey(id_, schema):
texkeys.append(id_)
elif _is_oai(id_, schema):
continue # XXX: ignored.
elif _is_desy(id_, schema):
_desy_bookkeeping.append({'identifier': id_})
else:
external_system_identifiers.append({
'schema': schema,
'value': id_,
})
self['external_system_identifiers'] = external_system_identifiers
self['_desy_bookkeeping'] = _desy_bookkeeping
return texkeys
|
def texkeys(self, key, value)
|
Populate the ``texkeys`` key.
Also populates the ``external_system_identifiers`` and ``_desy_bookkeeping`` keys through side effects.
| 2.585923
| 2.205359
| 1.172563
|
result = []
values = force_list(value)
if values:
value = values[0]
result.append({
'9': 'INSPIRETeX',
'a': value,
})
for value in values[1:]:
result.append({
'9': 'INSPIRETeX',
'z': value,
})
return result
|
def texkeys2marc(self, key, value)
|
Populate the ``035`` MARC field.
| 4.817169
| 4.336767
| 1.110774
|
def _is_scheme_cernkey(id_, schema):
return schema == 'CERNKEY'
def _is_scheme_spires(id_, schema):
return schema == 'SPIRES'
result_035 = self.get('035', [])
id_dict = self.get('id_dict', defaultdict(list))
result_970 = self.get('970', [])
values = force_list(value)
for value in values:
id_ = value.get('value')
schema = value.get('schema')
if _is_scheme_spires(id_, schema):
result_970.append({
'a': id_,
})
elif _is_scheme_cernkey(id_, schema):
result_035.append({
'9': 'CERNKEY',
'z': id_,
})
else:
id_dict[schema].append(id_)
self['970'] = result_970
self['id_dict'] = id_dict
return result_035
|
def external_system_identifiers2marc(self, key, value)
|
Populate the ``035`` MARC field.
Also populates the ``970`` MARC field through side effects and an extra
``id_dict`` dictionary that holds potentially duplicate IDs that are
post-processed in a filter.
| 3.490487
| 2.823097
| 1.236404
|
def _get_clean_arxiv_eprint(id_):
return id_.split(':')[-1]
def _is_arxiv_eprint(id_, source):
return source.lower() == 'arxiv'
def _is_hidden_report_number(other_id, source):
return other_id
def _get_clean_source(source):
if source == 'arXiv:reportnumber':
return 'arXiv'
return source
arxiv_eprints = self.get('arxiv_eprints', [])
report_numbers = self.get('report_numbers', [])
values = force_list(value)
for value in values:
id_ = force_single_element(value.get('a', ''))
other_id = force_single_element(value.get('z', ''))
categories = [normalize_arxiv_category(category) for category
in force_list(value.get('c'))]
source = force_single_element(value.get('9', ''))
if _is_arxiv_eprint(id_, source):
arxiv_eprints.append({
'categories': categories,
'value': _get_clean_arxiv_eprint(id_),
})
elif _is_hidden_report_number(other_id, source):
report_numbers.append({
'hidden': True,
'source': _get_clean_source(source),
'value': other_id,
})
else:
report_numbers.append({
'source': _get_clean_source(source),
'value': id_,
})
self['report_numbers'] = report_numbers
return arxiv_eprints
|
def arxiv_eprints(self, key, value)
|
Populate the ``arxiv_eprints`` key.
Also populates the ``report_numbers`` key through side effects.
| 2.826962
| 2.619738
| 1.079101
|
result_037 = self.get('037', [])
result_035 = self.get('035', [])
result_65017 = self.get('65017', [])
for value in values:
arxiv_id = value.get('value')
arxiv_id = 'arXiv:' + arxiv_id if is_arxiv_post_2007(arxiv_id) else arxiv_id
result_037.append({
'9': 'arXiv',
'a': arxiv_id,
'c': force_single_element(value.get('categories')),
})
result_035.append({
'9': 'arXiv',
'a': 'oai:arXiv.org:' + value.get('value'),
})
categories = force_list(value.get('categories'))
for category in categories:
result_65017.append({
'2': 'arXiv',
'a': category,
})
self['65017'] = result_65017
self['035'] = result_035
return result_037
|
def arxiv_eprints2marc(self, key, values)
|
Populate the ``037`` MARC field.
Also populates the ``035`` and the ``65017`` MARC fields through side effects.
| 2.885427
| 2.379237
| 1.212753
|
def _get_mangled_source(source):
if source == 'arXiv':
return 'arXiv:reportnumber'
return source
source = _get_mangled_source(value.get('source'))
if value.get('hidden'):
return {
'9': source,
'z': value.get('value'),
}
return {
'9': source,
'a': value.get('value'),
}
|
def report_numbers2marc(self, key, value)
|
Populate the ``037`` MARC field.
| 5.465593
| 5.395712
| 1.012951
|
languages = self.get('languages', [])
values = force_list(value.get('a'))
for value in values:
for language in RE_LANGUAGE.split(value):
try:
name = language.strip().capitalize()
languages.append(pycountry.languages.get(name=name).alpha_2)
except KeyError:
pass
return languages
|
def languages(self, key, value)
|
Populate the ``languages`` key.
| 5.089746
| 4.854918
| 1.048369
|
return {'a': pycountry.languages.get(alpha_2=value).name.lower()}
|
def languages2marc(self, key, value)
|
Populate the ``041`` MARC field.
| 9.921303
| 10.86675
| 0.912996
|
record = get_record_ref(value.get('z'), 'institutions')
return {
'curated_relation': record is not None,
'record': record,
'value': value.get('a'),
}
|
def record_affiliations(self, key, value)
|
Populate the ``record_affiliations`` key.
| 11.216563
| 10.669552
| 1.051268
|
schema = load_schema('hep')
publication_type_schema = schema['properties']['publication_type']
valid_publication_types = publication_type_schema['items']['enum']
document_type = self.get('document_type', [])
publication_type = self.get('publication_type', [])
a_values = force_list(value.get('a'))
for a_value in a_values:
normalized_a_value = a_value.strip().lower()
if normalized_a_value == 'arxiv':
continue # XXX: ignored.
elif normalized_a_value == 'citeable':
self['citeable'] = True
elif normalized_a_value == 'core':
self['core'] = True
elif normalized_a_value == 'noncore':
self['core'] = False
elif normalized_a_value == 'published':
self['refereed'] = True
elif normalized_a_value == 'withdrawn':
self['withdrawn'] = True
elif normalized_a_value == 'deleted':
self['deleted'] = True
elif normalized_a_value in COLLECTIONS_MAP:
self.setdefault('_collections', []).append(COLLECTIONS_MAP[normalized_a_value])
elif normalized_a_value in DOCUMENT_TYPE_MAP:
document_type.append(DOCUMENT_TYPE_MAP[normalized_a_value])
elif normalized_a_value in valid_publication_types:
publication_type.append(normalized_a_value)
c_value = force_single_element(value.get('c', ''))
normalized_c_value = c_value.strip().lower()
if normalized_c_value == 'deleted':
self['deleted'] = True
self['publication_type'] = publication_type
return document_type
|
def document_type(self, key, value)
|
Populate the ``document_type`` key.
Also populates the ``_collections``, ``citeable``, ``core``, ``deleted``,
``refereed``, ``publication_type``, and ``withdrawn`` keys through side
effects.
| 2.719714
| 2.321698
| 1.171433
|
if value in DOCUMENT_TYPE_REVERSE_MAP and DOCUMENT_TYPE_REVERSE_MAP[value]:
return {'a': DOCUMENT_TYPE_REVERSE_MAP[value]}
|
def document_type2marc(self, key, value)
|
Populate the ``980`` MARC field.
| 4.762321
| 4.360028
| 1.092268
|
def _has_curator_flag(value):
normalized_nine_values = [el.upper() for el in force_list(value.get('9'))]
return 'CURATOR' in normalized_nine_values
def _is_curated(value):
return force_single_element(value.get('z')) == '1' and _has_curator_flag(value)
def _set_record(el):
recid = maybe_int(el)
record = get_record_ref(recid, 'literature')
rb.set_record(record)
rb = ReferenceBuilder()
mapping = [
('0', _set_record),
('a', rb.add_uid),
('b', rb.add_uid),
('c', rb.add_collaboration),
('e', partial(rb.add_author, role='ed.')),
('h', rb.add_refextract_authors_str),
('i', rb.add_uid),
('k', rb.set_texkey),
('m', rb.add_misc),
('o', rb.set_label),
('p', rb.set_publisher),
('q', rb.add_parent_title),
('r', rb.add_report_number),
('s', rb.set_pubnote),
('t', rb.add_title),
('x', rb.add_raw_reference),
('y', rb.set_year),
]
for field, method in mapping:
for el in force_list(value.get(field)):
if el:
method(el)
for el in dedupe_list(force_list(value.get('u'))):
if el:
rb.add_url(el)
if _is_curated(value):
rb.curate()
if _has_curator_flag(value):
rb.obj['legacy_curated'] = True
return rb.obj
|
def references(self, key, value)
|
Populate the ``references`` key.
| 4.335742
| 4.278977
| 1.013266
|
reference = value.get('reference', {})
pids = force_list(reference.get('persistent_identifiers'))
a_values = ['doi:' + el for el in force_list(reference.get('dois'))]
a_values.extend(['hdl:' + el['value'] for el in pids if el.get('schema') == 'HDL'])
a_values.extend(['urn:' + el['value'] for el in pids if el.get('schema') == 'URN'])
external_ids = force_list(reference.get('external_system_identifiers'))
u_values = force_list(get_value(reference, 'urls.value'))
u_values.extend(CDS_RECORD_FORMAT.format(el['value']) for el in external_ids if el.get('schema') == 'CDS')
u_values.extend(ADS_RECORD_FORMAT.format(el['value']) for el in external_ids if el.get('schema') == 'ADS')
authors = force_list(reference.get('authors'))
e_values = [el['full_name'] for el in authors if el.get('inspire_role') == 'editor']
h_values = [el['full_name'] for el in authors if el.get('inspire_role') != 'editor']
r_values = force_list(reference.get('report_numbers'))
if reference.get('arxiv_eprint'):
arxiv_eprint = reference['arxiv_eprint']
r_values.append('arXiv:' + arxiv_eprint if is_arxiv_post_2007(arxiv_eprint) else arxiv_eprint)
if reference.get('publication_info'):
reference['publication_info'] = convert_new_publication_info_to_old([reference['publication_info']])[0]
journal_title = get_value(reference, 'publication_info.journal_title')
journal_volume = get_value(reference, 'publication_info.journal_volume')
page_start = get_value(reference, 'publication_info.page_start')
page_end = get_value(reference, 'publication_info.page_end')
artid = get_value(reference, 'publication_info.artid')
s_value = build_pubnote(journal_title, journal_volume, page_start, page_end, artid)
m_value = ' / '.join(force_list(reference.get('misc')))
return {
'0': get_recid_from_ref(value.get('record')),
'9': 'CURATOR' if value.get('legacy_curated') else None,
'a': a_values,
'b': get_value(reference, 'publication_info.cnum'),
'c': reference.get('collaborations'),
'e': e_values,
'h': h_values,
'i': reference.get('isbn'),
'k': reference.get('texkey'),
'm': m_value,
'o': reference.get('label'),
'p': get_value(reference, 'imprint.publisher'),
'q': get_value(reference, 'publication_info.parent_title'),
'r': r_values,
's': s_value,
't': get_value(reference, 'title.title'),
'u': u_values,
'x': get_value(value, 'raw_refs.value'),
'y': get_value(reference, 'publication_info.year'),
'z': 1 if value.get('curated_relation') else 0,
}
|
def references2marc(self, key, value)
|
Populate the ``999C5`` MARC field.
| 2.835053
| 2.821749
| 1.004715
|
def _is_hidden(value):
return 'HIDDEN' in [val.upper() for val in value] or None
def _is_figure(value):
figures_extensions = ['.png']
return value.get('f') in figures_extensions
def _is_fulltext(value):
return value.get('d', '').lower() == 'fulltext' or None
def _get_index_and_caption(value):
match = re.compile(r'(^\d{5})?\s*(.*)').match(value)
if match:
return match.group(1), match.group(2)
def _get_key(value):
fname = value.get('n', 'document')
extension = value.get('f', '')
if fname.endswith(extension):
return fname
return fname + extension
def _get_source(value):
source = value.get('t', '')
if source in ('INSPIRE-PUBLIC', 'Main'):
source = None
elif source.lower() == 'arxiv':
return 'arxiv'
return source
figures = self.get('figures', [])
is_context = value.get('f', '').endswith('context')
if is_context:
return
if _is_figure(value):
index, caption = _get_index_and_caption(value.get('d', ''))
figures.append({
'key': _get_key(value),
'caption': caption,
'url': afs_url(value.get('a')),
'order': index,
'source': 'arxiv', # XXX: we don't have any other figures on legacy
})
self['figures'] = figures
else:
return {
'description': value.get('d') if not _is_fulltext(value) else None,
'key': _get_key(value),
'fulltext': _is_fulltext(value),
'hidden': _is_hidden(force_list(value.get('o'))),
'url': afs_url(value.get('a')),
'source': _get_source(value),
}
|
def documents(self, key, value)
|
Populate the ``documents`` key.
Also populates the ``figures`` key through side effects.
| 3.839185
| 3.691251
| 1.040077
|
marcjson = create_record(marcxml, keep_singletons=False)
collections = _get_collections(marcjson)
if 'conferences' in collections:
return conferences.do(marcjson)
elif 'data' in collections:
return data.do(marcjson)
elif 'experiment' in collections:
return experiments.do(marcjson)
elif 'hepnames' in collections:
return hepnames.do(marcjson)
elif 'institution' in collections:
return institutions.do(marcjson)
elif 'job' in collections or 'jobhidden' in collections:
return jobs.do(marcjson)
elif 'journals' in collections or 'journalsnew' in collections:
return journals.do(marcjson)
return hep.do(marcjson)
|
def marcxml2record(marcxml)
|
Convert a MARCXML string to a JSON record.
Tries to guess which set of rules to use by inspecting the contents
of the ``980__a`` MARC field, but falls back to HEP in case nothing
matches, because records belonging to special collections logically
belong to the Literature collection but don't have ``980__a:HEP``.
Args:
marcxml(str): a string containing MARCXML.
Returns:
dict: a JSON record converted from the string.
| 3.166177
| 3.187709
| 0.993245
|
schema_name = _get_schema_name(record)
if schema_name == 'hep':
marcjson = hep2marc.do(record)
elif schema_name == 'authors':
marcjson = hepnames2marc.do(record)
else:
raise NotImplementedError(u'JSON -> MARC rules missing for "{}"'.format(schema_name))
record = RECORD()
for key, values in sorted(iteritems(marcjson)):
tag, ind1, ind2 = _parse_key(key)
if _is_controlfield(tag, ind1, ind2):
value = force_single_element(values)
if not isinstance(value, text_type):
value = text_type(value)
record.append(CONTROLFIELD(_strip_invalid_chars_for_xml(value), {'tag': tag}))
else:
for value in force_list(values):
datafield = DATAFIELD({'tag': tag, 'ind1': ind1, 'ind2': ind2})
for code, els in sorted(iteritems(value)):
for el in force_list(els):
if not isinstance(el, text_type):
el = text_type(el)
datafield.append(SUBFIELD(_strip_invalid_chars_for_xml(el), {'code': code}))
record.append(datafield)
return tostring(record, encoding='utf8', pretty_print=True)
|
def record2marcxml(record)
|
Convert a JSON record to a MARCXML string.
Deduces which set of rules to use by parsing the ``$schema`` key, as
it unequivocally determines which kind of record we have.
Args:
record(dict): a JSON record.
Returns:
str: a MARCXML string converted from the record.
| 3.37002
| 3.51853
| 0.957792
|
result = maybe_int(force_single_element(value.get('a', '')))
if result and result > 0:
return result
|
def number_of_pages(self, key, value)
|
Populate the ``number_of_pages`` key.
| 13.483664
| 13.044195
| 1.033691
|
preliminary_results_prefixes = ['ATLAS-CONF-', 'CMS-PAS-', 'CMS-DP-', 'LHCB-CONF-']
note_prefixes = ['ALICE-INT-', 'ATL-', 'ATLAS-CONF-', 'CMS-DP-', 'CMS-PAS-', 'LHCB-CONF-', 'LHCB-PUB-']
result_037 = self.get('037__', [])
result_500 = self.get('500__', [])
result_595 = self.get('595__', [])
result_980 = self.get('980__', [])
report = force_single_element(value.get('a', ''))
hidden_report = force_single_element(value.get('9') or value.get('z', ''))
source = 'CDS' if not is_arxiv(report) else 'arXiv'
if any(report.upper().startswith(prefix) for prefix in note_prefixes):
result_980.append({'a': 'NOTE'})
if any(report.upper().startswith(prefix) for prefix in preliminary_results_prefixes):
result_500.append({'9': 'CDS', 'a': 'Preliminary results'})
is_barcode = hidden_report.startswith('P0') or hidden_report.startswith('CM-P0')
if not report.startswith('SIS-') and not is_barcode:
result_037.append({
'9': source,
'a': report,
'c': value.get('c'),
'z': hidden_report if source == 'CDS' else None,
})
self['500__'] = result_500
self['595__'] = result_595
self['980__'] = result_980
return result_037
|
def secondary_report_numbers(self, key, value)
|
Populate the ``037`` MARC field.
Also populates the ``500``, ``595`` and ``980`` MARC field through side effects.
| 4.316386
| 3.78055
| 1.141735
|
field_700 = self.get('700__', [])
field_701 = self.get('701__', [])
is_supervisor = any(el.lower().startswith('dir') for el in force_list(value.get('e', '')))
if is_supervisor:
field_701.append(_converted_author(value))
else:
field_700.append(_converted_author(value))
self['701__'] = field_701
return field_700
|
def nonfirst_authors(self, key, value)
|
Populate ``700`` MARC field.
Also populates the ``701`` MARC field through side-effects.
| 5.001022
| 4.163787
| 1.201075
|
def _is_preprint(value):
return value.get('y', '').lower() == 'preprint'
def _is_fulltext(value):
return value['u'].endswith('.pdf') and value['u'].startswith('http://cds.cern.ch')
def _is_local_copy(value):
return 'local copy' in value.get('y', '')
def _is_ignored_domain(value):
ignored_domains = ['http://cdsweb.cern.ch', 'http://cms.cern.ch',
'http://cmsdoc.cern.ch', 'http://documents.cern.ch',
'http://preprints.cern.ch', 'http://cds.cern.ch',
'http://arxiv.org']
return any(value['u'].startswith(domain) for domain in ignored_domains)
field_8564 = self.get('8564_', [])
field_FFT = self.get('FFT__', [])
if 'u' not in value:
return field_8564
url = escape_url(value['u'])
if _is_fulltext(value) and not _is_preprint(value):
if _is_local_copy(value):
description = value.get('y', '').replace('local copy', 'on CERN Document Server')
field_8564.append({
'u': url,
'y': description,
})
else:
_, file_name = os.path.split(urllib.parse.urlparse(value['u']).path)
_, extension = os.path.splitext(file_name)
field_FFT.append({
't': 'CDS',
'a': url,
'd': value.get('y', ''),
'n': file_name,
'f': extension,
})
elif not _is_ignored_domain(value):
field_8564.append({
'u': url,
'y': value.get('y'),
})
self['FFT__'] = field_FFT
return field_8564
|
def urls(self, key, value)
|
Populate the ``8564`` MARC field.
Also populate the ``FFT`` field through side effects.
| 3.142
| 2.827893
| 1.111075
|
if not key.startswith('245'):
return {
'source': value.get('9'),
'subtitle': value.get('b'),
'title': value.get('a'),
}
self.setdefault('titles', []).insert(0, {
'source': value.get('9'),
'subtitle': value.get('b'),
'title': value.get('a'),
})
|
def titles(self, key, value)
|
Populate the ``titles`` key.
| 3.804403
| 3.654632
| 1.040981
|
return {
'language': langdetect.detect(value.get('a')),
'source': value.get('9'),
'subtitle': value.get('b'),
'title': value.get('a'),
}
|
def title_translations(self, key, value)
|
Populate the ``title_translations`` key.
| 6.297039
| 6.601796
| 0.953837
|
first, rest = values[0], values[1:]
self.setdefault('245', []).append({
'a': first.get('title'),
'b': first.get('subtitle'),
'9': first.get('source'),
})
return [
{
'a': value.get('title'),
'b': value.get('subtitle'),
'9': value.get('source'),
} for value in rest
]
|
def titles2marc(self, key, values)
|
Populate the ``246`` MARC field.
Also populates the ``245`` MARC field through side effects.
| 2.832095
| 3.15856
| 0.896641
|
return {
'a': value.get('title'),
'b': value.get('subtitle'),
'9': value.get('source'),
}
|
def title_translations2marc(self, key, value)
|
Populate the ``242`` MARC field.
| 4.564257
| 4.42359
| 1.031799
|
return {
'place': value.get('a'),
'publisher': value.get('b'),
'date': normalize_date_aggressively(value.get('c')),
}
|
def imprints(self, key, value)
|
Populate the ``imprints`` key.
| 8.53527
| 7.825248
| 1.090735
|
return {
'a': value.get('place'),
'b': value.get('publisher'),
'c': value.get('date'),
}
|
def imprints2marc(self, key, value)
|
Populate the ``260`` MARC field.
| 4.675845
| 4.509909
| 1.036794
|
def _means_not_curated(public_note):
return public_note in [
'*Brief entry*',
'* Brief entry *',
'*Temporary entry*',
'* Temporary entry *',
'*Temporary record*',
'* Temporary record *',
]
public_notes = self.get('public_notes', [])
thesis_info = self.get('thesis_info', {})
source = force_single_element(value.get('9', ''))
for value in force_list(value):
for public_note in force_list(value.get('a')):
match = IS_DEFENSE_DATE.match(public_note)
if match:
try:
thesis_info['defense_date'] = normalize_date(match.group('defense_date'))
except ValueError:
public_notes.append({
'source': source,
'value': public_note,
})
elif _means_not_curated(public_note):
self['curated'] = False
else:
public_notes.append({
'source': source,
'value': public_note,
})
self['thesis_info'] = thesis_info
return public_notes
|
def public_notes(self, key, value)
|
Populate the ``public_notes`` key.
Also populates the ``curated`` and ``thesis_info`` keys through side effects.
| 4.506859
| 4.031061
| 1.118033
|
def _get_degree_type(value):
DEGREE_TYPES_MAP = {
'RAPPORT DE STAGE': 'other',
'INTERNSHIP REPORT': 'other',
'DIPLOMA': 'diploma',
'BACHELOR': 'bachelor',
'LAUREA': 'laurea',
'MASTER': 'master',
'THESIS': 'other',
'PHD': 'phd',
'PDF': 'phd',
'PH.D. THESIS': 'phd',
'HABILITATION': 'habilitation',
}
b_value = force_single_element(value.get('b', ''))
if b_value:
return DEGREE_TYPES_MAP.get(b_value.upper(), 'other')
def _get_institutions(value):
c_values = force_list(value.get('c'))
z_values = force_list(value.get('z'))
# XXX: we zip only when they have the same length, otherwise
# we might match a value with the wrong recid.
if len(c_values) != len(z_values):
return [{'name': c_value} for c_value in c_values]
else:
return [{
'curated_relation': True,
'name': c_value,
'record': get_record_ref(z_value, 'institutions'),
} for c_value, z_value in zip(c_values, z_values)]
thesis_info = self.get('thesis_info', {})
thesis_info['date'] = normalize_date(force_single_element(value.get('d')))
thesis_info['degree_type'] = _get_degree_type(value)
thesis_info['institutions'] = _get_institutions(value)
return thesis_info
|
def thesis_info(self, key, value)
|
Populate the ``thesis_info`` key.
| 3.867854
| 3.750615
| 1.031258
|
def _get_b_value(value):
DEGREE_TYPES_MAP = {
'bachelor': 'Bachelor',
'diploma': 'Diploma',
'habilitation': 'Habilitation',
'laurea': 'Laurea',
'master': 'Master',
'other': 'Thesis',
'phd': 'PhD',
}
degree_type = value.get('degree_type')
if degree_type:
return DEGREE_TYPES_MAP.get(degree_type)
result_500 = self.get('500', [])
result_502 = self.get('502', {})
if value.get('defense_date'):
result_500.append({
'a': u'Presented on {}'.format(value.get('defense_date')),
})
result_502 = {
'b': _get_b_value(value),
'c': [el['name'] for el in force_list(value.get('institutions'))],
'd': value.get('date'),
}
self['500'] = result_500
return result_502
|
def thesis_info2marc(self, key, value)
|
Populate the ``502`` MARC field.
Also populates the ``500`` MARC field through side effects.
| 3.607292
| 3.312795
| 1.088897
|
result = []
source = force_single_element(value.get('9'))
for a_value in force_list(value.get('a')):
result.append({
'source': source,
'value': a_value,
})
return result
|
def abstracts(self, key, value)
|
Populate the ``abstracts`` key.
| 7.750946
| 6.937846
| 1.117198
|
return {
'agency': value.get('a'),
'grant_number': value.get('c'),
'project_number': value.get('f'),
}
|
def funding_info(self, key, value)
|
Populate the ``funding_info`` key.
| 6.264318
| 5.911957
| 1.059601
|
return {
'a': value.get('agency'),
'c': value.get('grant_number'),
'f': value.get('project_number'),
}
|
def funding_info2marc(self, key, value)
|
Populate the ``536`` MARC field.
| 4.970734
| 4.760396
| 1.044185
|
def _get_license(value):
a_values = force_list(value.get('a'))
oa_licenses = [el for el in a_values if el == 'OA' or el == 'Open Access']
other_licenses = [el for el in a_values if el != 'OA' and el != 'Open Access']
if not other_licenses:
return force_single_element(oa_licenses)
return force_single_element(other_licenses)
def _get_material(value):
material = value.get('3', '').lower()
if material == 'article':
return 'publication'
return material
return {
'imposing': value.get('b'),
'license': _get_license(value),
'material': _get_material(value),
'url': value.get('u'),
}
|
def license(self, key, value)
|
Populate the ``license`` key.
| 4.141431
| 4.085623
| 1.01366
|
return {
'a': value.get('license'),
'b': value.get('imposing'),
'u': value.get('url'),
'3': value.get('material'),
}
|
def license2marc(self, key, value)
|
Populate the ``540`` MARC field.
| 7.413738
| 6.957043
| 1.065645
|
MATERIAL_MAP = {
'Article': 'publication',
'Published thesis as a book': 'publication',
}
material = value.get('e') or value.get('3')
return {
'holder': value.get('d'),
'material': MATERIAL_MAP.get(material),
'statement': value.get('f'),
'url': value.get('u'),
'year': maybe_int(value.get('g')),
}
|
def copyright(self, key, value)
|
Populate the ``copyright`` key.
| 7.265054
| 6.81452
| 1.066114
|
E_MAP = {
'publication': 'Article',
}
e_value = value.get('material')
return {
'd': value.get('holder'),
'e': E_MAP.get(e_value),
'f': value.get('statement'),
'g': value.get('year'),
'u': value.get('url'),
}
|
def copyright2marc(self, key, value)
|
Populate the ``542`` MARC field.
| 6.011621
| 5.74013
| 1.047297
|
def _is_for_cds(value):
normalized_c_values = [el.upper() for el in force_list(value.get('c'))]
return 'CDS' in normalized_c_values
def _is_for_hal(value):
normalized_c_values = [el.upper() for el in force_list(value.get('c'))]
return 'HAL' in normalized_c_values
def _is_not_for_hal(value):
normalized_c_values = [el.upper() for el in force_list(value.get('c'))]
return 'NOT HAL' in normalized_c_values
_private_notes = self.get('_private_notes', [])
_export_to = self.get('_export_to', {})
for value in force_list(value):
if _is_for_cds(value):
_export_to['CDS'] = True
if _is_for_hal(value):
_export_to['HAL'] = True
elif _is_not_for_hal(value):
_export_to['HAL'] = False
source = force_single_element(value.get('9'))
for _private_note in force_list(value.get('a')):
_private_notes.append({
'source': source,
'value': _private_note,
})
self['_export_to'] = _export_to
return _private_notes
|
def _private_notes(self, key, value)
|
Populate the ``_private_notes`` key.
Also populates the ``_export_to`` key through side effects.
| 2.691786
| 2.483932
| 1.08368
|
def _is_from_hal(value):
return value.get('source') == 'HAL'
if not _is_from_hal(value):
return {
'9': value.get('source'),
'a': value.get('value'),
}
self.setdefault('595_H', []).append({'a': value.get('value')})
|
def _private_notes2marc(self, key, value)
|
Populate the ``595`` MARC key.
Also populates the `595_H` MARC key through side effects.
| 7.307973
| 4.961171
| 1.473034
|
def _is_for_cds(value):
return 'CDS' in value
def _is_for_hal(value):
return 'HAL' in value and value['HAL']
def _is_not_for_hal(value):
return 'HAL' in value and not value['HAL']
result = []
if _is_for_cds(value):
result.append({'c': 'CDS'})
if _is_for_hal(value):
result.append({'c': 'HAL'})
elif _is_not_for_hal(value):
result.append({'c': 'not HAL'})
return result
|
def _export_to2marc(self, key, value)
|
Populate the ``595`` MARC field.
| 2.787387
| 2.834651
| 0.983326
|
return {
'date': normalize_date(value.get('d')),
'expert': force_single_element(value.get('a')),
'status': value.get('s'),
}
|
def _desy_bookkeeping(self, key, value)
|
Populate the ``_desy_bookkeeping`` key.
| 8.639607
| 8.716738
| 0.991151
|
if 'identifier' not in value:
return {
'a': value.get('expert'),
'd': value.get('date'),
's': value.get('status'),
}
self.setdefault('035', []).append({
'9': 'DESY',
'z': value['identifier']
})
|
def _desy_bookkeeping2marc(self, key, value)
|
Populate the ``595_D`` MARC field.
Also populates the ``035`` MARC field through side effects.
| 6.72047
| 5.8481
| 1.149172
|
if value.get('q'):
self['date_proposed'] = normalize_date(value['q'])
if value.get('r'):
self['date_approved'] = normalize_date(value['r'])
if value.get('s'):
self['date_started'] = normalize_date(value['s'])
if value.get('c'):
self['date_cancelled'] = normalize_date(value['c'])
if value.get('t'):
self['date_completed'] = normalize_date(value['t'])
raise IgnoreKey
|
def _dates(self, key, value)
|
Don't populate any key through the return value.
On the other hand, populates the ``date_proposed``, ``date_approved``,
``date_started``, ``date_cancelled``, and the ``date_completed`` keys
through side effects.
| 2.580143
| 1.994672
| 1.293517
|
experiment = self.get('experiment', {})
legacy_name = self.get('legacy_name', '')
accelerator = self.get('accelerator', {})
institutions = self.get('institutions', [])
for value in force_list(values):
if value.get('c'):
experiment['value'] = value.get('c')
if value.get('d'):
experiment['short_name'] = value.get('d')
if value.get('a'):
legacy_name = value.get('a')
if value.get('b'):
accelerator['value'] = value.get('b')
institution = {}
if value.get('u'):
institution['value'] = value.get('u')
if value.get('z'):
record = get_record_ref(maybe_int(value.get('z')), 'institutions')
if record:
institution['curated_relation'] = True
institution['record'] = record
institutions.append(institution)
self['legacy_name'] = legacy_name
self['accelerator'] = accelerator
self['institutions'] = institutions
return experiment
|
def experiment(self, key, values)
|
Populate the ``experiment`` key.
Also populates the ``legacy_name``, the ``accelerator``, and the
``institutions`` keys through side effects.
| 3.139162
| 2.555258
| 1.228511
|
core = self.get('core')
deleted = self.get('deleted')
project_type = self.get('project_type', [])
if not core:
normalized_a_values = [el.upper() for el in force_list(value.get('a'))]
if 'CORE' in normalized_a_values:
core = True
if not deleted:
normalized_c_values = [el.upper() for el in force_list(value.get('c'))]
if 'DELETED' in normalized_c_values:
deleted = True
if not project_type:
normalized_a_values = [el.upper() for el in force_list(value.get('a'))]
if 'ACCELERATOR' in normalized_a_values:
project_type.append('accelerator')
self['project_type'] = project_type
self['deleted'] = deleted
return core
|
def core(self, key, value)
|
Populate the ``core`` key.
Also populates the ``deleted`` and ``project_type`` keys through side
effects.
| 2.69618
| 2.410321
| 1.118598
|
def _control_number(self, key, value):
self['self'] = get_record_ref(int(value), endpoint)
return int(value)
return _control_number
|
def control_number(endpoint)
|
Populate the ``control_number`` key.
Also populates the ``self`` key through side effects.
| 9.964553
| 7.148843
| 1.393869
|
def _get_datetime(value):
d_value = force_single_element(value.get('d', ''))
if d_value:
try:
date = PartialDate.loads(d_value)
except ValueError:
return d_value
else:
datetime_ = datetime(year=date.year, month=date.month, day=date.day)
return datetime_.isoformat()
internal_uid, orcid, source = None, None, None
a_values = force_list(value.get('a'))
for a_value in a_values:
if IS_INTERNAL_UID.match(a_value):
if a_value.startswith('inspire:uid:'):
internal_uid = int(a_value[12:])
else:
internal_uid = int(a_value)
elif IS_ORCID.match(a_value):
if a_value.startswith('orcid:'):
orcid = a_value[6:]
else:
orcid = a_value
else:
source = a_value
c_value = force_single_element(value.get('c', ''))
normalized_c_value = c_value.lower()
if normalized_c_value == 'batchupload':
method = 'batchuploader'
elif normalized_c_value == 'submission':
method = 'submitter'
else:
method = normalized_c_value
return {
'datetime': _get_datetime(value),
'email': value.get('b'),
'internal_uid': internal_uid,
'method': method,
'orcid': orcid,
'source': source,
'submission_number': value.get('e'),
}
|
def acquisition_source(self, key, value)
|
Populate the ``acquisition_source`` key.
| 2.817532
| 2.802813
| 1.005252
|
return [
{
'source': value.get('9'),
'value': public_note,
} for public_note in force_list(value.get('a'))
]
|
def public_notes_500(self, key, value)
|
Populate the ``public_notes`` key.
| 8.386397
| 8.059411
| 1.040572
|
return [
{
'source': value.get('9'),
'value': _private_note,
} for _private_note in force_list(value.get('a'))
]
|
def _private_notes_595(self, key, value)
|
Populate the ``_private_notes`` key.
| 8.612991
| 7.887519
| 1.091977
|
@utils.flatten
@utils.for_each_value
def _external_system_identifiers(self, key, value):
new_recid = maybe_int(value.get('d'))
if new_recid:
self['new_record'] = get_record_ref(new_recid, endpoint)
return [
{
'schema': 'SPIRES',
'value': ext_sys_id,
} for ext_sys_id in force_list(value.get('a'))
]
return _external_system_identifiers
|
def external_system_identifiers(endpoint)
|
Populate the ``external_system_identifiers`` key.
Also populates the ``new_record`` key through side effects.
| 6.835236
| 5.950144
| 1.148751
|
@utils.for_each_value
def _deleted_records(self, key, value):
deleted_recid = maybe_int(value.get('a'))
if deleted_recid:
return get_record_ref(deleted_recid, endpoint)
return _deleted_records
|
def deleted_records(endpoint)
|
Populate the ``deleted_records`` key.
| 7.014112
| 6.804848
| 1.030752
|
result = []
a_value = force_single_element(value.get('a'))
e_values = [el for el in force_list(value.get('e')) if el != '-']
zero_values = force_list(value.get('0'))
if a_value and not e_values:
result.append({'accelerator': a_value})
# XXX: we zip only when they have the same length, otherwise
# we might match a value with the wrong recid.
if len(e_values) == len(zero_values):
for e_value, zero_value in zip(e_values, zero_values):
result.append({
'legacy_name': e_value,
'record': get_record_ref(zero_value, 'experiments'),
})
else:
for e_value in e_values:
result.append({'legacy_name': e_value})
return result
|
def accelerator_experiments(self, key, value)
|
Populate the ``accelerator_experiments`` key.
| 4.36664
| 4.327188
| 1.009117
|
keywords = self.get('keywords', [])
energy_ranges = self.get('energy_ranges', [])
for value in force_list(values):
if value.get('a'):
schema = force_single_element(value.get('2', '')).upper()
sources = force_list(value.get('9'))
a_values = force_list(value.get('a'))
if 'conference' not in sources:
for a_value in a_values:
keywords.append({
'schema': schema,
'source': force_single_element(sources),
'value': a_value,
})
if value.get('e'):
energy_ranges.append(ENERGY_RANGES_MAP.get(value.get('e')))
self['energy_ranges'] = energy_ranges
return keywords
|
def keywords(self, key, values)
|
Populate the ``keywords`` key.
Also populates the ``energy_ranges`` key through side effects.
| 4.232082
| 3.771228
| 1.122203
|
result_695 = self.get('695', [])
result_084 = self.get('084', [])
result_6531 = self.get('6531', [])
for value in values:
schema = value.get('schema')
source = value.get('source')
keyword = value.get('value')
if schema == 'PACS' or schema == 'PDG':
result_084.append({
'2': schema,
'9': source,
'a': keyword,
})
elif schema == 'JACOW':
result_6531.append({
'2': 'JACoW',
'9': source,
'a': keyword,
})
elif schema == 'INSPIRE':
result_695.append({
'2': 'INSPIRE',
'9': source,
'a': keyword,
})
elif schema == 'INIS':
result_695.append({
'2': 'INIS',
'9': source,
'a': keyword,
})
elif source != 'magpie':
result_6531.append({
'9': source,
'a': keyword,
})
self['6531'] = result_6531
self['084'] = result_084
return result_695
|
def keywords2marc(self, key, values)
|
Populate the ``695`` MARC field.
Also populates the ``084`` and ``6531`` MARC fields through side effects.
| 2.618365
| 2.229094
| 1.174632
|
result = []
for g_value in force_list(value.get('g')):
collaborations = normalize_collaboration(g_value)
if len(collaborations) == 1:
result.append({
'record': get_record_ref(maybe_int(value.get('0')), 'experiments'),
'value': collaborations[0],
})
else:
result.extend({'value': collaboration} for collaboration in collaborations)
return result
|
def collaborations(self, key, value)
|
Populate the ``collaborations`` key.
| 6.338878
| 5.862449
| 1.081268
|
def _get_cnum(value):
w_value = force_single_element(value.get('w', ''))
normalized_w_value = w_value.replace('/', '-').upper()
return normalized_w_value
def _get_material(value):
schema = load_schema('elements/material')
valid_materials = schema['enum']
m_value = force_single_element(value.get('m', ''))
normalized_m_value = m_value.lower()
if normalized_m_value in valid_materials:
return normalized_m_value
def _get_parent_isbn(value):
z_value = force_single_element(value.get('z', ''))
if z_value:
return normalize_isbn(z_value)
def _get_pubinfo_freetext(value):
x_value = force_single_element(value.get('x', ''))
if not x_value.startswith('#DONE'):
return x_value
page_start, page_end, artid = split_page_artid(value.get('c'))
parent_recid = maybe_int(force_single_element(value.get('0')))
parent_record = get_record_ref(parent_recid, 'literature')
journal_recid = maybe_int(force_single_element(value.get('1')))
journal_record = get_record_ref(journal_recid, 'journals')
conference_recid = maybe_int(force_single_element(value.get('2')))
conference_record = get_record_ref(conference_recid, 'conferences')
return {
'artid': artid,
'cnum': _get_cnum(value),
'conf_acronym': force_single_element(value.get('q')),
'conference_record': conference_record,
'hidden': key.startswith('7731') or None,
'journal_issue': force_single_element(value.get('n')),
'journal_record': journal_record,
'journal_title': force_single_element(value.get('p')),
'journal_volume': force_single_element(value.get('v')),
'material': _get_material(value),
'page_end': page_end,
'page_start': page_start,
'parent_isbn': _get_parent_isbn(value),
'parent_record': parent_record,
'parent_report_number': force_single_element(value.get('r')),
'pubinfo_freetext': _get_pubinfo_freetext(value),
'year': maybe_int(force_single_element(value.get('y'))),
}
|
def publication_info(self, key, value)
|
Populate the ``publication_info`` key.
| 2.625996
| 2.588229
| 1.014592
|
result_773 = self.get('773', [])
result_7731 = self.get('7731', [])
for value in force_list(convert_new_publication_info_to_old(values)):
page_artid = []
if value.get('page_start') and value.get('page_end'):
page_artid.append(u'{page_start}-{page_end}'.format(**value))
elif value.get('page_start'):
page_artid.append(u'{page_start}'.format(**value))
elif value.get('artid'):
page_artid.append(u'{artid}'.format(**value))
result = {
'0': get_recid_from_ref(value.get('parent_record')),
'c': page_artid,
'm': value.get('material'),
'n': value.get('journal_issue'),
'p': value.get('journal_title'),
'q': value.get('conf_acronym'),
'r': value.get('parent_report_number'),
'v': value.get('journal_volume'),
'w': value.get('cnum'),
'x': value.get('pubinfo_freetext'),
'y': value.get('year'),
'z': value.get('parent_isbn'),
}
if value.get('hidden'):
result_7731.append(result)
else:
result_773.append(result)
self['7731'] = result_7731
return result_773
|
def publication_info2marc(self, key, values)
|
Populate the ``773`` MARC field.
Also populates the ``7731`` MARC field through side effects.
| 3.25131
| 3.038653
| 1.069984
|
record = get_record_ref(maybe_int(value.get('w')), 'literature')
if record:
return {
'curated_relation': record is not None,
'record': record,
'relation': 'predecessor',
}
|
def related_records_78002(self, key, value)
|
Populate the ``related_records`` key.
| 12.687979
| 12.448434
| 1.019243
|
record = get_record_ref(maybe_int(value.get('w')), 'literature')
if record:
return {
'curated_relation': record is not None,
'record': record,
'relation': 'successor',
}
|
def related_records_78502(self, key, value)
|
Populate the ``related_records`` key.
| 13.566691
| 13.345384
| 1.016583
|
record = get_record_ref(maybe_int(value.get('w')), 'literature')
if record:
return {
'curated_relation': record is not None,
'record': record,
'relation_freetext': value.get('i'),
}
|
def related_records_78708(self, key, value)
|
Populate the ``related_records`` key.
| 11.572088
| 11.303019
| 1.023805
|
if value.get('relation_freetext'):
return {
'i': value.get('relation_freetext'),
'w': get_recid_from_ref(value.get('record')),
}
elif value.get('relation') == 'successor':
self.setdefault('78502', []).append({
'i': 'superseded by',
'w': get_recid_from_ref(value.get('record')),
})
elif value.get('relation') == 'predecessor':
self.setdefault('78002', []).append({
'i': 'supersedes',
'w': get_recid_from_ref(value.get('record')),
})
else:
raise NotImplementedError(u"Unhandled relation in related_records: {}".format(value.get('relation')))
|
def related_records2marc(self, key, value)
|
Populate the ``78708`` MARC field
Also populates the ``78002``, ``78502`` MARC fields through side effects.
| 3.263204
| 2.798191
| 1.166183
|
return [
{
'source': value.get('9'),
'value': _private_note,
} for _private_note in force_list(value.get('x'))
]
|
def _private_notes(self, key, value)
|
Populate the ``_private_notes`` key.
| 8.692898
| 8.38018
| 1.037316
|
proceedings = self.get('proceedings')
refereed = self.get('refereed')
if not proceedings:
normalized_a_values = [el.upper() for el in force_list(value.get('a'))]
if 'PROCEEDINGS' in normalized_a_values:
proceedings = True
if not refereed:
normalized_a_values = [el.upper() for el in force_list(value.get('a'))]
if 'PEER REVIEW' in normalized_a_values:
refereed = True
elif 'NON-PUBLISHED' in normalized_a_values:
refereed = False
self['refereed'] = refereed
return proceedings
|
def proceedings(self, key, value)
|
Populate the ``proceedings`` key.
Also populates the ``refereed`` key through side effects.
| 3.070673
| 2.837787
| 1.082066
|
short_title = value.get('a')
title_variants = self.get('title_variants', [])
if value.get('u'):
short_title = value.get('u')
title_variants.append(value.get('a'))
self['title_variants'] = title_variants
return short_title
|
def short_title(self, key, value)
|
Populate the ``short_title`` key.
Also populates the ``title_variants`` key through side effects.
| 4.606118
| 3.582847
| 1.285603
|
deleted = self.get('deleted')
book_series = self.get('book_series')
if not deleted:
normalized_a_values = [el.upper() for el in force_list(value.get('a'))]
normalized_c_values = [el.upper() for el in force_list(value.get('c'))]
if 'DELETED' in normalized_a_values or 'DELETED' in normalized_c_values:
deleted = True
if not book_series:
normalized_a_values = [el.upper() for el in force_list(value.get('a'))]
if 'BOOKSERIES' in normalized_a_values:
book_series = True
self['book_series'] = book_series
return deleted
|
def deleted(self, key, value)
|
Populate the ``deleted`` key.
Also populates the ``book_series`` key through side effects.
| 2.730734
| 2.525201
| 1.081393
|
return [normalize_rank(el) for el in force_list(value.get('a'))]
|
def ranks(self, key, value)
|
Populate the ``ranks`` key.
| 21.406355
| 22.316635
| 0.959211
|
parser = argparse.ArgumentParser(description=self.description)
parser.add_argument(
'--version', help='show version and exit',
default=False, action='store_true')
parser.add_argument(
'--debug', help='enable debugging',
default=False, action='store_true')
return parser
|
def new_parser(self)
|
Create a command line argument parser
Add a few default flags, such as --version
for displaying the program version when invoked
| 2.517852
| 2.229897
| 1.129134
|
self.registered[command] = {
'function': function, 'description': description
}
|
def add_command(self, command, function, description=None)
|
Register a new function with a the name `command` and
`description` (which will be shown then help is invoked).
| 5.878416
| 4.839423
| 1.214693
|
args = args or self.parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
if args.version:
print(version.VERSION)
sys.exit(0)
|
def run(self, args=None)
|
Parse command line arguments if necessary then run program.
By default this method will just take of the --version flag.
The logic for other flags should be handled by your subclass
| 2.702834
| 2.727895
| 0.990813
|
if command:
return self.registered[command].get(
'description', 'No help available'
)
return ', '.join(sorted(self.registered))
|
def help_function(self, command=None)
|
Show help for all available commands or just a single one
| 5.25593
| 5.386869
| 0.975693
|
super(Program, self).add_command(command, function, description)
self.service.register(command, function)
|
def add_command(self, command, function, description=None)
|
Register a new function for command
| 4.927929
| 4.717709
| 1.04456
|
args = args or self.parser.parse_args()
super(Program, self).run(args)
# Read configuration file if any
if args.config is not None:
filepath = args.config
self.config.read(filepath)
# Start workers then wait until they finish work
[w.start() for w in self.workers]
[w.join() for w in self.workers]
|
def run(self, args=None)
|
Parse comand line arguments/flags and run program
| 4.000923
| 3.754247
| 1.065706
|
def make(addr):
c = Client(addr)
c.socket._set_recv_timeout(timeout)
return c
if ',' in addr:
addrs = addr.split(',')
addrs = [a.strip() for a in addrs]
return {a: make(a) for a in addrs}
return make(addr)
|
def create_client(self, addr, timeout)
|
Create client(s) based on addr
| 3.611758
| 3.664788
| 0.98553
|
try:
return client.call(command, *args)
except Exception as e:
return None, str(e)
|
def _call_single(self, client, command, *args)
|
Call single
| 3.1772
| 3.45717
| 0.919018
|
responses, errors = {}, {}
for addr, client in clients.items():
res, err = self._call_single(client, command, *args)
responses[addr] = res
errors[addr] = err
return responses, errors
|
def _call_multi(self, clients, command, *args)
|
Call multi
| 2.741015
| 2.717337
| 1.008714
|
if isinstance(self.c, dict):
return self._call_multi(self.c, command, *args)
return self._call_single(self.c, command, *args)
|
def call(self, command, *args)
|
Call remote service(s)
| 3.222865
| 3.141621
| 1.025861
|
if isinstance(self.c, dict):
for client in self.c.values():
client.sock.close()
return
self.c.socket.close()
|
def close(self)
|
Close socket(s)
| 5.313388
| 4.599264
| 1.155269
|
if self.kind is 'local':
what = res if not err else err
print(what)
return
if self.kind is 'remote':
if colored:
red, green, reset = Fore.RED, Fore.GREEN, Fore.RESET
else:
red = green = reset = ''
if err:
what = prefix + red + 'remote err: {}'.format(err) + reset
else:
what = prefix + green + str(res) + reset
print(what)
|
def _show(self, res, err, prefix='', colored=False)
|
Show result or error
| 3.37502
| 3.356955
| 1.005381
|
if not command:
return
# Look for local methods first
try:
res = self.registered[command]['function'](self, *args)
return Response('local', res, None)
# Method not found, try remote
except KeyError:
# Execute remote command
res, err = self.client.call(command, *args)
return Response('remote', res, err, self.client.is_multi())
# Local exception
except Exception as e:
return Response('local', res, str(e))
|
def call(self, command, *args)
|
Execute local OR remote command and show response
| 5.593123
| 4.981355
| 1.122812
|
parts = util.split(text)
command = parts[0] if text and parts else None
command = command.lower() if command else None
args = parts[1:] if len(parts) > 1 else []
return (command, args)
|
def parse_input(self, text)
|
Parse ctl user input. Double quotes are used
to group together multi words arguments.
| 4.045995
| 3.621979
| 1.117067
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.