| sentence1 (string, lengths 52 to 3.87M) | sentence2 (string, lengths 1 to 47.2k) | label (string, 1 class: entailment) |
|---|---|---|
def remove_comments(xml):
    """
    Remove comments, as they can break the xml parser.
    See html5lib issue #122 ( http://code.google.com/p/html5lib/issues/detail?id=122 ).
    >>> remove_comments('<!-- -->')
    ''
    >>> remove_comments('<!--\\n-->')
    ''
    >>> remove_comments('<p>stuff<!-- \\n -->stuff</p>')
    '<p>stuffstuff</p>'
    """
    regex = re.compile(r'<!--.*?-->', re.DOTALL)
    return regex.sub('', xml)
|
Remove comments, as they can break the xml parser.
See html5lib issue #122 ( http://code.google.com/p/html5lib/issues/detail?id=122 ).
>>> remove_comments('<!-- -->')
''
>>> remove_comments('<!--\\n-->')
''
>>> remove_comments('<p>stuff<!-- \\n -->stuff</p>')
'<p>stuffstuff</p>'
|
entailment
|
def remove_newlines(xml):
    r"""Remove newlines in the xml.
    If the newline separates words in text, then replace with a space instead.
    >>> remove_newlines('<p>para one</p>\n<p>para two</p>')
    '<p>para one</p><p>para two</p>'
    >>> remove_newlines('<p>line one\nline two</p>')
    '<p>line one line two</p>'
    >>> remove_newlines('one\n1')
    'one 1'
    >>> remove_newlines('hey!\nmore text!')
    'hey! more text!'
    """
    # Normalize newlines.
    xml = xml.replace('\r\n', '\n')
    xml = xml.replace('\r', '\n')
    # Remove newlines that don't separate text. The remaining ones do separate text.
    xml = re.sub(r'(?<=[>\s])\n(?=[<\s])', '', xml)
    xml = xml.replace('\n', ' ')
    return xml.strip()
|
r"""Remove newlines in the xml.
If the newline separates words in text, then replace with a space instead.
>>> remove_newlines('<p>para one</p>\n<p>para two</p>')
'<p>para one</p><p>para two</p>'
>>> remove_newlines('<p>line one\nline two</p>')
'<p>line one line two</p>'
>>> remove_newlines('one\n1')
'one 1'
>>> remove_newlines('hey!\nmore text!')
'hey! more text!'
|
entailment
|
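A minimal sketch showing how the two cleaners above might be chained before handing markup to a parser; it assumes remove_comments and remove_newlines are importable as defined above, and the input string is purely illustrative.

raw_html = '<p>para one</p>\n<!-- a\ncomment --><p>line one\nline two</p>'
cleaned = remove_newlines(remove_comments(raw_html))
print(cleaned)
# expected (under the assumptions above): '<p>para one</p><p>line one line two</p>'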
def remove_insignificant_text_nodes(dom):
    """
    For html elements that should not have text nodes inside them, remove all
    whitespace. For elements that may have text, collapse multiple spaces to a
    single space.
    """
    nodes_to_remove = []
    for node in walk_dom(dom):
        if is_text(node):
            text = node.nodeValue
            if node.parentNode.tagName in _non_text_node_tags:
                nodes_to_remove.append(node)
            else:
                node.nodeValue = re.sub(r'\s+', ' ', text)
    for node in nodes_to_remove:
        remove_node(node)
|
For html elements that should not have text nodes inside them, remove all
whitespace. For elements that may have text, collapse multiple spaces to a
single space.
|
entailment
|
def get_child(parent, child_index):
    """
    Get the child at the given index, or return None if it doesn't exist.
    """
    if child_index < 0 or child_index >= len(parent.childNodes):
        return None
    return parent.childNodes[child_index]
|
Get the child at the given index, or return None if it doesn't exist.
|
entailment
|
def get_location(dom, location):
    """
    Get the node at the specified location in the dom.
    Location is a sequence of child indices, starting at the children of the
    root element. If there is no node at this location, raise a ValueError.
    """
    node = dom.documentElement
    for i in location:
        node = get_child(node, i)
        if not node:
            raise ValueError('Node at location %s does not exist.' % location) #TODO: line not covered
    return node
|
Get the node at the specified location in the dom.
Location is a sequence of child indices, starting at the children of the
root element. If there is no node at this location, raise a ValueError.
|
entailment
|
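A short usage sketch of the child-index addressing used by get_location; it assumes get_child and get_location as defined above are in scope, and uses xml.dom.minidom in place of the library's own parse helper.

from xml.dom import minidom

dom = minidom.parseString('<html><body><p>one</p><p>two</p></body></html>')

# [0, 1] means: first child of the root element (<body>), then its second child.
node = get_location(dom, [0, 1])
print(node.toxml())  # expected: '<p>two</p>'

# An out-of-range path such as [0, 5] raises ValueError.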
def check_text_similarity(a_dom, b_dom, cutoff):
    """Check whether two dom trees have similar text or not."""
    a_words = list(tree_words(a_dom))
    b_words = list(tree_words(b_dom))
    sm = WordMatcher(a=a_words, b=b_words)
    if sm.text_ratio() >= cutoff:
        return True
    return False
|
Check whether two dom trees have similar text or not.
|
entailment
|
def tree_words(node):
    """Return all the significant text below the given node as a list of words.
    >>> list(tree_words(parse_minidom('<h1>one</h1> two <div>three<em>four</em></div>')))
    ['one', 'two', 'three', 'four']
    """
    for word in split_text(tree_text(node)):
        word = word.strip()
        if word:
            yield word
|
Return all the significant text below the given node as a list of words.
>>> list(tree_words(parse_minidom('<h1>one</h1> two <div>three<em>four</em></div>')))
['one', 'two', 'three', 'four']
|
entailment
|
def tree_text(node):
    """
    >>> tree_text(parse_minidom('<h1>one</h1>two<div>three<em>four</em></div>'))
    'one two three four'
    """
    text = []
    for descendant in walk_dom(node):
        if is_text(descendant):
            text.append(descendant.nodeValue)
    return ' '.join(text)
|
>>> tree_text(parse_minidom('<h1>one</h1>two<div>three<em>four</em></div>'))
'one two three four'
|
entailment
|
def insert_or_append(parent, node, next_sibling):
    """
    Insert the node before next_sibling. If next_sibling is None, append the node last instead.
    """
    # simple insert
    if next_sibling:
        parent.insertBefore(node, next_sibling)
    else:
        parent.appendChild(node)
|
Insert the node before next_sibling. If next_sibling is None, append the node last instead.
|
entailment
|
def wrap(node, tag):
    """Wrap the given tag around a node."""
    wrap_node = node.ownerDocument.createElement(tag)
    parent = node.parentNode
    if parent:
        parent.replaceChild(wrap_node, node)
    wrap_node.appendChild(node)
    return wrap_node
|
Wrap the given tag around a node.
|
entailment
|
def wrap_inner(node, tag):
    """Wrap the given tag around the contents of a node."""
    children = list(node.childNodes)
    wrap_node = node.ownerDocument.createElement(tag)
    for c in children:
        wrap_node.appendChild(c)
    node.appendChild(wrap_node)
|
Wrap the given tag around the contents of a node.
|
entailment
|
def unwrap(node):
    """Remove a node, replacing it with its children."""
    for child in list(node.childNodes):
        node.parentNode.insertBefore(child, node)
    remove_node(node)
|
Remove a node, replacing it with its children.
|
entailment
|
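A minimal round-trip sketch of wrap and unwrap using xml.dom.minidom; it assumes wrap, unwrap and the remove_node helper they call are importable from the module shown above, and the markup is illustrative.

from xml.dom import minidom

dom = minidom.parseString('<body><p>changed text</p></body>')
para = dom.getElementsByTagName('p')[0]

ins = wrap(para, 'ins')
print(dom.documentElement.toxml())  # expected: '<body><ins><p>changed text</p></ins></body>'

unwrap(ins)
print(dom.documentElement.toxml())  # expected: '<body><p>changed text</p></body>'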
def full_split(text, regex):
    """
    Split the text by the regex, keeping all parts.
    The parts should re-join back into the original text.
    >>> list(full_split('word', re.compile('&.*?')))
    ['word']
    """
    while text:
        m = regex.search(text)
        if not m:
            yield text
            break
        left = text[:m.start()]
        middle = text[m.start():m.end()]
        right = text[m.end():]
        if left:
            yield left
        if middle:
            yield middle
        text = right
|
Split the text by the regex, keeping all parts.
The parts should re-join back into the original text.
>>> list(full_split('word', re.compile('&.*?')))
['word']
|
entailment
|
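The doctest above only exercises the no-match case; here is a small illustrative call with a pattern that does match, assuming full_split as defined above is in scope.

import re

# Split around an HTML entity while keeping the entity itself as a piece.
parts = list(full_split('a&amp;b', re.compile(r'&.*?;')))
print(parts)           # expected: ['a', '&amp;', 'b']
print(''.join(parts))  # the pieces re-join into the original text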
def multi_split(text, regexes):
    """
    Split the text by the given regexes, in priority order.
    Make sure that the regex is parenthesized so that matches are returned in
    re.split().
    Splitting on a single regex works like normal split.
    >>> '|'.join(multi_split('one two three', [r'\w+']))
    'one| |two| |three'
    Splitting on digits first separates the digits from their word
    >>> '|'.join(multi_split('one234five 678', [r'\d+', r'\w+']))
    'one|234|five| |678'
    Splitting on words first keeps the word with digits intact.
    >>> '|'.join(multi_split('one234five 678', [r'\w+', r'\d+']))
    'one234five| |678'
    """
    def make_regex(s):
        return re.compile(s) if isinstance(s, basestring) else s
    regexes = [make_regex(r) for r in regexes]
    # Run the list of pieces through the regex split, splitting it into more
    # pieces. Once a piece has been matched, add it to finished_pieces and
    # don't split it again. The pieces should always join back together to form
    # the original text.
    piece_list = [text]
    finished_pieces = set()
    def apply_re(regex, piece_list):
        for piece in piece_list:
            if piece in finished_pieces:
                yield piece
                continue
            for s in full_split(piece, regex):
                if regex.match(s):
                    finished_pieces.add(s)
                if s:
                    yield s
    for regex in regexes:
        piece_list = list(apply_re(regex, piece_list))
    assert ''.join(piece_list) == text
    return piece_list
|
Split the text by the given regexes, in priority order.
Make sure that the regex is parenthesized so that matches are returned in
re.split().
Splitting on a single regex works like normal split.
>>> '|'.join(multi_split('one two three', [r'\w+']))
'one| |two| |three'
Splitting on digits first separates the digits from their word
>>> '|'.join(multi_split('one234five 678', [r'\d+', r'\w+']))
'one|234|five| |678'
Splitting on words first keeps the word with digits intact.
>>> '|'.join(multi_split('one234five 678', [r'\w+', r'\d+']))
'one234five| |678'
|
entailment
|
def text_ratio(self):
    """Return a measure of the sequences' word similarity (float in [0,1]).
    Each word has weight equal to its length for this measure
    >>> m = WordMatcher(a=['abcdef', '12'], b=['abcdef', '34']) # 3/4 of the text is the same
    >>> '%.3f' % m.ratio() # normal ratio fails
    '0.500'
    >>> '%.3f' % m.text_ratio() # text ratio is accurate
    '0.750'
    """
    return _calculate_ratio(
        self.match_length(),
        self._text_length(self.a) + self._text_length(self.b),
    )
|
Return a measure of the sequences' word similarity (float in [0,1]).
Each word has weight equal to its length for this measure
>>> m = WordMatcher(a=['abcdef', '12'], b=['abcdef', '34']) # 3/4 of the text is the same
>>> '%.3f' % m.ratio() # normal ratio fails
'0.500'
>>> '%.3f' % m.text_ratio() # text ratio is accurate
'0.750'
|
entailment
|
def match_length(self):
    """ Find the total length of all words that match between the two sequences."""
    length = 0
    for match in self.get_matching_blocks():
        a, b, size = match
        length += self._text_length(self.a[a:a+size])
    return length
|
Find the total length of all words that match between the two sequences.
|
entailment
|
def run_edit_script(self):
    """
    Run an xml edit script, and return the new html produced.
    """
    for action, location, properties in self.edit_script:
        if action == 'delete':
            node = get_location(self.dom, location)
            self.action_delete(node)
        elif action == 'insert':
            parent = get_location(self.dom, location[:-1])
            child_index = location[-1]
            self.action_insert(parent, child_index, **properties)
    return self.dom
|
Run an xml edit script, and return the new html produced.
|
entailment
|
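The loop above implies the shape of an edit script: a sequence of (action, location, properties) tuples, where location is the child-index path understood by get_location. A purely hypothetical script, with made-up property names, might look like this:

edit_script = [
    ('delete', [0, 1], {}),            # delete the node at path [0, 1]
    ('insert', [0, 1], {'tag': 'p'}),  # insert a new node as child 1 of the node at [0]
]
# The property dict is unpacked into action_insert(parent, child_index, **properties),
# so its keys must match that method's keyword arguments.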
def add_changes_markup(dom, ins_nodes, del_nodes):
    """
    Add <ins> and <del> tags to the dom to show changes.
    """
    # add markup for inserted and deleted sections
    for node in reversed(del_nodes):
        # diff algorithm deletes nodes in reverse order, so un-reverse the
        # order for this iteration
        insert_or_append(node.orig_parent, node, node.orig_next_sibling)
        wrap(node, 'del')
    for node in ins_nodes:
        wrap(node, 'ins')
    # Perform post-processing and cleanup.
    remove_nesting(dom, 'del')
    remove_nesting(dom, 'ins')
    sort_del_before_ins(dom)
    merge_adjacent(dom, 'del')
    merge_adjacent(dom, 'ins')
|
Add <ins> and <del> tags to the dom to show changes.
|
entailment
|
def remove_nesting(dom, tag_name):
    """
    Unwrap items in the node list that have ancestors with the same tag.
    """
    for node in dom.getElementsByTagName(tag_name):
        for ancestor in ancestors(node):
            if ancestor is node:
                continue
            if ancestor is dom.documentElement:
                break
            if ancestor.tagName == tag_name:
                unwrap(node)
                break
|
Unwrap items in the node list that have ancestors with the same tag.
|
entailment
|
def sort_nodes(dom, cmp_func):
    """
    Sort the nodes of the dom in-place, based on a comparison function.
    """
    dom.normalize()
    for node in list(walk_dom(dom, elements_only=True)):
        prev_sib = node.previousSibling
        while prev_sib and cmp_func(prev_sib, node) == 1:
            node.parentNode.insertBefore(node, prev_sib)
            prev_sib = node.previousSibling
|
Sort the nodes of the dom in-place, based on a comparison function.
|
entailment
|
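sort_nodes expects an old-style comparator that returns 1 when its first argument should come after its second. A hypothetical comparator that pushes <del> elements ahead of adjacent <ins> elements (the ordering sort_del_before_ins suggests) could look like this:

def del_before_ins(a, b):
    # Returning 1 tells sort_nodes to move b in front of a.
    if getattr(a, 'tagName', None) == 'ins' and getattr(b, 'tagName', None) == 'del':
        return 1
    return 0

# sort_nodes(dom, del_before_ins)  # assuming a parsed dom is in scope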
def merge_adjacent(dom, tag_name):
    """
    Merge all adjacent tags with the specified tag name.
    Return the number of merges performed.
    """
    for node in dom.getElementsByTagName(tag_name):
        prev_sib = node.previousSibling
        if prev_sib and prev_sib.nodeName == node.tagName:
            for child in list(node.childNodes):
                prev_sib.appendChild(child)
            remove_node(node)
|
Merge all adjacent tags with the specified tag name.
Return the number of merges performed.
|
entailment
|
def distribute(node):
    """
    Wrap a copy of the given element around the contents of each of its
    children, removing the node in the process.
    """
    children = list(c for c in node.childNodes if is_element(c))
    unwrap(node)
    tag_name = node.tagName
    for c in children:
        wrap_inner(c, tag_name)
|
Wrap a copy of the given element around the contents of each of its
children, removing the node in the process.
|
entailment
|
def save(self, *args, **kwargs):
    """
    Save object to the database. Removes all other entries if there
    are any.
    """
    self.__class__.objects.exclude(id=self.id).delete()
    super(SingletonModel, self).save(*args, **kwargs)
|
Save object to the database. Removes all other entries if there
are any.
|
entailment
|
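A brief sketch of how a concrete model might rely on this override; SingletonModel is assumed to be the abstract Django base class implied by the super() call above, and the model name and field are illustrative only.

from django.db import models

class SiteConfiguration(SingletonModel):
    # Illustrative field; any model inheriting the save() above keeps at most
    # one row, because save() first deletes every other entry.
    maintenance_mode = models.BooleanField(default=False)

# config = SiteConfiguration(maintenance_mode=True)
# config.save()  # removes any previously saved SiteConfiguration rows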
def get_magicc_region_to_openscm_region_mapping(inverse=False):
"""Get the mappings from MAGICC to OpenSCM regions.
This is not a pure inverse of the other way around. For example, we never provide
"GLOBAL" as a MAGICC return value because it's unnecesarily confusing when we also
have "World". Fortunately MAGICC doesn't ever read the name "GLOBAL" so this
shouldn't matter.
Parameters
----------
inverse : bool
If True, return the inverse mappings i.e. MAGICC to OpenSCM mappings
Returns
-------
dict
Dictionary of mappings
"""
def get_openscm_replacement(in_region):
world = "World"
if in_region in ("WORLD", "GLOBAL"):
return world
if in_region in ("BUNKERS"):
return DATA_HIERARCHY_SEPARATOR.join([world, "Bunkers"])
elif in_region.startswith(("NH", "SH")):
in_region = in_region.replace("-", "")
hem = "Northern Hemisphere" if "NH" in in_region else "Southern Hemisphere"
if in_region in ("NH", "SH"):
return DATA_HIERARCHY_SEPARATOR.join([world, hem])
land_ocean = "Land" if "LAND" in in_region else "Ocean"
return DATA_HIERARCHY_SEPARATOR.join([world, hem, land_ocean])
else:
return DATA_HIERARCHY_SEPARATOR.join([world, in_region])
# we generate the mapping dynamically, the first name in the list
# is the one which will be used for inverse mappings
_magicc_regions = [
"WORLD",
"GLOBAL",
"OECD90",
"ALM",
"REF",
"ASIA",
"R5ASIA",
"R5OECD",
"R5REF",
"R5MAF",
"R5LAM",
"R6OECD90",
"R6REF",
"R6LAM",
"R6MAF",
"R6ASIA",
"NHOCEAN",
"SHOCEAN",
"NHLAND",
"SHLAND",
"NH-OCEAN",
"SH-OCEAN",
"NH-LAND",
"SH-LAND",
"SH",
"NH",
"BUNKERS",
]
replacements = {}
for magicc_region in _magicc_regions:
openscm_region = get_openscm_replacement(magicc_region)
# i.e. if we've already got a value for the inverse, we don't want to overwrite
if (openscm_region in replacements.values()) and inverse:
continue
replacements[magicc_region] = openscm_region
if inverse:
return {v: k for k, v in replacements.items()}
else:
return replacements
|
Get the mappings from MAGICC to OpenSCM regions.
This is not a pure inverse of the other way around. For example, we never provide
"GLOBAL" as a MAGICC return value because it's unnecesarily confusing when we also
have "World". Fortunately MAGICC doesn't ever read the name "GLOBAL" so this
shouldn't matter.
Parameters
----------
inverse : bool
If True, return the inverse mappings i.e. MAGICC to OpenSCM mappings
Returns
-------
dict
Dictionary of mappings
|
entailment
|
def convert_magicc_to_openscm_regions(regions, inverse=False):
"""
Convert MAGICC regions to OpenSCM regions
Parameters
----------
regions : list_like, str
Regions to convert
inverse : bool
If True, convert the other way i.e. convert OpenSCM regions to MAGICC7
regions
Returns
-------
``type(regions)``
Set of converted regions
"""
if isinstance(regions, (list, pd.Index)):
return [_apply_convert_magicc_to_openscm_regions(r, inverse) for r in regions]
else:
return _apply_convert_magicc_to_openscm_regions(regions, inverse)
|
Convert MAGICC regions to OpenSCM regions
Parameters
----------
regions : list_like, str
Regions to convert
inverse : bool
If True, convert the other way i.e. convert OpenSCM regions to MAGICC7
regions
Returns
-------
``type(regions)``
Set of converted regions
|
entailment
|
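A short usage sketch of the two region helpers above; the expected strings follow from the mapping logic in get_magicc_region_to_openscm_region_mapping, assuming DATA_HIERARCHY_SEPARATOR is the pipe character used elsewhere in this file.

print(convert_magicc_to_openscm_regions("R5ASIA"))
# expected: 'World|R5ASIA'
print(convert_magicc_to_openscm_regions(["NHOCEAN", "BUNKERS", "GLOBAL"]))
# expected: ['World|Northern Hemisphere|Ocean', 'World|Bunkers', 'World']
print(convert_magicc_to_openscm_regions("World|Northern Hemisphere", inverse=True))
# expected: 'NH'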
def get_magicc7_to_openscm_variable_mapping(inverse=False):
"""Get the mappings from MAGICC7 to OpenSCM variables.
Parameters
----------
inverse : bool
If True, return the inverse mappings i.e. OpenSCM to MAGICC7 mappings
Returns
-------
dict
Dictionary of mappings
"""
def get_openscm_replacement(in_var):
if in_var.endswith("_INVERSE_EMIS"):
prefix = "Inverse Emissions"
elif in_var.endswith("_EMIS"):
prefix = "Emissions"
elif in_var.endswith("_CONC"):
prefix = "Atmospheric Concentrations"
elif in_var.endswith("_RF"):
prefix = "Radiative Forcing"
elif in_var.endswith("_OT"):
prefix = "Optical Thickness"
else:
raise ValueError("This shouldn't happen")
variable = in_var.split("_")[0]
# I hate edge cases
if variable.endswith("EQ"):
variable = variable.replace("EQ", " Equivalent")
if "GHG" in variable:
variable = variable.replace("GHG", "Greenhouse Gases")
if "BIOMASSAER" in variable:
variable = variable.replace("BIOMASSAER", "Aerosols|MAGICC AFOLU")
if "CO2CH4N2O" in variable:
variable = variable.replace("CO2CH4N2O", "CO2, CH4 and N2O")
aggregate_indicators = {
"KYOTO": "Kyoto Gases",
"FGASSUM": "F Gases",
"MHALOSUM": "Montreal Protocol Halogen Gases",
}
for agg_indicator, long_name in aggregate_indicators.items():
if variable.startswith(agg_indicator):
stripped_var = variable.replace(agg_indicator, "")
if stripped_var:
variable = DATA_HIERARCHY_SEPARATOR.join([stripped_var, long_name])
else:
variable = long_name
edge_case_B = variable.upper() in ("HCFC141B", "HCFC142B")
if variable.endswith("I"):
variable = DATA_HIERARCHY_SEPARATOR.join(
[variable[:-1], "MAGICC Fossil and Industrial"]
)
elif variable.endswith("B") and not edge_case_B:
variable = DATA_HIERARCHY_SEPARATOR.join([variable[:-1], "MAGICC AFOLU"])
case_adjustments = {
"SOX": "SOx",
"NOX": "NOx",
"HFC134A": "HFC134a",
"HFC143A": "HFC143a",
"HFC152A": "HFC152a",
"HFC227EA": "HFC227ea",
"HFC236FA": "HFC236fa",
"HFC245FA": "HFC245fa",
"HFC365MFC": "HFC365mfc",
"HCFC141B": "HCFC141b",
"HCFC142B": "HCFC142b",
"CH3CCL3": "CH3CCl3",
"CCL4": "CCl4",
"CH3CL": "CH3Cl",
"CH2CL2": "CH2Cl2",
"CHCL3": "CHCl3",
"CH3BR": "CH3Br",
"HALON1211": "Halon1211",
"HALON1301": "Halon1301",
"HALON2402": "Halon2402",
"HALON1202": "Halon1202",
"SOLAR": "Solar",
"VOLCANIC": "Volcanic",
"EXTRA": "Extra",
}
variable = apply_string_substitutions(variable, case_adjustments)
return DATA_HIERARCHY_SEPARATOR.join([prefix, variable])
magicc7_suffixes = ["_EMIS", "_CONC", "_RF", "_OT", "_INVERSE_EMIS"]
magicc7_base_vars = MAGICC7_EMISSIONS_UNITS.magicc_variable.tolist() + [
"SOLAR",
"VOLCANIC",
"CO2EQ",
"KYOTOCO2EQ",
"FGASSUMHFC134AEQ",
"MHALOSUMCFC12EQ",
"GHG",
"KYOTOGHG",
"FGASSUM",
"MHALOSUM",
"BIOMASSAER",
"CO2CH4N2O",
"EXTRA",
]
magicc7_vars = [
base_var + suffix
for base_var in magicc7_base_vars
for suffix in magicc7_suffixes
]
replacements = {m7v: get_openscm_replacement(m7v) for m7v in magicc7_vars}
replacements.update(
{
"SURFACE_TEMP": "Surface Temperature",
"TOTAL_INCLVOLCANIC_RF": "Radiative Forcing",
"VOLCANIC_ANNUAL_RF": "Radiative Forcing|Volcanic",
"TOTAL_ANTHRO_RF": "Radiative Forcing|Anthropogenic",
"TOTAER_DIR_RF": "Radiative Forcing|Aerosols|Direct Effect",
"CLOUD_TOT_RF": "Radiative Forcing|Aerosols|Indirect Effect",
"MINERALDUST_RF": "Radiative Forcing|Mineral Dust",
"STRATOZ_RF": "Radiative Forcing|Stratospheric Ozone",
"TROPOZ_RF": "Radiative Forcing|Tropospheric Ozone",
"CH4OXSTRATH2O_RF": "Radiative Forcing|CH4 Oxidation Stratospheric H2O", # what is this
"LANDUSE_RF": "Radiative Forcing|Land-use Change",
"BCSNOW_RF": "Radiative Forcing|Black Carbon on Snow",
"CO2PF_EMIS": "Land to Air Flux|CO2|MAGICC Permafrost",
# "CH4PF_EMIS": "Land to Air Flux|CH4|MAGICC Permafrost", # TODO: test and then add when needed
}
)
agg_ocean_heat_top = "Aggregated Ocean Heat Content"
heat_content_aggreg_depths = {
"HEATCONTENT_AGGREG_DEPTH{}".format(i): "{}{}Depth {}".format(
agg_ocean_heat_top, DATA_HIERARCHY_SEPARATOR, i
)
for i in range(1, 4)
}
replacements.update(heat_content_aggreg_depths)
replacements.update({"HEATCONTENT_AGGREG_TOTAL": agg_ocean_heat_top})
ocean_temp_layer = {
"OCEAN_TEMP_LAYER_{0:03d}".format(i): "Ocean Temperature{}Layer {}".format(
DATA_HIERARCHY_SEPARATOR, i
)
for i in range(1, 999)
}
replacements.update(ocean_temp_layer)
if inverse:
return {v: k for k, v in replacements.items()}
else:
return replacements
|
Get the mappings from MAGICC7 to OpenSCM variables.
Parameters
----------
inverse : bool
If True, return the inverse mappings i.e. OpenSCM to MAGICC7 mappings
Returns
-------
dict
Dictionary of mappings
|
entailment
|
def convert_magicc7_to_openscm_variables(variables, inverse=False):
"""
Convert MAGICC7 variables to OpenSCM variables
Parameters
----------
variables : list_like, str
Variables to convert
inverse : bool
If True, convert the other way i.e. convert OpenSCM variables to MAGICC7
variables
Returns
-------
``type(variables)``
Set of converted variables
"""
if isinstance(variables, (list, pd.Index)):
return [
_apply_convert_magicc7_to_openscm_variables(v, inverse) for v in variables
]
else:
return _apply_convert_magicc7_to_openscm_variables(variables, inverse)
|
Convert MAGICC7 variables to OpenSCM variables
Parameters
----------
variables : list_like, str
Variables to convert
inverse : bool
If True, convert the other way i.e. convert OpenSCM variables to MAGICC7
variables
Returns
-------
``type(variables)``
Set of converted variables
|
entailment
|
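A usage sketch for the variable converter; the expected strings are read off the mapping logic above (the "_EMIS" suffix maps to the "Emissions" prefix, a trailing "I" or "B" marks MAGICC's fossil-and-industrial or AFOLU split) and should be treated as illustrative.

print(convert_magicc7_to_openscm_variables("CO2I_EMIS"))
# expected: 'Emissions|CO2|MAGICC Fossil and Industrial'
print(convert_magicc7_to_openscm_variables("CH4_CONC"))
# expected: 'Atmospheric Concentrations|CH4'
print(convert_magicc7_to_openscm_variables("Emissions|CO2|MAGICC AFOLU", inverse=True))
# expected: 'CO2B_EMIS'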
def get_magicc6_to_magicc7_variable_mapping(inverse=False):
"""Get the mappings from MAGICC6 to MAGICC7 variables.
Note that this mapping is not one to one. For example, "HFC4310", "HFC43-10" and
"HFC-43-10" in MAGICC6 all map to "HFC4310" in MAGICC7, but "HFC4310" in
MAGICC7 maps back only to "HFC4310" in MAGICC6.
Note that HFC-245fa was mistakenly labelled as HFC-245ca in MAGICC6. In reality,
they are not the same thing. However, the MAGICC6 labelling was merely a typo so
the mapping between the two is one-to-one.
Parameters
----------
inverse : bool
If True, return the inverse mappings i.e. MAGICC7 to MAGICC6 mappings
Returns
-------
dict
Dictionary of mappings
"""
# we generate the mapping dynamically, the first name in the list
# is the one which will be used for inverse mappings
magicc6_simple_mapping_vars = [
"KYOTO-CO2EQ",
"CO2I",
"CO2B",
"CH4",
"N2O",
"BC",
"OC",
"SOx",
"NOx",
"NMVOC",
"CO",
"SF6",
"NH3",
"CF4",
"C2F6",
"HFC4310",
"HFC43-10",
"HFC-43-10",
"HFC4310",
"HFC134a",
"HFC143a",
"HFC227ea",
"CCl4",
"CH3CCl3",
"HFC245fa",
"Halon 1211",
"Halon 1202",
"Halon 1301",
"Halon 2402",
"Halon1211",
"Halon1202",
"Halon1301",
"Halon2402",
"CH3Br",
"CH3Cl",
"C6F14",
]
magicc6_sometimes_hyphen_vars = [
"CFC-11",
"CFC-12",
"CFC-113",
"CFC-114",
"CFC-115",
"HCFC-22",
"HFC-23",
"HFC-32",
"HFC-125",
"HFC-134a",
"HFC-143a",
"HCFC-141b",
"HCFC-142b",
"HFC-227ea",
"HFC-245fa",
]
magicc6_sometimes_hyphen_vars = [
v.replace("-", "") for v in magicc6_sometimes_hyphen_vars
] + magicc6_sometimes_hyphen_vars
magicc6_sometimes_underscore_vars = [
"HFC43_10",
"CFC_11",
"CFC_12",
"CFC_113",
"CFC_114",
"CFC_115",
"HCFC_22",
"HCFC_141b",
"HCFC_142b",
]
magicc6_sometimes_underscore_replacements = {
v: v.replace("_", "") for v in magicc6_sometimes_underscore_vars
}
special_case_replacements = {
"FossilCO2": "CO2I",
"OtherCO2": "CO2B",
"MCF": "CH3CCL3",
"CARB_TET": "CCL4",
"MHALOSUMCFC12EQ": "MHALOSUMCFC12EQ", # special case to avoid confusion with MCF
}
one_way_replacements = {"HFC-245ca": "HFC245FA", "HFC245ca": "HFC245FA"}
all_possible_magicc6_vars = (
magicc6_simple_mapping_vars
+ magicc6_sometimes_hyphen_vars
+ magicc6_sometimes_underscore_vars
+ list(special_case_replacements.keys())
+ list(one_way_replacements.keys())
)
replacements = {}
for m6v in all_possible_magicc6_vars:
if m6v in special_case_replacements:
replacements[m6v] = special_case_replacements[m6v]
elif (
m6v in magicc6_sometimes_underscore_vars and not inverse
): # underscores one way
replacements[m6v] = magicc6_sometimes_underscore_replacements[m6v]
elif (m6v in one_way_replacements) and not inverse:
replacements[m6v] = one_way_replacements[m6v]
else:
m7v = m6v.replace("-", "").replace(" ", "").upper()
# i.e. if we've already got a value for the inverse, we don't
# want to overwrite it
if (m7v in replacements.values()) and inverse:
continue
replacements[m6v] = m7v
if inverse:
return {v: k for k, v in replacements.items()}
else:
return replacements
|
Get the mappings from MAGICC6 to MAGICC7 variables.
Note that this mapping is not one to one. For example, "HFC4310", "HFC43-10" and
"HFC-43-10" in MAGICC6 all map to "HFC4310" in MAGICC7, but "HFC4310" in
MAGICC7 maps back only to "HFC4310" in MAGICC6.
Note that HFC-245fa was mistakenly labelled as HFC-245ca in MAGICC6. In reality,
they are not the same thing. However, the MAGICC6 labelling was merely a typo so
the mapping between the two is one-to-one.
Parameters
----------
inverse : bool
If True, return the inverse mappings i.e. MAGICC7 to MAGICC6 mappings
Returns
-------
dict
Dictionary of mappings
|
entailment
|
def convert_magicc6_to_magicc7_variables(variables, inverse=False):
"""
Convert MAGICC6 variables to MAGICC7 variables
Parameters
----------
variables : list_like, str
Variables to convert
inverse : bool
If True, convert the other way i.e. convert MAGICC7 variables to MAGICC6
variables
Raises
------
ValueError
If you try to convert HFC245ca, or some variant thereof, you will get a
ValueError. The reason is that this variable was never meant to be included in
MAGICC6, it was just an accident. See, for example, the text in the
description section of ``pymagicc/MAGICC6/run/HISTRCP_HFC245fa_CONC.IN``:
"...HFC245fa, rather than HFC245ca, is the actually used isomer.".
Returns
-------
``type(variables)``
Set of converted variables
"""
if isinstance(variables, (list, pd.Index)):
return [
_apply_convert_magicc6_to_magicc7_variables(v, inverse) for v in variables
]
else:
return _apply_convert_magicc6_to_magicc7_variables(variables, inverse)
|
Convert MAGICC6 variables to MAGICC7 variables
Parameters
----------
variables : list_like, str
Variables to convert
inverse : bool
If True, convert the other way i.e. convert MAGICC7 variables to MAGICC6
variables
Raises
------
ValueError
If you try to convert HFC245ca, or some variant thereof, you will get a
ValueError. The reason is that this variable was never meant to be included in
MAGICC6, it was just an accident. See, for example, the text in the
description section of ``pymagicc/MAGICC6/run/HISTRCP_HFC245fa_CONC.IN``:
"...HFC245fa, rather than HFC245ca, is the actually used isomer.".
Returns
-------
``type(variables)``
Set of converted variables
|
entailment
|
def get_pint_to_fortran_safe_units_mapping(inverse=False):
"""Get the mappings from Pint to Fortran safe units.
Fortran can't handle special characters like "^" or "/" in names, but we need
these in Pint. Conversely, Pint stores variables with spaces by default e.g. "Mt
CO2 / yr" but we don't want these in the input files as Fortran is likely to think
the whitespace is a delimiter.
Parameters
----------
inverse : bool
If True, return the inverse mappings i.e. Fortran safe to Pint mappings
Returns
-------
dict
Dictionary of mappings
"""
replacements = {"^": "super", "/": "per", " ": ""}
if inverse:
replacements = {v: k for k, v in replacements.items()}
# mapping nothing to something is obviously not going to work in the inverse
# hence remove
replacements.pop("")
return replacements
|
Get the mappings from Pint to Fortran safe units.
Fortran can't handle special characters like "^" or "/" in names, but we need
these in Pint. Conversely, Pint stores variables with spaces by default e.g. "Mt
CO2 / yr" but we don't want these in the input files as Fortran is likely to think
the whitespace is a delimiter.
Parameters
----------
inverse : bool
If True, return the inverse mappings i.e. Fortran safe to Pint mappings
Returns
-------
dict
Dictionary of mappings
|
entailment
|
def convert_pint_to_fortran_safe_units(units, inverse=False):
"""
Convert Pint units to Fortran safe units
Parameters
----------
units : list_like, str
Units to convert
inverse : bool
If True, convert the other way i.e. convert Fortran safe units to Pint units
Returns
-------
``type(units)``
Set of converted units
"""
if inverse:
return apply_string_substitutions(units, FORTRAN_SAFE_TO_PINT_UNITS_MAPPING)
else:
return apply_string_substitutions(units, PINT_TO_FORTRAN_SAFE_UNITS_MAPPING)
|
Convert Pint units to Fortran safe units
Parameters
----------
units : list_like, str
Units to convert
inverse : bool
If True, convert the other way i.e. convert Fortran safe units to Pint units
Returns
-------
``type(units)``
Set of converted units
|
entailment
|
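A brief sketch of the unit converter; the expected strings follow from the replacement table above ("^" becomes "super", "/" becomes "per", spaces are dropped). Because the space replacement is removed from the inverse mapping, a round trip cannot restore the original spacing.

print(convert_pint_to_fortran_safe_units("Mt CO2 / yr"))
# expected: 'MtCO2peryr'
print(convert_pint_to_fortran_safe_units("W / m^2"))
# expected: 'Wpermsuper2'
print(convert_pint_to_fortran_safe_units("Wpermsuper2", inverse=True))
# expected: 'W/m^2', with no way to recover the spaces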
def run_evaluate(self) -> None:
"""
Overrides the base evaluation to set the value to the evaluation result of the value
expression in the schema
"""
result = None
self.eval_error = False
if self._needs_evaluation:
result = self._schema.value.evaluate(self._evaluation_context)
self.eval_error = result is None
if self.eval_error:
return
# Only set the value if it conforms to the field type
if not self._schema.is_type_of(result):
try:
result = self._schema.type_object(result)
except Exception as err:
logging.debug('{} in casting {} to {} for field {}. Error: {}'.format(
type(err).__name__, result, self._schema.type,
self._schema.fully_qualified_name, err))
self.eval_error = True
return
try:
result = self._schema.sanitize_object(result)
except Exception as err:
logging.debug('{} in sanitizing {} of type {} for field {}. Error: {}'.format(
type(err).__name__, result, self._schema.type, self._schema.fully_qualified_name,
err))
self.eval_error = True
return
self.value = result
|
Overrides the base evaluation to set the value to the evaluation result of the value
expression in the schema
|
entailment
|
def set(self, key: Any, value: Any) -> None:
""" Sets the value of a key to a supplied value """
if key is not None:
self[key] = value
|
Sets the value of a key to a supplied value
|
entailment
|
def increment(self, key: Any, by: int = 1) -> None:
""" Increments the value set against a key. If the key is not present, 0 is assumed as the initial state """
if key is not None:
self[key] = self.get(key, 0) + by
|
Increments the value set against a key. If the key is not present, 0 is assumed as the initial state
|
entailment
|
def insert(self, index: int, obj: Any) -> None:
""" Inserts an item to the list as long as it is not None """
if obj is not None:
super().insert(index, obj)
|
Inserts an item to the list as long as it is not None
|
entailment
|
def get_dattype_regionmode(regions, scen7=False):
"""
Get the THISFILE_DATTYPE and THISFILE_REGIONMODE flags for a given region set.
In all MAGICC input files, there are two flags: THISFILE_DATTYPE and
THISFILE_REGIONMODE. These tell MAGICC how to read in a given input file. This
function maps the regions which are in a given file to the value of these flags
expected by MAGICC.
Parameters
----------
regions : list_like
The regions to get THISFILE_DATTYPE and THISFILE_REGIONMODE flags for.
scen7 : bool, optional
Whether the file we are getting the flags for is a SCEN7 file or not.
Returns
-------
dict
Dictionary where the flags are the keys and the values are the value they
should be set to for the given inputs.
"""
dattype_flag = "THISFILE_DATTYPE"
regionmode_flag = "THISFILE_REGIONMODE"
region_dattype_row = _get_dattype_regionmode_regions_row(regions, scen7=scen7)
dattype = DATTYPE_REGIONMODE_REGIONS[dattype_flag.lower()][region_dattype_row].iloc[
0
]
regionmode = DATTYPE_REGIONMODE_REGIONS[regionmode_flag.lower()][
region_dattype_row
].iloc[0]
return {dattype_flag: dattype, regionmode_flag: regionmode}
|
Get the THISFILE_DATTYPE and THISFILE_REGIONMODE flags for a given region set.
In all MAGICC input files, there are two flags: THISFILE_DATTYPE and
THISFILE_REGIONMODE. These tell MAGICC how to read in a given input file. This
function maps the regions which are in a given file to the value of these flags
expected by MAGICC.
Parameters
----------
regions : list_like
The regions to get THISFILE_DATTYPE and THISFILE_REGIONMODE flags for.
scen7 : bool, optional
Whether the file we are getting the flags for is a SCEN7 file or not.
Returns
-------
dict
Dictionary where the flags are the keys and the values are the value they
should be set to for the given inputs.
|
entailment
|
def get_region_order(regions, scen7=False):
"""
Get the region order expected by MAGICC.
Parameters
----------
regions : list_like
The regions to get THISFILE_DATTYPE and THISFILE_REGIONMODE flags for.
scen7 : bool, optional
Whether the file we are getting the flags for is a SCEN7 file or not.
Returns
-------
list
Region order expected by MAGICC for the given region set.
"""
region_dattype_row = _get_dattype_regionmode_regions_row(regions, scen7=scen7)
region_order = DATTYPE_REGIONMODE_REGIONS["regions"][region_dattype_row].iloc[0]
return region_order
|
Get the region order expected by MAGICC.
Parameters
----------
regions : list_like
The regions to get THISFILE_DATTYPE and THISFILE_REGIONMODE flags for.
scen7 : bool, optional
Whether the file we are getting the flags for is a SCEN7 file or not.
Returns
-------
list
Region order expected by MAGICC for the given region set.
|
entailment
|
def get_special_scen_code(regions, emissions):
"""
Get special code for MAGICC6 SCEN files.
At the top of every MAGICC6 and MAGICC5 SCEN file there is a two-digit
number. The first digit, the 'scenfile_region_code', tells MAGICC how many regions
data is being provided for. The second digit, the 'scenfile_emissions_code', tells
MAGICC which gases are in the SCEN file.
The variables which are part of ``PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1`` are the
emissions species which are expected when scenfile_emissions_code is 1. Similarly,
``PART_OF_SCENFILE_WITH_EMISSIONS_CODE_0`` defines the emissions species which are
expected when scenfile_emissions_code is 0.
Having these definitions allows Pymagicc to check that the right
set of emissions has been provided before writing SCEN files.
Parameters
----------
regions : list_like
Regions to get code for.
emissions : list-like
Emissions to get code for.
Raises
------
ValueError
If the special scen code cannot be determined.
Returns
-------
int
The special scen code for the regions-emissions combination provided.
"""
if sorted(set(PART_OF_SCENFILE_WITH_EMISSIONS_CODE_0)) == sorted(set(emissions)):
scenfile_emissions_code = 0
elif sorted(set(PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1)) == sorted(set(emissions)):
scenfile_emissions_code = 1
else:
msg = "Could not determine scen special code for emissions {}".format(emissions)
raise ValueError(msg)
if set(regions) == set(["WORLD"]):
scenfile_region_code = 1
elif set(regions) == set(["WORLD", "OECD90", "REF", "ASIA", "ALM"]):
scenfile_region_code = 2
elif set(regions) == set(["WORLD", "R5OECD", "R5REF", "R5ASIA", "R5MAF", "R5LAM"]):
scenfile_region_code = 3
elif set(regions) == set(
["WORLD", "R5OECD", "R5REF", "R5ASIA", "R5MAF", "R5LAM", "BUNKERS"]
):
scenfile_region_code = 4
try:
return scenfile_region_code * 10 + scenfile_emissions_code
except NameError:
msg = "Could not determine scen special code for regions {}".format(regions)
raise ValueError(msg)
|
Get special code for MAGICC6 SCEN files.
At the top of every MAGICC6 and MAGICC5 SCEN file there is a two-digit
number. The first digit, the 'scenfile_region_code', tells MAGICC how many regions
data is being provided for. The second digit, the 'scenfile_emissions_code', tells
MAGICC which gases are in the SCEN file.
The variables which are part of ``PART_OF_SCENFILE_WITH_EMISSIONS_CODE_1`` are the
emissions species which are expected when scenfile_emissions_code is 1. Similarly,
``PART_OF_SCENFILE_WITH_EMISSIONS_CODE_0`` defines the emissions species which are
expected when scenfile_emissions_code is 0.
Having these definitions allows Pymagicc to check that the right
set of emissions has been provided before writing SCEN files.
Parameters
----------
regions : list_like
Regions to get code for.
emissions : list-like
Emissions to get code for.
Raises
------
ValueError
If the special scen code cannot be determined.
Returns
-------
int
The special scen code for the regions-emissions combination provided.
|
entailment
|
def determine_tool(filepath, tool_to_get):
"""
Determine the tool to use for reading/writing.
The function uses an internally defined set of mappings between filepaths,
regular expressions and readers/writers to work out which tool to use
for a given task, given the filepath.
It is intended for internal use only, but is public because of its
importance to the input/output of pymagicc.
If it fails, it will give clear error messages about why and what the
available regular expressions are.
.. code:: python
>>> mdata = MAGICCData()
>>> mdata.read(MAGICC7_DIR, "HISTRCP_CO2I_EMIS.txt")
ValueError: Couldn't find appropriate writer for HISTRCP_CO2I_EMIS.txt.
The file must be one of the following types and the filepath must match its corresponding regular expression:
SCEN: ^.*\\.SCEN$
SCEN7: ^.*\\.SCEN7$
prn: ^.*\\.prn$
Parameters
----------
filepath : str
Name of the file to read/write, including extension
tool_to_get : str
The tool to get, valid options are "reader", "writer".
Invalid values will throw a NoReaderWriterError.
"""
file_regexp_reader_writer = {
"SCEN": {"regexp": r"^.*\.SCEN$", "reader": _ScenReader, "writer": _ScenWriter},
"SCEN7": {
"regexp": r"^.*\.SCEN7$",
"reader": _Scen7Reader,
"writer": _Scen7Writer,
},
"prn": {"regexp": r"^.*\.prn$", "reader": _PrnReader, "writer": _PrnWriter},
# "Sector": {"regexp": r".*\.SECTOR$", "reader": _Scen7Reader, "writer": _Scen7Writer},
"EmisIn": {
"regexp": r"^.*\_EMIS.*\.IN$",
"reader": _HistEmisInReader,
"writer": _HistEmisInWriter,
},
"ConcIn": {
"regexp": r"^.*\_CONC.*\.IN$",
"reader": _ConcInReader,
"writer": _ConcInWriter,
},
"OpticalThicknessIn": {
"regexp": r"^.*\_OT\.IN$",
"reader": _OpticalThicknessInReader,
"writer": _OpticalThicknessInWriter,
},
"RadiativeForcingIn": {
"regexp": r"^.*\_RF\.(IN|MON)$",
"reader": _RadiativeForcingInReader,
"writer": _RadiativeForcingInWriter,
},
"SurfaceTemperatureIn": {
"regexp": r"^.*SURFACE\_TEMP\.(IN|MON)$",
"reader": _SurfaceTemperatureInReader,
"writer": _SurfaceTemperatureInWriter,
},
"Out": {
"regexp": r"^DAT\_.*(?<!EMIS)\.OUT$",
"reader": _OutReader,
"writer": None,
},
"EmisOut": {
"regexp": r"^DAT\_.*EMIS\.OUT$",
"reader": _EmisOutReader,
"writer": None,
},
"InverseEmis": {
"regexp": r"^INVERSEEMIS\.OUT$",
"reader": _InverseEmisReader,
"writer": None,
},
"TempOceanLayersOut": {
"regexp": r"^TEMP\_OCEANLAYERS.*\.OUT$",
"reader": _TempOceanLayersOutReader,
"writer": None,
},
"BinOut": {
"regexp": r"^DAT\_.*\.BINOUT$",
"reader": _BinaryOutReader,
"writer": None,
},
"RCPData": {"regexp": r"^.*\.DAT", "reader": _RCPDatReader, "writer": None}
# "InverseEmisOut": {"regexp": r"^INVERSEEMIS\_.*\.OUT$", "reader": _Scen7Reader, "writer": _Scen7Writer},
}
fbase = basename(filepath)
if _unsupported_file(fbase):
raise NoReaderWriterError(
"{} is in an odd format for which we will never provide a reader/writer.".format(
filepath
)
)
for file_type, file_tools in file_regexp_reader_writer.items():
if re.match(file_tools["regexp"], fbase):
try:
return file_tools[tool_to_get]
except KeyError:
valid_tools = [k for k in file_tools.keys() if k != "regexp"]
error_msg = (
"MAGICCData does not know how to get a {}, "
"valid options are: {}".format(tool_to_get, valid_tools)
)
raise KeyError(error_msg)
para_file = "PARAMETERS.OUT"
if (filepath.endswith(".CFG")) and (tool_to_get == "reader"):
error_msg = (
"MAGCCInput cannot read .CFG files like {}, please use "
"pymagicc.io.read_cfg_file".format(filepath)
)
elif (filepath.endswith(para_file)) and (tool_to_get == "reader"):
error_msg = (
"MAGCCInput cannot read PARAMETERS.OUT as it is a config "
"style file, please use pymagicc.io.read_cfg_file"
)
else:
regexp_list_str = "\n".join(
[
"{}: {}".format(k, v["regexp"])
for k, v in file_regexp_reader_writer.items()
]
)
error_msg = (
"Couldn't find appropriate {} for {}.\nThe file must be one "
"of the following types and the filepath must match its "
"corresponding regular "
"expression:\n{}".format(tool_to_get, fbase, regexp_list_str)
)
raise NoReaderWriterError(error_msg)
|
Determine the tool to use for reading/writing.
The function uses an internally defined set of mappings between filepaths,
regular expressions and readers/writers to work out which tool to use
for a given task, given the filepath.
It is intended for internal use only, but is public because of its
importance to the input/output of pymagicc.
If it fails, it will give clear error messages about why and what the
available regular expressions are.
.. code:: python
>>> mdata = MAGICCData()
>>> mdata.read(MAGICC7_DIR, "HISTRCP_CO2I_EMIS.txt")
ValueError: Couldn't find appropriate writer for HISTRCP_CO2I_EMIS.txt.
The file must be one of the following types and the filepath must match its corresponding regular expression:
SCEN: ^.*\\.SCEN$
SCEN7: ^.*\\.SCEN7$
prn: ^.*\\.prn$
Parameters
----------
filepath : str
Name of the file to read/write, including extension
tool_to_get : str
The tool to get, valid options are "reader", "writer".
Invalid values will throw a NoReaderWriterError.
|
entailment
|
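A brief sketch of how the lookup above resolves a filepath; the filename is illustrative and the reader class is the one named in the EmisIn entry of the mapping.

# "HISTRCP_CO2I_EMIS.IN" matches the EmisIn pattern r"^.*\_EMIS.*\.IN$",
# so the matching reader class (_HistEmisInReader) is returned.
reader_cls = determine_tool("HISTRCP_CO2I_EMIS.IN", "reader")
reader = reader_cls("HISTRCP_CO2I_EMIS.IN")

# A filepath that matches none of the patterns raises NoReaderWriterError,
# and the error message lists every regular expression that was tried.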
def pull_cfg_from_parameters_out(parameters_out, namelist_to_read="nml_allcfgs"):
"""Pull out a single config set from a parameters_out namelist.
This function returns a single file with the config that needs to be passed to
MAGICC in order to do the same run as is represented by the values in
``parameters_out``.
Parameters
----------
parameters_out : dict, f90nml.Namelist
The parameters to dump
namelist_to_read : str
The namelist to read from the file.
Returns
-------
:obj:`f90nml.Namelist`
An f90nml object with the cleaned, read out config.
Examples
--------
>>> cfg = pull_cfg_from_parameters_out(magicc.metadata["parameters"])
>>> cfg.write("/somewhere/else/ANOTHERNAME.cfg")
"""
single_cfg = Namelist({namelist_to_read: {}})
for key, value in parameters_out[namelist_to_read].items():
if "file_tuning" in key:
single_cfg[namelist_to_read][key] = ""
else:
try:
if isinstance(value, str):
single_cfg[namelist_to_read][key] = value.strip(" \t\n\r").replace(
"\x00", ""
)
elif isinstance(value, list):
clean_list = [v.strip(" \t\n\r").replace("\x00", "") for v in value]
single_cfg[namelist_to_read][key] = [v for v in clean_list if v]
else:
assert isinstance(value, Number)
single_cfg[namelist_to_read][key] = value
except AttributeError:
if isinstance(value, list):
assert all([isinstance(v, Number) for v in value])
single_cfg[namelist_to_read][key] = value
else:
raise AssertionError(
"Unexpected cause in out parameters conversion"
)
return single_cfg
|
Pull out a single config set from a parameters_out namelist.
This function returns a single file with the config that needs to be passed to
MAGICC in order to do the same run as is represented by the values in
``parameters_out``.
Parameters
----------
parameters_out : dict, f90nml.Namelist
The parameters to dump
namelist_to_read : str
The namelist to read from the file.
Returns
-------
:obj:`f90nml.Namelist`
An f90nml object with the cleaned, read out config.
Examples
--------
>>> cfg = pull_cfg_from_parameters_out(magicc.metadata["parameters"])
>>> cfg.write("/somewhere/else/ANOTHERNAME.cfg")
|
entailment
|
def pull_cfg_from_parameters_out_file(
parameters_out_file, namelist_to_read="nml_allcfgs"
):
"""Pull out a single config set from a MAGICC ``PARAMETERS.OUT`` file.
This function reads in the ``PARAMETERS.OUT`` file and returns a single file with
the config that needs to be passed to MAGICC in order to do the same run as is
represented by the values in ``PARAMETERS.OUT``.
Parameters
----------
parameters_out_file : str
The ``PARAMETERS.OUT`` file to read
namelist_to_read : str
The namelist to read from the file.
Returns
-------
:obj:`f90nml.Namelist`
An f90nml object with the cleaned, read out config.
Examples
--------
>>> cfg = pull_cfg_from_parameters_out_file("PARAMETERS.OUT")
>>> cfg.write("/somewhere/else/ANOTHERNAME.cfg")
"""
parameters_out = read_cfg_file(parameters_out_file)
return pull_cfg_from_parameters_out(
parameters_out, namelist_to_read=namelist_to_read
)
|
Pull out a single config set from a MAGICC ``PARAMETERS.OUT`` file.
This function reads in the ``PARAMETERS.OUT`` file and returns a single file with
the config that needs to be passed to MAGICC in order to do the same run as is
represented by the values in ``PARAMETERS.OUT``.
Parameters
----------
parameters_out_file : str
The ``PARAMETERS.OUT`` file to read
namelist_to_read : str
The namelist to read from the file.
Returns
-------
:obj:`f90nml.Namelist`
An f90nml object with the cleaned, read out config.
Examples
--------
>>> cfg = pull_cfg_from_parameters_out_file("PARAMETERS.OUT")
>>> cfg.write("/somewhere/else/ANOTHERNAME.cfg")
|
entailment
|
def get_generic_rcp_name(inname):
"""Convert an RCP name into the generic Pymagicc RCP name
The conversion is case insensitive.
Parameters
----------
inname : str
The name for which to get the generic Pymagicc RCP name
Returns
-------
str
The generic Pymagicc RCP name
Examples
--------
>>> get_generic_rcp_name("RCP3PD")
"rcp26"
"""
# TODO: move into OpenSCM
mapping = {
"rcp26": "rcp26",
"rcp3pd": "rcp26",
"rcp45": "rcp45",
"rcp6": "rcp60",
"rcp60": "rcp60",
"rcp85": "rcp85",
}
try:
return mapping[inname.lower()]
except KeyError:
error_msg = "No generic name for input: {}".format(inname)
raise ValueError(error_msg)
|
Convert an RCP name into the generic Pymagicc RCP name
The conversion is case insensitive.
Parameters
----------
inname : str
The name for which to get the generic Pymagicc RCP name
Returns
-------
str
The generic Pymagicc RCP name
Examples
--------
>>> get_generic_rcp_name("RCP3PD")
"rcp26"
|
entailment
|
def join_timeseries(base, overwrite, join_linear=None):
"""Join two sets of timeseries
Parameters
----------
base : :obj:`MAGICCData`, :obj:`pd.DataFrame`, filepath
Base timeseries to use. If a filepath, the data will first be loaded from disk.
overwrite : :obj:`MAGICCData`, :obj:`pd.DataFrame`, filepath
Timeseries to join onto base. Any points which are in both `base` and
`overwrite` will be taken from `overwrite`. If a filepath, the data will first
be loaded from disk.
join_linear : tuple of len(2)
A list/array which specifies the period over which the two timeseries should
be joined. The first element is the start time of the join period, the second
element is the end time of the join period. In the join period (excluding the
start and end times), output data will be a linear interpolation between (the
annually interpolated) `base` and `overwrite` data. If None, no linear join
will be done and any points in (the annually interpolated) `overwrite` data
will simply overwrite any points in `base`.
Returns
-------
:obj:`MAGICCData`
The joint timeseries. The resulting data is linearly interpolated onto annual steps
"""
if join_linear is not None:
if len(join_linear) != 2:
raise ValueError("join_linear must have a length of 2")
if isinstance(base, str):
base = MAGICCData(base)
elif isinstance(base, MAGICCData):
base = deepcopy(base)
if isinstance(overwrite, str):
overwrite = MAGICCData(overwrite)
elif isinstance(overwrite, MAGICCData):
overwrite = deepcopy(overwrite)
result = _join_timeseries_mdata(base, overwrite, join_linear)
return MAGICCData(result)
|
Join two sets of timeseries
Parameters
----------
base : :obj:`MAGICCData`, :obj:`pd.DataFrame`, filepath
Base timeseries to use. If a filepath, the data will first be loaded from disk.
overwrite : :obj:`MAGICCData`, :obj:`pd.DataFrame`, filepath
Timeseries to join onto base. Any points which are in both `base` and
`overwrite` will be taken from `overwrite`. If a filepath, the data will first
be loaded from disk.
join_linear : tuple of len(2)
A list/array which specifies the period over which the two timeseries should
be joined. The first element is the start time of the join period, the second
element is the end time of the join period. In the join period (excluding the
start and end times), output data will be a linear interpolation between (the
annually interpolated) `base` and `overwrite` data. If None, no linear join
will be done and any points in (the annually interpolated) `overwrite` data
will simply overwrite any points in `base`.
Returns
-------
:obj:`MAGICCData`
The joint timeseries. The resulting data is linearly interpolated onto annual steps
|
entailment
|
def read_scen_file(
filepath,
columns={
"model": ["unspecified"],
"scenario": ["unspecified"],
"climate_model": ["unspecified"],
},
**kwargs
):
"""
Read a MAGICC .SCEN file.
Parameters
----------
filepath : str
Filepath of the .SCEN file to read
columns : dict
Passed to ``__init__`` method of MAGICCData. See
``MAGICCData.__init__`` for details.
kwargs
Passed to ``__init__`` method of MAGICCData. See
``MAGICCData.__init__`` for details.
Returns
-------
:obj:`pymagicc.io.MAGICCData`
``MAGICCData`` object containing the data and metadata.
"""
mdata = MAGICCData(filepath, columns=columns, **kwargs)
return mdata
|
Read a MAGICC .SCEN file.
Parameters
----------
filepath : str
Filepath of the .SCEN file to read
columns : dict
Passed to ``__init__`` method of MAGICCData. See
``MAGICCData.__init__`` for details.
kwargs
Passed to ``__init__`` method of MAGICCData. See
``MAGICCData.__init__`` for details.
Returns
-------
:obj:`pymagicc.io.MAGICCData`
``MAGICCData`` object containing the data and metadata.
|
entailment
|
def _get_openscm_var_from_filepath(filepath):
"""
Determine the OpenSCM variable from a filepath.
Uses MAGICC's internal, implicit, filenaming conventions.
Parameters
----------
filepath : str
Filepath from which to determine the OpenSCM variable.
Returns
-------
str
The OpenSCM variable implied by the filepath.
"""
reader = determine_tool(filepath, "reader")(filepath)
openscm_var = convert_magicc7_to_openscm_variables(
convert_magicc6_to_magicc7_variables(reader._get_variable_from_filepath())
)
return openscm_var
|
Determine the OpenSCM variable from a filepath.
Uses MAGICC's internal, implicit, filenaming conventions.
Parameters
----------
filepath : str
Filepath from which to determine the OpenSCM variable.
Returns
-------
str
The OpenSCM variable implied by the filepath.
|
entailment
|
def _find_nml(self):
"""
Find the start and end of the embedded namelist.
Returns
-------
(int, int)
start and end index for the namelist
"""
nml_start = None
nml_end = None
for i in range(len(self.lines)):
if self.lines[i].strip().startswith("&"):
nml_start = i
if self.lines[i].strip().startswith("/"):
nml_end = i
assert (
nml_start is not None and nml_end is not None
), "Could not find namelist within {}".format(self.filepath)
return nml_end, nml_start
|
Find the start and end of the embedded namelist.
Returns
-------
(int, int)
start and end index for the namelist
|
entailment
|
def process_data(self, stream, metadata):
"""
Extract the tabulated data from the input file.
Parameters
----------
stream : Streamlike object
A Streamlike object (nominally StringIO)
containing the table to be extracted
metadata : dict
Metadata read in from the header and the namelist
Returns
-------
(pandas.DataFrame, dict)
The first element contains the data, processed to the standard
MAGICCData format.
The second element is the updated metadata based on the processing performed.
"""
ch, metadata = self._get_column_headers_and_update_metadata(stream, metadata)
df = self._convert_data_block_and_headers_to_df(stream)
return df, metadata, ch
|
Extract the tabulated data from the input file.
Parameters
----------
stream : Streamlike object
A Streamlike object (nominally StringIO)
containing the table to be extracted
metadata : dict
Metadata read in from the header and the namelist
Returns
-------
(pandas.DataFrame, dict)
The first element contains the data, processed to the standard
MAGICCData format.
The second element is the updated metadata based on the processing performed.
|
entailment
|
def _convert_data_block_and_headers_to_df(self, stream):
"""
stream : Streamlike object
A Streamlike object (nominally StringIO) containing the data to be
extracted
ch : dict
Column headers to use for the output pd.DataFrame
Returns
-------
:obj:`pd.DataFrame`
Dataframe with processed datablock
"""
df = pd.read_csv(
stream,
skip_blank_lines=True,
delim_whitespace=True,
header=None,
index_col=0,
)
if isinstance(df.index, pd.core.indexes.numeric.Float64Index):
df.index = df.index.to_series().round(3)
# reset the columns to be 0..n instead of starting at 1
df.columns = list(range(len(df.columns)))
return df
|
stream : Streamlike object
A Streamlike object (nominally StringIO) containing the data to be
extracted
ch : dict
Column headers to use for the output pd.DataFrame
Returns
-------
:obj:`pd.DataFrame`
Dataframe with processed datablock
|
entailment
|
def _get_variable_from_filepath(self):
"""
Determine the file variable from the filepath.
Returns
-------
str
Best guess of variable name from the filepath
"""
try:
return self.regexp_capture_variable.search(self.filepath).group(1)
except AttributeError:
self._raise_cannot_determine_variable_from_filepath_error()
|
Determine the file variable from the filepath.
Returns
-------
str
Best guess of variable name from the filepath
|
entailment
|
def process_header(self, header):
"""
Parse the header for additional metadata.
Parameters
----------
header : str
All the lines in the header.
Returns
-------
dict
The metadata in the header.
"""
metadata = {}
for line in header.split("\n"):
line = line.strip()
for tag in self.header_tags:
tag_text = "{}:".format(tag)
if line.lower().startswith(tag_text):
metadata[tag] = line[len(tag_text) + 1 :].strip()
return metadata
|
Parse the header for additional metadata.
Parameters
----------
header : str
All the lines in the header.
Returns
-------
dict
The metadata in the header.
|
entailment
|
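A small, self-contained illustration of the header-parsing loop above; the header_tags list and the header text are assumptions for the sake of the example (the real tag list lives on the reader class).

# Hypothetical tag list and header, mirroring the loop in process_header.
header_tags = ["contact", "date", "description"]
header = (
    "CONTACT: someone@example.com\n"
    "DATE: 2018-01-01\n"
    "A free-form line with no recognised tag is ignored\n"
)

metadata = {}
for line in header.split("\n"):
    line = line.strip()
    for tag in header_tags:
        tag_text = "{}:".format(tag)
        if line.lower().startswith(tag_text):
            metadata[tag] = line[len(tag_text) + 1:].strip()

print(metadata)  # expected: {'contact': 'someone@example.com', 'date': '2018-01-01'}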
def _read_data_header_line(self, stream, expected_header):
"""Read a data header line, ensuring that it starts with the expected header
Parameters
----------
stream : :obj:`StreamIO`
Stream object containing the text to read
expected_header : str, list of strs
Expected header of the data header line
"""
pos = stream.tell()
expected_header = (
[expected_header] if isinstance(expected_header, str) else expected_header
)
for exp_hd in expected_header:
tokens = stream.readline().split()
try:
assert tokens[0] == exp_hd
return tokens[1:]
except AssertionError:
stream.seek(pos)
continue
assertion_msg = "Expected a header token of {}, got {}".format(
expected_header, tokens[0]
)
raise AssertionError(assertion_msg)
|
Read a data header line, ensuring that it starts with the expected header
Parameters
----------
stream : :obj:`StreamIO`
Stream object containing the text to read
expected_header : str, list of strs
Expected header of the data header line
|
entailment
|
def read_chunk(self, t):
"""
Read out the next chunk of memory
Values in fortran binary streams begin and end with the number of bytes
:param t: Data type (same format as used by struct).
:return: Numpy array if the variable is an array, otherwise a scalar.
"""
size = self.data[self.pos : self.pos + 4].cast("i")[0]
d = self.data[self.pos + 4 : self.pos + 4 + size]
assert (
self.data[self.pos + 4 + size : self.pos + 4 + size + 4].cast("i")[0]
== size
)
self.pos = self.pos + 4 + size + 4
res = np.array(d.cast(t))
# Return as a scalar or a numpy array if it is an array
if res.size == 1:
return res[0]
return res
|
Read out the next chunk of memory
Values in fortran binary streams begin and end with the number of bytes
:param t: Data type (same format as used by struct).
:return: Numpy array if the variable is an array, otherwise a scalar.
|
entailment
|
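read_chunk relies on the layout of Fortran unformatted sequential records: every record is wrapped in a 4-byte byte-count at both ends. A self-contained sketch of that layout, independent of the reader class above:

import struct

import numpy as np

values = np.array([1.0, 2.0, 3.0])
payload = values.tobytes()            # three 8-byte doubles
record = (
    struct.pack("i", len(payload))    # leading byte count
    + payload
    + struct.pack("i", len(payload))  # trailing byte count, must match
)

# Parsing mirrors read_chunk: read the size, slice the payload, check the trailer.
size = struct.unpack_from("i", record, 0)[0]
data = np.frombuffer(record, dtype="d", count=size // 8, offset=4)
assert struct.unpack_from("i", record, 4 + size)[0] == size
print(data)  # expected: [1. 2. 3.]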
def process_data(self, stream, metadata):
"""
Extract the tabulated data from the input file
# Arguments
stream (Streamlike object): A Streamlike object (nominally StringIO)
containing the table to be extracted
metadata (dict): metadata read in from the header and the namelist
# Returns
df (pandas.DataFrame): contains the data, processed to the standard
MAGICCData format
metadata (dict): updated metadata based on the processing performed
"""
index = np.arange(metadata["firstyear"], metadata["lastyear"] + 1)
# The first variable is the global values
globe = stream.read_chunk("d")
assert len(globe) == len(index)
regions = stream.read_chunk("d")
num_regions = int(len(regions) / len(index))
regions = regions.reshape((-1, num_regions), order="F")
data = np.concatenate((globe[:, np.newaxis], regions), axis=1)
df = pd.DataFrame(data, index=index)
if isinstance(df.index, pd.core.indexes.numeric.Float64Index):
df.index = df.index.to_series().round(3)
df.index.name = "time"
regions = [
"World",
"World|Northern Hemisphere|Ocean",
"World|Northern Hemisphere|Land",
"World|Southern Hemisphere|Ocean",
"World|Southern Hemisphere|Land",
]
variable = convert_magicc6_to_magicc7_variables(
self._get_variable_from_filepath()
)
variable = convert_magicc7_to_openscm_variables(variable)
column_headers = {
"variable": [variable] * (num_regions + 1),
"region": regions,
"unit": ["unknown"] * len(regions),
"todo": ["SET"] * len(regions),
}
return df, metadata, self._set_column_defaults(column_headers)
|
Extract the tabulated data from the input file
# Arguments
stream (Streamlike object): A Streamlike object (nominally StringIO)
containing the table to be extracted
metadata (dict): metadata read in from the header and the namelist
# Returns
df (pandas.DataFrame): contains the data, processed to the standard
MAGICCData format
metadata (dict): updated metadata based on the processing performed
|
entailment
|
def process_header(self, data):
"""
Reads the first part of the file to get some essential metadata
# Returns
return (dict): the metadata in the header
"""
metadata = {
"datacolumns": data.read_chunk("I"),
"firstyear": data.read_chunk("I"),
"lastyear": data.read_chunk("I"),
"annualsteps": data.read_chunk("I"),
}
if metadata["annualsteps"] != 1:
raise InvalidTemporalResError(
"{}: Only annual files can currently be processed".format(self.filepath)
)
return metadata
|
Reads the first part of the file to get some essential metadata
# Returns
return (dict): the metadata in the header
|
entailment
|
def write(self, magicc_input, filepath):
"""
Write a MAGICC input file from df and metadata
Parameters
----------
magicc_input : :obj:`pymagicc.io.MAGICCData`
MAGICCData object which holds the data to write
filepath : str
Filepath of the file to write to.
"""
self._filepath = filepath
# TODO: make copy attribute for MAGICCData
self.minput = deepcopy(magicc_input)
self.data_block = self._get_data_block()
output = StringIO()
output = self._write_header(output)
output = self._write_namelist(output)
output = self._write_datablock(output)
with open(
filepath, "w", encoding="utf-8", newline=self._newline_char
) as output_file:
output.seek(0)
copyfileobj(output, output_file)
|
Write a MAGICC input file from df and metadata
Parameters
----------
magicc_input : :obj:`pymagicc.io.MAGICCData`
MAGICCData object which holds the data to write
filepath : str
Filepath of the file to write to.
|
entailment
|
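A minimal sketch of the buffer-then-copy pattern the writer above uses; the file name and content are placeholders:
from io import StringIO
from shutil import copyfileobj

buf = StringIO()
buf.write("&THISFILE_SPECIFICATIONS\n/\n")   # placeholder content standing in for header, namelist and data block
buf.seek(0)                                  # rewind before copying
with open("EXAMPLE.IN", "w", encoding="utf-8", newline="\n") as fh:
    copyfileobj(buf, fh)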
def append(self, other, inplace=False, **kwargs):
"""
Append any input which can be converted to MAGICCData to self.
Parameters
----------
other : MAGICCData, pd.DataFrame, pd.Series, str
Source of data to append.
inplace : bool
If True, append ``other`` inplace, otherwise return a new ``MAGICCData``
instance.
**kwargs
Passed to ``MAGICCData`` constructor (only used if ``MAGICCData`` is not a
``MAGICCData`` instance).
"""
if not isinstance(other, MAGICCData):
other = MAGICCData(other, **kwargs)
if inplace:
super().append(other, inplace=inplace)
self.metadata.update(other.metadata)
else:
res = super().append(other, inplace=inplace)
res.metadata = deepcopy(self.metadata)
res.metadata.update(other.metadata)
return res
|
Append any input which can be converted to MAGICCData to self.
Parameters
----------
other : MAGICCData, pd.DataFrame, pd.Series, str
Source of data to append.
inplace : bool
If True, append ``other`` inplace, otherwise return a new ``MAGICCData``
instance.
**kwargs
Passed to ``MAGICCData`` constructor (only used if ``MAGICCData`` is not a
``MAGICCData`` instance).
|
entailment
|
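The metadata handling in append amounts to a dict update, with the appended object's entries winning on conflict; a standalone sketch with made-up metadata:
meta_self = {"header": "base run", "parameters": {"core_climatesensitivity": 3.0}}
meta_other = {"parameters": {"core_climatesensitivity": 2.5}}

merged = dict(meta_self)
merged.update(meta_other)       # mirrors res.metadata.update(other.metadata) above
print(merged["parameters"])     # {'core_climatesensitivity': 2.5}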
def write(self, filepath, magicc_version):
"""
Write an input file to disk.
Parameters
----------
filepath : str
Filepath of the file to write.
magicc_version : int
The MAGICC version for which we want to write files. MAGICC7 and MAGICC6
namelists are incompatible hence we need to know which one we're writing
for.
"""
writer = determine_tool(filepath, "writer")(magicc_version=magicc_version)
writer.write(self, filepath)
|
Write an input file to disk.
Parameters
----------
filepath : str
Filepath of the file to write.
magicc_version : int
The MAGICC version for which we want to write files. MAGICC7 and MAGICC6
namelists are incompatible hence we need to know which one we're writing
for.
|
entailment
|
def validate_python_identifier_attributes(fully_qualified_name: str, spec: Dict[str, Any],
*attributes: str) -> List[InvalidIdentifierError]:
""" Validates a set of attributes as identifiers in a spec """
errors: List[InvalidIdentifierError] = []
checks: List[Tuple[Callable, InvalidIdentifierError.Reason]] = [
(lambda x: x.startswith('_'), InvalidIdentifierError.Reason.STARTS_WITH_UNDERSCORE),
(lambda x: x.startswith('run_'), InvalidIdentifierError.Reason.STARTS_WITH_RUN),
(lambda x: not x.isidentifier(), InvalidIdentifierError.Reason.INVALID_PYTHON_IDENTIFIER),
]
for attribute in attributes:
if attribute not in spec or spec.get(ATTRIBUTE_INTERNAL, False):
continue
for check in checks:
if check[0](spec[attribute]):
errors.append(
InvalidIdentifierError(fully_qualified_name, spec, attribute, check[1]))
break
return errors
|
Validates a set of attributes as identifiers in a spec
|
entailment
|
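A standalone sketch of the three identifier checks applied above, run over some sample names:
sample_names = ["_hidden", "run_total", "2fast", "valid_name"]
for name in sample_names:
    if name.startswith("_"):
        print(name, "-> rejected: starts with an underscore")
    elif name.startswith("run_"):
        print(name, "-> rejected: starts with 'run_'")
    elif not name.isidentifier():
        print(name, "-> rejected: not a valid Python identifier")
    else:
        print(name, "-> accepted")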
def validate_required_attributes(fully_qualified_name: str, spec: Dict[str, Any],
*attributes: str) -> List[RequiredAttributeError]:
""" Validates to ensure that a set of attributes are present in spec """
return [
RequiredAttributeError(fully_qualified_name, spec, attribute)
for attribute in attributes
if attribute not in spec
]
|
Validates to ensure that a set of attributes are present in spec
|
entailment
|
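The membership test behind validate_required_attributes, shown standalone with a made-up spec:
spec = {"Name": "total_sales", "Type": "integer"}
required = ("Name", "Type", "Value")
missing = [attr for attr in required if attr not in spec]
print(missing)  # ['Value'] -> one RequiredAttributeError would be produced for this attribute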
def validate_empty_attributes(fully_qualified_name: str, spec: Dict[str, Any],
*attributes: str) -> List[EmptyAttributeError]:
""" Validates to ensure that a set of attributes do not contain empty values """
return [
EmptyAttributeError(fully_qualified_name, spec, attribute)
for attribute in attributes
if not spec.get(attribute, None)
]
|
Validates to ensure that a set of attributes do not contain empty values
|
entailment
|
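Note that validate_empty_attributes uses spec.get(attribute, None) in a truthiness test, so falsy values such as 0 or an empty string count as empty; a standalone sketch:
spec = {"Name": "", "Value": 0, "Type": "integer"}
empty = [attr for attr in ("Name", "Value", "Type") if not spec.get(attr, None)]
print(empty)  # ['Name', 'Value']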
def validate_number_attribute(
fully_qualified_name: str,
spec: Dict[str, Any],
attribute: str,
value_type: Union[Type[int], Type[float]] = int,
minimum: Optional[Union[int, float]] = None,
maximum: Optional[Union[int, float]] = None) -> Optional[InvalidNumberError]:
""" Validates to ensure that the value is a number of the specified type, and lies with the specified range """
if attribute not in spec:
return
try:
value = value_type(spec[attribute])
if (minimum is not None and value < minimum) or (maximum is not None and value > maximum):
raise ValueError()
except:
return InvalidNumberError(fully_qualified_name, spec, attribute, value_type, minimum,
maximum)
|
Validates to ensure that the value is a number of the specified type, and lies within the specified range
|
entailment
|
def validate_enum_attribute(fully_qualified_name: str, spec: Dict[str, Any], attribute: str,
candidates: Set[Union[str, int, float]]) -> Optional[InvalidValueError]:
""" Validates to ensure that the value of an attribute lies within an allowed set of candidates """
if attribute not in spec:
return
if spec[attribute] not in candidates:
return InvalidValueError(fully_qualified_name, spec, attribute, candidates)
|
Validates to ensure that the value of an attribute lies within an allowed set of candidates
|
entailment
|
def parse(key_string: str) -> 'Key':
""" Parses a flat key string and returns a key """
parts = key_string.split(Key.PARTITION)
key_type = KeyType.DIMENSION
if parts[3]:
key_type = KeyType.TIMESTAMP
return Key(key_type, parts[0], parts[1], parts[2].split(Key.DIMENSION_PARTITION)
if parts[2] else [],
parser.parse(parts[3]) if parts[3] else None)
|
Parses a flat key string and returns a key
|
entailment
|
def parse_sort_key(identity: str, sort_key_string: str) -> 'Key':
""" Parses a flat key string and returns a key """
parts = sort_key_string.split(Key.PARTITION)
key_type = KeyType.DIMENSION
if parts[2]:
key_type = KeyType.TIMESTAMP
return Key(key_type, identity, parts[0], parts[1].split(Key.DIMENSION_PARTITION)
if parts[1] else [],
parser.parse(parts[2]) if parts[2] else None)
|
Parses a flat key string and returns a key
|
entailment
|
def starts_with(self, other: 'Key') -> bool:
"""
Checks if this key starts with the other key provided. Returns False if key_type, identity
or group are different.
For `KeyType.TIMESTAMP` returns True.
For `KeyType.DIMENSION` does a prefix match between the two keys' dimensions.
"""
if (self.key_type, self.identity, self.group) != (other.key_type, other.identity,
other.group):
return False
if self.key_type == KeyType.TIMESTAMP:
return True
if self.key_type == KeyType.DIMENSION:
if len(self.dimensions) < len(other.dimensions):
return False
return self.dimensions[0:len(other.dimensions)] == other.dimensions
|
Checks if this key starts with the other key provided. Returns False if key_type, identity
or group are different.
For `KeyType.TIMESTAMP` returns True.
For `KeyType.DIMENSION` does a prefix match between the two keys' dimensions.
|
entailment
|
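The dimension branch of starts_with is a list prefix match; a standalone sketch with made-up dimension values:
self_dimensions = ["US", "CA", "94107"]
other_dimensions = ["US", "CA"]

is_prefix = (len(self_dimensions) >= len(other_dimensions)
             and self_dimensions[:len(other_dimensions)] == other_dimensions)
print(is_prefix)  # True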
def _evaluate_dimension_fields(self) -> bool:
"""
Evaluates the dimension fields. Returns False if any of the fields could not be evaluated.
"""
for _, item in self._dimension_fields.items():
item.run_evaluate()
if item.eval_error:
return False
return True
|
Evaluates the dimension fields. Returns False if any of the fields could not be evaluated.
|
entailment
|
def _compare_dimensions_to_fields(self) -> bool:
""" Compares the dimension field values to the value in regular fields."""
for name, item in self._dimension_fields.items():
if item.value != self._nested_items[name].value:
return False
return True
|
Compares the dimension field values to the value in regular fields.
|
entailment
|
def _key(self):
""" Generates the Key object based on dimension fields. """
return Key(self._schema.key_type, self._identity, self._name,
[str(item.value) for item in self._dimension_fields.values()])
|
Generates the Key object based on dimension fields.
|
entailment
|
def run(scenario, magicc_version=6, **kwargs):
"""
Run a MAGICC scenario and return output data and (optionally) config parameters.
As a reminder, putting ``out_parameters=1`` will cause MAGICC to write out its
parameters into ``out/PARAMETERS.OUT`` and they will then be read into
``output.metadata["parameters"]`` where ``output`` is the returned object.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run
magicc_version : int
MAGICC version to use for the run
**kwargs
Parameters overwriting default parameters
Raises
------
ValueError
If the magicc_version is not available
Returns
-------
output : :obj:`pymagicc.io.MAGICCData`
Output of the run with the data in the ``df`` attribute and parameters and
other metadata in the ``metadata`` attribute
"""
if magicc_version == 6:
magicc_cls = MAGICC6
elif magicc_version == 7:
magicc_cls = MAGICC7
else:
raise ValueError("MAGICC version {} is not available".format(magicc_version))
with magicc_cls() as magicc:
results = magicc.run(scenario=scenario, **kwargs)
return results
|
Run a MAGICC scenario and return output data and (optionally) config parameters.
As a reminder, putting ``out_parameters=1`` will cause MAGICC to write out its
parameters into ``out/PARAMETERS.OUT`` and they will then be read into
``output.metadata["parameters"]`` where ``output`` is the returned object.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run
magicc_version : int
MAGICC version to use for the run
**kwargs
Parameters overwriting default parameters
Raises
------
ValueError
If the magicc_version is not available
Returns
-------
output : :obj:`pymagicc.io.MAGICCData`
Output of the run with the data in the ``df`` attribute and parameters and
other metadata in the ``metadata`` attribute
|
entailment
|
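A usage sketch for run; the bundled RCP scenario import is an assumption and its exact path varies between pymagicc versions:
import pymagicc
from pymagicc.scenarios import rcp26  # assumed location of a bundled scenario

results = pymagicc.run(rcp26, magicc_version=6, out_parameters=1)
print(results.metadata["parameters"].keys())  # parameter namelists read back from out/PARAMETERS.OUT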
def run_evaluate(self, block: TimeAggregate) -> bool:
"""
Evaluates the anchor condition against the specified block.
:param block: Block to run the anchor condition against.
:return: True, if the anchor condition is met, otherwise, False.
"""
if self._anchor.evaluate_anchor(block, self._evaluation_context):
try:
self.run_reset()
self._evaluation_context.global_add('anchor', block)
self._evaluate()
self._anchor.add_condition_met()
return True
finally:
self._evaluation_context.global_remove('anchor')
return False
|
Evaluates the anchor condition against the specified block.
:param block: Block to run the anchor condition against.
:return: True, if the anchor condition is met, otherwise, False.
|
entailment
|
def extend_schema_spec(self) -> None:
""" Injects the block start and end times """
super().extend_schema_spec()
if self.ATTRIBUTE_FIELDS in self._spec:
# Add new fields to the schema spec. Since `_identity` is added by the super, new elements are added after
predefined_field = self._build_time_fields_spec(self._spec[self.ATTRIBUTE_NAME])
self._spec[self.ATTRIBUTE_FIELDS][1:1] = predefined_field
# Add new field schema to the schema loader
for field_schema in predefined_field:
self.schema_loader.add_schema_spec(field_schema, self.fully_qualified_name)
|
Injects the block start and end times
|
entailment
|
def _build_time_fields_spec(name_in_context: str) -> List[Dict[str, Any]]:
"""
Constructs the spec for predefined fields that are to be included in the master spec prior to schema load
:param name_in_context: Name of the current object in the context
:return:
"""
return [
{
'Name': '_start_time',
'Type': Type.DATETIME,
'Value': ('time if {aggregate}._start_time is None else time '
'if time < {aggregate}._start_time else {aggregate}._start_time'
).format(aggregate=name_in_context),
ATTRIBUTE_INTERNAL: True
},
{
'Name': '_end_time',
'Type': Type.DATETIME,
'Value': ('time if {aggregate}._end_time is None else time '
'if time > {aggregate}._end_time else {aggregate}._end_time'
).format(aggregate=name_in_context),
ATTRIBUTE_INTERNAL: True
},
]
|
Constructs the spec for predefined fields that are to be included in the master spec prior to schema load
:param name_in_context: Name of the current object in the context
:return:
|
entailment
|
def apply_string_substitutions(
inputs,
substitutions,
inverse=False,
case_insensitive=False,
unused_substitutions="ignore",
):
"""Apply a number of substitutions to a string(s).
The substitutions are applied effectively all at once. This means that conflicting
substitutions don't interact. Where substitutions are conflicting, the one which
is longer takes precedence. This is confusing so we recommend that you look at
the examples.
Parameters
----------
inputs : str, list of str
The string(s) to which we want to apply the substitutions.
substitutions : dict
The substitutions we wish to make. The keys are the strings we wish to
substitute, the values are the strings which we want to appear in the output
strings.
inverse : bool
If True, do the substitutions the other way around i.e. use the keys as the
strings we want to appear in the output strings and the values as the strings
we wish to substitute.
case_insensitive : bool
If True, the substitutions will be made in a case insensitive way.
unused_substitutions : {"ignore", "warn", "raise"}, default ignore
Behaviour when one or more of the inputs does not have a corresponding
substitution. If "ignore", nothing happens. If "warn", a warning is issued. If
"raise", an error is raised. See the examples.
Returns
-------
``type(input)``
The input with substitutions performed.
Examples
--------
>>> apply_string_substitutions("Hello JimBob", {"Jim": "Bob"})
'Hello BobBob'
>>> apply_string_substitutions("Hello JimBob", {"Jim": "Bob"}, inverse=True)
'Hello JimJim'
>>> apply_string_substitutions(["Hello JimBob", "Jim says, 'Hi Bob'"], {"Jim": "Bob"})
['Hello BobBob', "Bob says, 'Hi Bob'"]
>>> apply_string_substitutions(["Hello JimBob", "Jim says, 'Hi Bob'"], {"Jim": "Bob"}, inverse=True)
['Hello JimJim', "Jim says, 'Hi Jim'"]
>>> apply_string_substitutions("Muttons Butter", {"M": "B", "Button": "Zip"})
'Buttons Butter'
# Substitutions don't cascade. If they did, Muttons would become Buttons, then the
# substitutions "Button" --> "Zip" would be applied and we would end up with
# "Zips Butter".
>>> apply_string_substitutions("Muttons Butter", {"Mutton": "Gutter", "tt": "zz"})
'Gutters Buzzer'
# Longer substitutions take precedence. Hence Mutton becomes Gutter, not Muzzon.
>>> apply_string_substitutions("Butter", {"buTTer": "Gutter"}, case_insensitive=True)
'Gutter'
>>> apply_string_substitutions("Butter", {"teeth": "tooth"})
'Butter'
>>> apply_string_substitutions("Butter", {"teeth": "tooth"}, unused_substitutions="ignore")
'Butter'
>>> apply_string_substitutions("Butter", {"teeth": "tooth"}, unused_substitutions="warn")
...pymagicc/utils.py:50: UserWarning: No substitution available for {'Butter'} warnings.warn(msg)
'Butter'
>>> apply_string_substitutions("Butter", {"teeth": "tooth"}, unused_substitutions="raise")
ValueError: No substitution available for {'Butter'}
"""
if inverse:
substitutions = {v: k for k, v in substitutions.items()}
# only possible to have conflicting substitutions when case insensitive
if case_insensitive:
_check_duplicate_substitutions(substitutions)
if unused_substitutions != "ignore":
_check_unused_substitutions(
substitutions, inputs, unused_substitutions, case_insensitive
)
compiled_regexp = _compile_replacement_regexp(
substitutions, case_insensitive=case_insensitive
)
inputs_return = deepcopy(inputs)
if isinstance(inputs_return, str):
inputs_return = _multiple_replace(inputs_return, substitutions, compiled_regexp)
else:
inputs_return = [
_multiple_replace(v, substitutions, compiled_regexp) for v in inputs_return
]
return inputs_return
|
Apply a number of substitutions to one or more strings.
The substitutions are applied effectively all at once. This means that conflicting
substitutions don't interact. Where substitutions are conflicting, the one which
is longer takes precedence. This is confusing so we recommend that you look at
the examples.
Parameters
----------
inputs : str, list of str
The string(s) to which we want to apply the substitutions.
substitutions : dict
The substitutions we wish to make. The keys are the strings we wish to
substitute, the values are the strings which we want to appear in the output
strings.
inverse : bool
If True, do the substitutions the other way around i.e. use the keys as the
strings we want to appear in the output strings and the values as the strings
we wish to substitute.
case_insensitive : bool
If True, the substitutions will be made in a case insensitive way.
unused_substitutions : {"ignore", "warn", "raise"}, default ignore
Behaviour when one or more of the inputs does not have a corresponding
substitution. If "ignore", nothing happens. If "warn", a warning is issued. If
"raise", an error is raised. See the examples.
Returns
-------
``type(input)``
The input with substitutions performed.
Examples
--------
>>> apply_string_substitutions("Hello JimBob", {"Jim": "Bob"})
'Hello BobBob'
>>> apply_string_substitutions("Hello JimBob", {"Jim": "Bob"}, inverse=True)
'Hello JimJim'
>>> apply_string_substitutions(["Hello JimBob", "Jim says, 'Hi Bob'"], {"Jim": "Bob"})
['Hello BobBob', "Bob says, 'Hi Bob'"]
>>> apply_string_substitutions(["Hello JimBob", "Jim says, 'Hi Bob'"], {"Jim": "Bob"}, inverse=True)
['Hello JimJim', "Jim says, 'Hi Jim'"]
>>> apply_string_substitutions("Muttons Butter", {"M": "B", "Button": "Zip"})
'Buttons Butter'
# Substitutions don't cascade. If they did, Muttons would become Buttons, then the
# substitutions "Button" --> "Zip" would be applied and we would end up with
# "Zips Butter".
>>> apply_string_substitutions("Muttons Butter", {"Mutton": "Gutter", "tt": "zz"})
'Gutters Buzzer'
# Longer substitutions take precedence. Hence Mutton becomes Gutter, not Muzzon.
>>> apply_string_substitutions("Butter", {"buTTer": "Gutter"}, case_insensitive=True)
'Gutter'
>>> apply_string_substitutions("Butter", {"teeth": "tooth"})
'Butter'
>>> apply_string_substitutions("Butter", {"teeth": "tooth"}, unused_substitutions="ignore")
'Butter'
>>> apply_string_substitutions("Butter", {"teeth": "tooth"}, unused_substitutions="warn")
...pymagicc/utils.py:50: UserWarning: No substitution available for {'Butter'} warnings.warn(msg)
'Butter'
>>> apply_string_substitutions("Butter", {"teeth": "tooth"}, unused_substitutions="raise")
ValueError: No substitution available for {'Butter'}
|
entailment
|
def get_range(self,
base_key: Key,
start_time: datetime,
end_time: datetime = None,
count: int = 0) -> List[Tuple[Key, Any]]:
"""
Returns the list of items from the store based on the given time range or count.
:param base_key: Items which don't start with the base_key are filtered out.
:param start_time: Start time for the range query
:param end_time: End time of the range query. If None count is used.
:param count: The number of items to be returned. Used if end_time is not specified.
"""
if end_time and count:
raise ValueError('Only one of `end` or `count` can be set')
if count:
end_time = datetime.min.replace(
tzinfo=timezone.utc) if count < 0 else datetime.max.replace(tzinfo=timezone.utc)
end_time = self._add_timezone_if_required(end_time)
start_time = self._add_timezone_if_required(start_time)
if end_time < start_time:
start_time, end_time = end_time, start_time
if base_key.key_type == KeyType.TIMESTAMP:
start_key = Key(KeyType.TIMESTAMP, base_key.identity, base_key.group, [], start_time)
end_key = Key(KeyType.TIMESTAMP, base_key.identity, base_key.group, [], end_time)
return self._get_range_timestamp_key(start_key, end_key, count)
else:
return self._get_range_dimension_key(base_key, start_time, end_time, count)
|
Returns the list of items from the store based on the given time range or count.
:param base_key: Items which don't start with the base_key are filtered out.
:param start_time: Start time for the range query
:param end_time: End time of the range query. If None count is used.
:param count: The number of items to be returned. Used if end_time is not specified.
|
entailment
|
def _get_range_timestamp_key(self, start: Key, end: Key,
count: int = 0) -> List[Tuple[Key, Any]]:
"""
Returns the list of items from the store based on the given time range or count.
This is used when the key being used is a TIMESTAMP key.
"""
raise NotImplementedError()
|
Returns the list of items from the store based on the given time range or count.
This is used when the key being used is a TIMESTAMP key.
|
entailment
|
def _get_range_dimension_key(self,
base_key: Key,
start_time: datetime,
end_time: datetime,
count: int = 0) -> List[Tuple[Key, Any]]:
"""
Returns the list of items from the store based on the given time range or count.
This is used when the key being used is a DIMENSION key.
"""
raise NotImplementedError()
|
Returns the list of items from the store based on the given time range or count.
This is used when the key being used is a DIMENSION key.
|
entailment
|
def _restrict_items_to_count(items: List[Tuple[Key, Any]], count: int) -> List[Tuple[Key, Any]]:
"""
Restricts items to at most abs(count) items if len(items) is larger than abs(count). This function
assumes that items is sorted by time.
:param items: The items to restrict.
:param count: The number of items returned.
"""
if abs(count) > len(items):
count = Store._sign(count) * len(items)
if count < 0:
return items[count:]
else:
return items[:count]
|
Restricts items to at most abs(count) items if len(items) is larger than abs(count). This function
assumes that items is sorted by time.
:param items: The items to restrict.
:param count: The number of items returned.
|
entailment
|
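The sign of count decides which end of the time-sorted list is kept; a standalone sketch of the slicing used above:
items = [("k1", 1), ("k2", 2), ("k3", 3), ("k4", 4)]
count = 2
print(items[:count])    # positive count -> oldest two items
count = -2
print(items[count:])    # negative count -> newest two items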
def build_expression(self, attribute: str) -> Optional[Expression]:
""" Builds an expression object. Adds an error if expression creation has errors. """
expression_string = self._spec.get(attribute, None)
if expression_string:
try:
return Expression(str(expression_string))
except Exception as err:
self.add_errors(
InvalidExpressionError(self.fully_qualified_name, self._spec, attribute, err))
return None
|
Builds an expression object. Adds an error if expression creation has errors.
|
entailment
|
def add_errors(self, *errors: Union[BaseSchemaError, List[BaseSchemaError]]) -> None:
""" Adds errors to the error repository in schema loader """
self.schema_loader.add_errors(*errors)
|
Adds errors to the error repository in schema loader
|
entailment
|
def validate_required_attributes(self, *attributes: str) -> None:
""" Validates that the schema contains a series of required attributes """
self.add_errors(
validate_required_attributes(self.fully_qualified_name, self._spec, *attributes))
|
Validates that the schema contains a series of required attributes
|
entailment
|
def validate_number_attribute(self,
attribute: str,
value_type: Union[Type[int], Type[float]] = int,
minimum: Optional[Union[int, float]] = None,
maximum: Optional[Union[int, float]] = None) -> None:
""" Validates that the attribute contains a numeric value within boundaries if specified """
self.add_errors(
validate_number_attribute(self.fully_qualified_name, self._spec, attribute, value_type,
minimum, maximum))
|
Validates that the attribute contains a numeric value within boundaries if specified
|
entailment
|
def validate_enum_attribute(self, attribute: str,
candidates: Set[Union[str, int, float]]) -> None:
""" Validates that the attribute value is among the candidates """
self.add_errors(
validate_enum_attribute(self.fully_qualified_name, self._spec, attribute, candidates))
|
Validates that the attribute value is among the candidates
|
entailment
|
def validate_schema_spec(self) -> None:
""" Contains the validation routines that are to be executed as part of initialization by subclasses.
When this method is being extended, the first line should always be: ```super().validate_schema_spec()``` """
self.add_errors(
validate_empty_attributes(self.fully_qualified_name, self._spec, *self._spec.keys()))
self.add_errors(
validate_python_identifier_attributes(self.fully_qualified_name, self._spec,
self.ATTRIBUTE_NAME))
|
Contains the validation routines that are to be executed as part of initialization by subclasses.
When this method is being extended, the first line should always be: ```super().validate_schema_spec()```
|
entailment
|
def _needs_evaluation(self) -> bool:
"""
Returns True when:
1. Where clause is not specified
2. Where clause is specified and it evaluates to True
Returns False if a where clause is specified and it evaluates to False
"""
return self._schema.when is None or self._schema.when.evaluate(self._evaluation_context)
|
Returns True when:
1. Where clause is not specified
2. Where clause is specified and it evaluates to True
Returns False if a where clause is specified and it evaluates to False
|
entailment
|
def run_evaluate(self, *args, **kwargs) -> None:
"""
Evaluates the current item
:returns An evaluation result object containing the result, or reasons why
evaluation failed
"""
if self._needs_evaluation:
for _, item in self._nested_items.items():
item.run_evaluate()
|
Evaluates the current item
:returns An evaluation result object containing the result, or reasons why
evaluation failed
|
entailment
|
def _snapshot(self) -> Dict[str, Any]:
"""
Implements snapshot for collections by recursively invoking snapshot of all child items
"""
try:
return {name: item._snapshot for name, item in self._nested_items.items()}
except Exception as e:
raise SnapshotError('Error while creating snapshot for {}'.format(self._name)) from e
|
Implements snapshot for collections by recursively invoking snapshot of all child items
|
entailment
|
def run_restore(self, snapshot: Dict[Union[str, Key], Any]) -> 'BaseItemCollection':
"""
Restores the state of a collection from a snapshot
"""
try:
for name, snap in snapshot.items():
if isinstance(name, Key):
self._nested_items[name.group].run_restore(snap)
else:
self._nested_items[name].run_restore(snap)
return self
except Exception as e:
raise SnapshotError('Error while restoring snapshot: {}'.format(self._snapshot)) from e
|
Restores the state of a collection from a snapshot
|
entailment
|
def _prepare_window(self, start_time: datetime) -> None:
"""
Prepares window if any is specified.
:param start_time: The anchor block start_time from where the window
should be generated.
"""
# evaluate window first which sets the correct window in the store
store = self._schema.schema_loader.get_store(
self._schema.source.store_schema.fully_qualified_name)
if Type.is_type_equal(self._schema.window_type, Type.DAY) or Type.is_type_equal(
self._schema.window_type, Type.HOUR):
block_list = self._load_blocks(
store.get_range(
Key(self._schema.source.key_type, self._identity, self._schema.source.name),
start_time, self._get_end_time(start_time)))
else:
block_list = self._load_blocks(
store.get_range(
Key(self._schema.source.key_type, self._identity, self._schema.source.name),
start_time, None, self._schema.window_value))
self._window_source = _WindowSource(block_list)
self._validate_view()
|
Prepares window if any is specified.
:param start_time: The anchor block start_time from where the window
should be generated.
|
entailment
|
def _get_end_time(self, start_time: datetime) -> datetime:
"""
Generates the end time to be used for the store range query.
:param start_time: Start time to use as an offset to calculate the end time
based on the window type in the schema.
:return:
"""
if Type.is_type_equal(self._schema.window_type, Type.DAY):
return start_time + timedelta(days=self._schema.window_value)
elif Type.is_type_equal(self._schema.window_type, Type.HOUR):
return start_time + timedelta(hours=self._schema.window_value)
|
Generates the end time to be used for the store range query.
:param start_time: Start time to use as an offset to calculate the end time
based on the window type in the schema.
:return:
|
entailment
|
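The window end time is just a timedelta offset from the anchor start; a standalone sketch for DAY and HOUR windows with made-up values:
from datetime import datetime, timedelta

start_time = datetime(2018, 1, 1)
print(start_time + timedelta(days=7))    # window_type DAY,  window_value 7
print(start_time + timedelta(hours=12))  # window_type HOUR, window_value 12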
def _load_blocks(self, blocks: List[Tuple[Key, Any]]) -> List[TimeAggregate]:
"""
Converts [(Key, block)] to [BlockAggregate]
:param blocks: List of (Key, block) blocks.
:return: List of BlockAggregate
"""
return [
TypeLoader.load_item(self._schema.source.type)(self._schema.source, self._identity,
EvaluationContext()).run_restore(block)
for (_, block) in blocks
]
|
Converts [(Key, block)] to [BlockAggregate]
:param blocks: List of (Key, block) blocks.
:return: List of BlockAggregate
|
entailment
|
def execute_per_identity_records(
self,
identity: str,
records: List[TimeAndRecord],
old_state: Optional[Dict[Key, Any]] = None) -> Tuple[str, Tuple[Dict, List]]:
"""
Executes the streaming and window BTS on the given records. An optional old state can be provided
which initializes the state for execution. This is useful for batch execution where the
previous state is written out to storage and can be loaded for the next batch run.
:param identity: Identity of the records.
:param records: List of TimeAndRecord to be processed.
:param old_state: Streaming BTS state dictionary from a previous execution.
:return: Tuple[Identity, Tuple[Streaming BTS state dictionary, List of window BTS output]].
"""
schema_loader = SchemaLoader()
if records:
records.sort(key=lambda x: x[0])
else:
records = []
block_data = self._execute_stream_bts(records, identity, schema_loader, old_state)
window_data = self._execute_window_bts(identity, schema_loader)
return identity, (block_data, window_data)
|
Executes the streaming and window BTS on the given records. An optional old state can be provided
which initializes the state for execution. This is useful for batch execution where the
previous state is written out to storage and can be loaded for the next batch run.
:param identity: Identity of the records.
:param records: List of TimeAndRecord to be processed.
:param old_state: Streaming BTS state dictionary from a previous execution.
:return: Tuple[Identity, Tuple[Streaming BTS state dictionary, List of window BTS output]].
|
entailment
|
def get_per_identity_records(self, events: Iterable, data_processor: DataProcessor
) -> Generator[Tuple[str, TimeAndRecord], None, None]:
"""
Uses the given iterable of events and the data processor to convert each event into a list of
Records along with its identity and time.
:param events: iterable of events.
:param data_processor: DataProcessor to process each event in events.
:return: yields Tuple[Identity, TimeAndRecord] for all Records in events.
"""
schema_loader = SchemaLoader()
stream_bts_name = schema_loader.add_schema_spec(self._stream_bts)
stream_transformer_schema: StreamingTransformerSchema = schema_loader.get_schema_object(
stream_bts_name)
for event in events:
try:
for record in data_processor.process_data(event):
try:
id = stream_transformer_schema.get_identity(record)
time = stream_transformer_schema.get_time(record)
yield (id, (time, record))
except Exception as err:
logging.error('{} in parsing Record {}.'.format(err, record))
except Exception as err:
logging.error('{} in parsing Event {}.'.format(err, event))
|
Uses the given iterable of events and the data processor to convert each event into a list of
Records along with its identity and time.
:param events: iterable of events.
:param data_processor: DataProcessor to process each event in events.
:return: yields Tuple[Identity, TimeAndRecord] for all Records in events.
|
entailment
|
def to_string(self, hdr, other):
"""String representation with additional information"""
result = "%s[%s,%s" % (
hdr, self.get_type(self.type), self.get_clazz(self.clazz))
if self.unique:
result += "-unique,"
else:
result += ","
result += self.name
if other is not None:
result += ",%s]" % (other)
else:
result += "]"
return result
|
String representation with additional information
|
entailment
|
def answered_by(self, rec):
"""Returns true if the question is answered by the record"""
return self.clazz == rec.clazz and \
(self.type == rec.type or self.type == _TYPE_ANY) and \
self.name == rec.name
|
Returns true if the question is answered by the record
|
entailment
|
def reset_ttl(self, other):
"""Sets this record's TTL and created time to that of
another record."""
self.created = other.created
self.ttl = other.ttl
|
Sets this record's TTL and created time to that of
another record.
|
entailment
|
def to_string(self, other):
"""String representation with addtional information"""
arg = "%s/%s,%s" % (
self.ttl, self.get_remaining_ttl(current_time_millis()), other)
return DNSEntry.to_string(self, "record", arg)
|
String representation with additional information
|
entailment
|
def write(self, out):
"""Used in constructing an outgoing packet"""
out.write_string(self.address, len(self.address))
|
Used in constructing an outgoing packet
|
entailment
|
def write(self, out):
"""Used in constructing an outgoing packet"""
out.write_string(self.cpu, len(self.cpu))
out.write_string(self.os, len(self.os))
|
Used in constructing an outgoing packet
|
entailment
|
def write(self, out):
"""Used in constructing an outgoing packet"""
out.write_string(self.text, len(self.text))
|
Used in constructing an outgoing packet
|
entailment
|
def set_property(self, key, value):
"""
Update only one property in the dict
"""
self.properties[key] = value
self.sync_properties()
|
Update only one property in the dict
|
entailment
|