| sentence1 (string, lengths 52–3.87M) | sentence2 (string, lengths 1–47.2k) | label (1 class) |
|---|---|---|
def write(self, out):
"""Used in constructing an outgoing packet"""
out.write_short(self.priority)
out.write_short(self.weight)
out.write_short(self.port)
out.write_name(self.server)
|
Used in constructing an outgoing packet
|
entailment
|
def read_header(self):
"""Reads header portion of packet"""
format = '!HHHHHH'
length = struct.calcsize(format)
info = struct.unpack(format,
self.data[self.offset:self.offset + length])
self.offset += length
self.id = info[0]
self.flags = info[1]
self.num_questions = info[2]
self.num_answers = info[3]
self.num_authorities = info[4]
self.num_additionals = info[5]
|
Reads header portion of packet
|
entailment
|
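A minimal sketch of what the `!HHHHHH` unpack in `read_header` above consumes: the DNS header is six unsigned shorts in network byte order, 12 bytes in total. The header values below are illustrative only.

```python
import struct

# Six network-order unsigned shorts: id, flags, and the four section counts.
header = struct.pack('!HHHHHH', 0x1234, 0x8400, 1, 2, 0, 0)  # illustrative values
assert struct.calcsize('!HHHHHH') == 12
ident, flags, num_questions, num_answers, num_authorities, num_additionals = \
    struct.unpack('!HHHHHH', header)
assert (num_questions, num_answers) == (1, 2)
```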
def read_questions(self):
"""Reads questions section of packet"""
format = '!HH'
length = struct.calcsize(format)
for i in range(0, self.num_questions):
name = self.read_name()
info = struct.unpack(format,
self.data[self.offset:self.offset + length])
self.offset += length
question = DNSQuestion(name, info[0], info[1])
self.questions.append(question)
|
Reads questions section of packet
|
entailment
|
def read_int(self):
"""Reads an integer from the packet"""
format = '!I'
length = struct.calcsize(format)
info = struct.unpack(format,
self.data[self.offset:self.offset + length])
self.offset += length
return info[0]
|
Reads an integer from the packet
|
entailment
|
def read_character_string(self):
"""Reads a character string from the packet"""
length = ord(self.data[self.offset])
self.offset += 1
return self.read_string(length)
|
Reads a character string from the packet
|
entailment
|
def read_string(self, len):
"""Reads a string of a given length from the packet"""
format = '!' + str(len) + 's'
length = struct.calcsize(format)
info = struct.unpack(format,
self.data[self.offset:self.offset + length])
self.offset += length
return info[0]
|
Reads a string of a given length from the packet
|
entailment
|
def read_others(self):
"""Reads the answers, authorities and additionals section
of the packet"""
format = '!HHiH'
length = struct.calcsize(format)
n = self.num_answers + self.num_authorities + self.num_additionals
for i in range(0, n):
domain = self.read_name()
info = struct.unpack(format,
self.data[self.offset:self.offset + length])
self.offset += length
rec = None
if info[0] == _TYPE_A:
rec = DNSAddress(domain,
info[0], info[1], info[2],
self.read_string(4))
elif info[0] == _TYPE_CNAME or info[0] == _TYPE_PTR:
rec = DNSPointer(domain,
info[0], info[1], info[2],
self.read_name())
elif info[0] == _TYPE_TXT:
rec = DNSText(domain,
info[0], info[1], info[2],
self.read_string(info[3]))
elif info[0] == _TYPE_SRV:
rec = DNSService(domain,
info[0], info[1], info[2],
self.read_unsigned_short(),
self.read_unsigned_short(),
self.read_unsigned_short(),
self.read_name())
elif info[0] == _TYPE_HINFO:
rec = DNSHinfo(domain,
info[0], info[1], info[2],
self.read_character_string(),
self.read_character_string())
elif info[0] == _TYPE_RRSIG:
rec = DNSSignatureI(domain,
info[0], info[1], info[2],
self.read_string(18),
self.read_name(),
self.read_character_string())
elif info[0] == _TYPE_AAAA:
rec = DNSAddress(domain,
info[0], info[1], info[2],
self.read_string(16))
else:
# Try to ignore types we don't know about
# this may mean the rest of the name is
# unable to be parsed, and may show errors
# so this is left for debugging. New types
# encountered need to be parsed properly.
#
#print "UNKNOWN TYPE = " + str(info[0])
#raise BadTypeInNameException
pass
if rec is not None:
self.answers.append(rec)
|
Reads the answers, authorities and additionals section
of the packet
|
entailment
|
def read_utf(self, offset, len):
"""Reads a UTF-8 string of a given length from the packet"""
try:
result = self.data[offset:offset + len].decode('utf-8')
except UnicodeDecodeError:
result = str('')
return result
|
Reads a UTF-8 string of a given length from the packet
|
entailment
|
def read_name(self):
"""Reads a domain name from the packet"""
result = ''
off = self.offset
next = -1
first = off
while 1:
len = ord(self.data[off])
off += 1
if len == 0:
break
t = len & 0xC0
if t == 0x00:
result = ''.join((result, self.read_utf(off, len) + '.'))
off += len
elif t == 0xC0:
if next < 0:
next = off + 1
off = ((len & 0x3F) << 8) | ord(self.data[off])
if off >= first:
raise Exception(
"Bad domain name (circular) at " + str(off))
first = off
else:
raise Exception("Bad domain name at " + str(off))
if next >= 0:
self.offset = next
else:
self.offset = off
return result
|
Reads a domain name from the packet
|
entailment
|
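The 0xC0 branch in `read_name` above implements DNS name compression: when the two high bits of a length byte are set, the remaining 14 bits are an offset back into the packet where the rest of the name lives. A standalone sketch of that decoding, with made-up labels and offsets; the circular-reference guard from `read_name` is omitted here.

```python
def decode_name(data: bytes, offset: int) -> str:
    """Decode a possibly compressed DNS name starting at `offset`."""
    labels = []
    while True:
        length = data[offset]
        if length == 0:                       # root label terminates the name
            break
        if length & 0xC0 == 0xC0:             # compression pointer (two high bits set)
            offset = ((length & 0x3F) << 8) | data[offset + 1]
            continue
        labels.append(data[offset + 1:offset + 1 + length].decode('utf-8'))
        offset += 1 + length
    return '.'.join(labels) + '.'

# 'foo.example.com.' occupies bytes 0..16; the second name reuses it via a
# pointer (0xC0, 0x00) back to offset 0.
packet = b'\x03foo\x07example\x03com\x00' + b'\x03www\xc0\x00'
assert decode_name(packet, 0) == 'foo.example.com.'
assert decode_name(packet, 17) == 'www.foo.example.com.'
```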
def add_answer(self, inp, record):
"""Adds an answer"""
if not record.suppressed_by(inp):
self.add_answer_at_time(record, 0)
|
Adds an answer
|
entailment
|
def add_answer_at_time(self, record, now):
"""Adds an answer if if does not expire by a certain time"""
if record is not None:
if now == 0 or not record.is_expired(now):
self.answers.append((record, now))
if record.rrsig is not None:
self.answers.append((record.rrsig, now))
|
Adds an answer if it does not expire by a certain time
|
entailment
|
def write_byte(self, value):
"""Writes a single byte to the packet"""
format = '!B'
self.data.append(struct.pack(format, value))
self.size += 1
|
Writes a single byte to the packet
|
entailment
|
def insert_short(self, index, value):
"""Inserts an unsigned short in a certain position in the packet"""
format = '!H'
self.data.insert(index, struct.pack(format, value))
self.size += 2
|
Inserts an unsigned short in a certain position in the packet
|
entailment
|
def write_int(self, value):
"""Writes an unsigned integer to the packet"""
format = '!I'
self.data.append(struct.pack(format, int(value)))
self.size += 4
|
Writes an unsigned integer to the packet
|
entailment
|
def write_string(self, value, length):
"""Writes a string to the packet"""
format = '!' + str(length) + 's'
self.data.append(struct.pack(format, value))
self.size += length
|
Writes a string to the packet
|
entailment
|
def write_utf(self, s):
"""Writes a UTF-8 string of a given length to the packet"""
utfstr = s.encode('utf-8')
length = len(utfstr)
if length > 64:
raise NamePartTooLongException
self.write_byte(length)
self.write_string(utfstr, length)
|
Writes a UTF-8 string of a given length to the packet
|
entailment
|
def write_name(self, name):
"""Writes a domain name to the packet"""
try:
# Find existing instance of this name in packet
#
index = self.names[name]
except KeyError:
# No record of this name already, so write it
# out as normal, recording the location of the name
# for future pointers to it.
#
self.names[name] = self.size
parts = name.split('.')
if parts[-1] == '':
parts = parts[:-1]
for part in parts:
self.write_utf(part)
self.write_byte(0)
return
# An index was found, so write a pointer to it
#
self.write_byte((index >> 8) | 0xC0)
self.write_byte(index)
|
Writes a domain name to the packet
|
entailment
|
def write_question(self, question):
"""Writes a question to the packet"""
self.write_name(question.name)
self.write_short(question.type)
self.write_short(question.clazz)
|
Writes a question to the packet
|
entailment
|
def write_record(self, record, now):
"""Writes a record (answer, authoritative answer, additional) to
the packet"""
self.write_name(record.name)
self.write_short(record.type)
if record.unique and self.multicast:
self.write_short(record.clazz | _CLASS_UNIQUE)
else:
self.write_short(record.clazz)
if now == 0:
self.write_int(record.ttl)
else:
self.write_int(record.get_remaining_ttl(now))
index = len(self.data)
# Adjust size for the short we will write before this record
#
self.size += 2
record.write(self)
self.size -= 2
length = len(b''.join(self.data[index:]))
self.insert_short(index, length)
|
Writes a record (answer, authoritative answer, additional) to
the packet
|
entailment
|
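`write_record` above relies on a small trick: the record body is appended first, and only afterwards is its total byte length inserted as an unsigned short at the position remembered in `index`. A reduced sketch of that pattern with made-up byte values:

```python
import struct

data = []                                    # list of byte chunks, as in the outgoing packet
index = len(data)                            # remember where this record's data starts
data.append(struct.pack('!H', 0x0050))       # pretend record body, part 1 (illustrative)
data.append(struct.pack('!H', 0x1F90))       # pretend record body, part 2 (illustrative)
length = len(b''.join(data[index:]))         # bytes written for this record
data.insert(index, struct.pack('!H', length))
assert b''.join(data) == b'\x00\x04\x00\x50\x1f\x90'
```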
def packet(self):
"""Returns a string containing the packet's bytes
No further parts should be added to the packet once this
is done."""
if not self.finished:
self.finished = 1
for question in self.questions:
self.write_question(question)
for answer, time in self.answers:
self.write_record(answer, time)
for authority in self.authorities:
self.write_record(authority, 0)
for additional in self.additionals:
self.write_record(additional, 0)
self.insert_short(0, len(self.additionals))
self.insert_short(0, len(self.authorities))
self.insert_short(0, len(self.answers))
self.insert_short(0, len(self.questions))
self.insert_short(0, self.flags)
if self.multicast:
self.insert_short(0, 0)
else:
self.insert_short(0, self.id)
return b''.join(self.data)
|
Returns a string containing the packet's bytes.
No further parts should be added to the packet once this
is done.
|
entailment
|
def add(self, entry):
"""Adds an entry"""
if self.get(entry) is not None:
return
try:
list = self.cache[entry.key]
except:
list = self.cache[entry.key] = []
list.append(entry)
|
Adds an entry
|
entailment
|
def sign(self, entry, signer=None):
"""Adds and sign an entry"""
if (self.get(entry) is not None):
return
if (entry.rrsig is None) and (self.private is not None):
entry.rrsig = DNSSignatureS(entry.name,
_TYPE_RRSIG, _CLASS_IN, entry, self.private, signer)
self.add(entry)
if (self.private is not None):
self.add(entry.rrsig)
|
Adds and signs an entry
|
entailment
|
def remove(self, entry):
"""Removes an entry"""
try:
list = self.cache[entry.key]
list.remove(entry)
except:
pass
|
Removes an entry
|
entailment
|
def get(self, entry):
"""Gets an entry by key. Will return None if there is no
matching entry."""
try:
list = self.cache[entry.key]
return list[list.index(entry)]
except:
return None
|
Gets an entry by key. Will return None if there is no
matching entry.
|
entailment
|
def get_by_details(self, name, type, clazz):
"""Gets an entry by details. Will return None if there is
no matching entry."""
entry = DNSEntry(name, type, clazz)
return self.get(entry)
|
Gets an entry by details. Will return None if there is
no matching entry.
|
entailment
|
def entries(self):
"""Returns a list of all entries"""
def add(x, y):
return x + y
try:
return reduce(add, list(self.cache.values()))
except:
return []
|
Returns a list of all entries
|
entailment
|
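`entries()` above concatenates the per-key lists held in the cache into a single flat list by folding them with `+`. A tiny illustration with made-up cache contents; note that in Python 3, `reduce` must be imported from `functools`.

```python
from functools import reduce  # reduce is no longer a builtin in Python 3

cache = {'a._http._tcp.local.': ['rec1', 'rec2'],
         'b._http._tcp.local.': ['rec3']}        # illustrative cache contents
flat = reduce(lambda x, y: x + y, list(cache.values()))
assert flat == ['rec1', 'rec2', 'rec3']
```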
def update_record(self, zeroconf, now, record):
"""Callback invoked by Zeroconf when new information arrives.
Updates information required by browser in the Zeroconf cache."""
if record.type == _TYPE_PTR and record.name == self.type:
expired = record.is_expired(now)
try:
oldrecord = self.services[record.alias.lower()]
if not expired:
oldrecord.reset_ttl(record)
else:
del(self.services[record.alias.lower()])
callback = lambda x: self.listener.remove_service(x,
self.type, record.alias)
self.list.append(callback)
return
except:
if not expired:
self.services[record.alias.lower()] = record
callback = lambda x: self.listener.add_service(x,
self.type, record.alias)
self.list.append(callback)
expires = record.get_expiration_time(75)
if expires < self.next_time:
self.next_time = expires
|
Callback invoked by Zeroconf when new information arrives.
Updates information required by browser in the Zeroconf cache.
|
entailment
|
def set_properties(self, properties):
"""Sets properties and text of this info from a dictionary"""
if isinstance(properties, dict):
self.properties = properties
self.sync_properties()
else:
self.text = properties
|
Sets properties and text of this info from a dictionary
|
entailment
|
def set_text(self, text):
"""Sets properties and text given a text field"""
self.text = text
try:
self.properties = text_to_dict(text)
except:
traceback.print_exc()
self.properties = None
|
Sets properties and text given a text field
|
entailment
|
def get_name(self):
"""Name accessor"""
if self.type is not None and self.name.endswith("." + self.type):
return self.name[:len(self.name) - len(self.type) - 1]
return self.name
|
Name accessor
|
entailment
|
def update_record(self, zeroconf, now, record):
"""Updates service information from a DNS record"""
if record is not None and not record.is_expired(now):
if record.type == _TYPE_A:
if record.name == self.name:
if not record.address in self.address:
self.address.append(record.address)
elif record.type == _TYPE_SRV:
if record.name == self.name:
self.server = record.server
self.port = record.port
self.weight = record.weight
self.priority = record.priority
self.address = []
self.update_record(zeroconf, now,
zeroconf.cache.get_by_details(self.server,
_TYPE_A, _CLASS_IN))
elif record.type == _TYPE_TXT:
if record.name == self.name:
self.set_text(record.text)
|
Updates service information from a DNS record
|
entailment
|
def request(self, zeroconf, timeout):
"""Returns true if the service could be discovered on the
network, and updates this object with details discovered.
"""
now = current_time_millis()
delay = _LISTENER_TIME
next = now + delay
last = now + timeout
result = 0
try:
zeroconf.add_listener(self,
DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN))
while self.server is None or \
len(self.address) == 0 or \
self.text is None:
if last <= now:
return 0
if next <= now:
out = DNSOutgoing(_FLAGS_QR_QUERY)
out.add_question(DNSQuestion(self.name,
_TYPE_SRV, _CLASS_IN))
out.add_answer_at_time(
zeroconf.cache.get_by_details(self.name,
_TYPE_SRV, _CLASS_IN), now)
out.add_question(
DNSQuestion(self.name, _TYPE_TXT, _CLASS_IN))
out.add_answer_at_time(
zeroconf.cache.get_by_details(self.name,
_TYPE_TXT, _CLASS_IN), now)
if self.server is not None:
out.add_question(
DNSQuestion(self.server, _TYPE_A, _CLASS_IN))
out.add_answer_at_time(
zeroconf.cache.get_by_details(self.server,
_TYPE_A, _CLASS_IN), now)
zeroconf.send(out)
next = now + delay
delay = delay * 2
zeroconf.wait(min(next, last) - now)
now = current_time_millis()
result = 1
finally:
zeroconf.remove_listener(self)
return result
|
Returns true if the service could be discovered on the
network, and updates this object with details discovered.
|
entailment
|
def wait(self, timeout):
"""Calling thread waits for a given number of milliseconds or
until notified."""
self.condition.acquire()
self.condition.wait(timeout // 1000)
self.condition.release()
|
Calling thread waits for a given number of milliseconds or
until notified.
|
entailment
|
def notify_all(self):
"""Notifies all waiting threads"""
self.condition.acquire()
# python 3.x
try:
self.condition.notify_all()
except:
self.condition.notifyAll()
self.condition.release()
|
Notifies all waiting threads
|
entailment
|
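`wait()` and `notify_all()` above wrap a `threading.Condition`; the millisecond timeout is divided by 1000 before being handed to `Condition.wait`, which expects seconds. A plain-threading sketch of the same pattern; the function names here are illustrative.

```python
import threading

condition = threading.Condition()

def wait_ms(timeout_ms):
    # Condition.wait takes seconds, so the millisecond timeout is scaled down.
    with condition:
        condition.wait(timeout_ms / 1000)

def wake_everyone():
    # Wakes any threads currently blocked in wait_ms().
    with condition:
        condition.notify_all()
```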
def get_service_info(self, type, name, timeout=3000):
"""Returns network's service information for a particular
name and type, or None if no service matches by the timeout,
which defaults to 3 seconds."""
info = ServiceInfo(type, name)
if info.request(self, timeout):
return info
return None
|
Returns network's service information for a particular
name and type, or None if no service matches by the timeout,
which defaults to 3 seconds.
|
entailment
|
def add_serviceListener(self, type, listener):
"""Adds a listener for a particular service type. This object
will then have its update_record method called when information
arrives for that type."""
self.remove_service_listener(listener)
self.browsers.append(ServiceBrowser(self, type, listener))
|
Adds a listener for a particular service type. This object
will then have its update_record method called when information
arrives for that type.
|
entailment
|
def remove_service_listener(self, listener):
"""Removes a listener from the set that is currently listening."""
for browser in self.browsers:
if browser.listener == listener:
browser.cancel()
del(browser)
|
Removes a listener from the set that is currently listening.
|
entailment
|
def register_service(self, info):
"""Registers service information to the network with a default TTL
of 60 seconds. Zeroconf will then respond to requests for
information for that service. The name of the service may be
changed if needed to make it unique on the network."""
self.check_service(info)
self.services[info.name.lower()] = info
# zone transfer
self.transfer_zone(info.type)
self.announce_service(info.name)
|
Registers service information to the network with a default TTL
of 60 seconds. Zeroconf will then respond to requests for
information for that service. The name of the service may be
changed if needed to make it unique on the network.
|
entailment
|
def unregister_service(self, info):
"""Unregister a service."""
try:
del(self.services[info.name.lower()])
except:
pass
now = current_time_millis()
next_time = now
i = 0
while i < 3:
if now < next_time:
self.wait(next_time - now)
now = current_time_millis()
continue
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
out.add_answer_at_time(
DNSPointer(info.type,
_TYPE_PTR, _CLASS_IN, 0, info.name), 0)
out.add_answer_at_time(
DNSService(info.name,
_TYPE_SRV, _CLASS_IN, 0, info.priority,
info.weight, info.port, info.name), 0)
out.add_answer_at_time(
DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
for k in info.address:
out.add_answer_at_time(
DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, k), 0)
self.send(out)
i += 1
next_time += _UNREGISTER_TIME
|
Unregister a service.
|
entailment
|
def check_service(self, info):
"""Checks the network for a unique service name, modifying the
ServiceInfo passed in if it is not unique."""
now = current_time_millis()
next_time = now
i = 0
while i < 3:
for record in self.cache.entries_with_name(info.type):
if record.type == _TYPE_PTR and \
not record.is_expired(now) and \
record.alias == info.name:
if (info.name.find('.') < 0):
info.name = info.name + ".[" + \
info.address + \
":" + info.port + \
"]." + info.type
self.check_service(info)
return
raise NonUniqueNameException
if now < next_time:
self.wait(next_time - now)
now = current_time_millis()
continue
out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
self.debug = out
out.add_question(
DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN))
out.add_authorative_answer(
DNSPointer(info.type,
_TYPE_PTR, _CLASS_IN, info.ttl, info.name))
self.send(out)
i += 1
next_time += _CHECK_TIME
|
Checks the network for a unique service name, modifying the
ServiceInfo passed in if it is not unique.
|
entailment
|
def add_listener(self, listener, question):
"""Adds a listener for a given question. The listener will have
its update_record method called when information is available to
answer the question."""
now = current_time_millis()
self.listeners.append(listener)
if question is not None:
for record in self.cache.entries_with_name(question.name):
if question.answered_by(record) and not record.is_expired(now):
listener.update_record(self, now, record)
self.notify_all()
|
Adds a listener for a given question. The listener will have
its update_record method called when information is available to
answer the question.
|
entailment
|
def update_record(self, now, rec):
"""Used to notify listeners of new information that has updated
a record."""
for listener in self.listeners:
listener.update_record(self, now, rec)
self.notify_all()
|
Used to notify listeners of new information that has updated
a record.
|
entailment
|
def handle_response(self, msg, address):
"""Deal with incoming response packets. All answers
are held in the cache, and listeners are notified."""
now = current_time_millis()
sigs = []
precache = []
for record in msg.answers:
if isinstance(record, DNSSignature):
sigs.append(record)
else:
precache.append(record)
for e in precache:
for s in sigs:
if self.verify(e, s):
# print "DNS: %s verified with %s" % (e,s)
if self.adaptive and e.type == _TYPE_A:
if e.address == '\x00\x00\x00\x00':
e.address = socket.inet_aton(address)
if e in self.cache.entries():
if e.is_expired(now):
for i in self.hooks:
try:
i.remove(e)
except:
pass
self.cache.remove(e)
self.cache.remove(s)
else:
entry = self.cache.get(e)
sig = self.cache.get(s)
if (entry is not None) and (sig is not None):
for i in self.hooks:
try:
i.update(e)
except:
pass
entry.reset_ttl(e)
sig.reset_ttl(s)
else:
e.rrsig = s
self.cache.add(e)
self.cache.add(s)
for i in self.hooks:
try:
i.add(e)
except:
pass
precache.remove(e)
sigs.remove(s)
self.update_record(now, record)
if self.bypass:
for e in precache:
if e in self.cache.entries():
if e.is_expired(now):
for i in self.hooks:
try:
i.remove(e)
except:
pass
self.cache.remove(e)
else:
entry = self.cache.get(e)
if (entry is not None):
for i in self.hooks:
try:
i.update(e)
except:
pass
entry.reset_ttl(e)
else:
self.cache.add(e)
for i in self.hooks:
try:
i.add(e)
except:
pass
self.update_record(now, record)
|
Deal with incoming response packets. All answers
are held in the cache, and listeners are notified.
|
entailment
|
def handle_query(self, msg, addr, port, orig):
"""
Deal with incoming query packets. Provides a response if
possible.
msg - message to process
addr - dst addr
port - dst port
orig - originating address (for adaptive records)
"""
out = None
# Support unicast client responses
#
if port != _MDNS_PORT:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0)
for question in msg.questions:
out.add_question(question)
for question in msg.questions:
if question.type == _TYPE_PTR:
for service in self.services.values():
if question.name == service.type:
                        # FIXME: sometimes the cache has not been filled in time
answer = self.cache.get(
DNSPointer(service.type,
_TYPE_PTR, _CLASS_IN,
service.ttl, service.name))
if out is None and answer is not None:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
if answer:
out.add_answer(msg, answer)
if question.type == _TYPE_AXFR:
if question.name in list(self.zones.keys()):
if out is None:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
for i in self.zones[question.name].services.values():
out.add_answer(msg, i)
else:
try:
if out is None:
out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
service = self.services.get(question.name.lower(), None)
try:
rs = service.records
except:
rs = []
# Answer A record queries for any service addresses we know
if (question.type == _TYPE_A or \
question.type == _TYPE_ANY) \
and (_TYPE_A in rs):
for service in self.services.values():
if service.server == question.name.lower():
for i in service.address:
out.add_answer(msg, self.cache.get(
DNSAddress(question.name,
_TYPE_A, _CLASS_IN | _CLASS_UNIQUE,
service.ttl, i)))
if not service:
continue
if (question.type == _TYPE_SRV or \
question.type == _TYPE_ANY) and (_TYPE_SRV in rs):
out.add_answer(msg, self.cache.get(
DNSService(question.name,
_TYPE_SRV, _CLASS_IN | _CLASS_UNIQUE,
service.ttl, service.priority, service.weight,
service.port, service.server)))
if (question.type == _TYPE_TXT or \
question.type == _TYPE_ANY) and \
(_TYPE_TXT in rs):
out.add_answer(msg, self.cache.get(
DNSText(question.name,
_TYPE_TXT, _CLASS_IN | _CLASS_UNIQUE,
service.ttl, service.text)))
if (question.type == _TYPE_SRV) and (_TYPE_SRV in rs):
for i in service.address:
out.add_additional_answer(self.cache.get(
DNSAddress(service.server,
_TYPE_A, _CLASS_IN | _CLASS_UNIQUE,
service.ttl, i)))
except:
traceback.print_exc()
if out is not None and out.answers:
out.id = msg.id
self.send(out, addr, port)
|
Deal with incoming query packets. Provides a response if
possible.
msg - message to process
addr - dst addr
port - dst port
orig - originating address (for adaptive records)
|
entailment
|
def send(self, out, addr=_MDNS_ADDR, port=_MDNS_PORT):
"""Sends an outgoing packet."""
# This is a quick test to see if we can parse the packets we generate
#temp = DNSIncoming(out.packet())
for i in self.intf.values():
try:
return i.sendto(out.packet(), 0, (addr, port))
except:
traceback.print_exc()
# Ignore this, it may be a temporary loss of network connection
return -1
|
Sends an outgoing packet.
|
entailment
|
def close(self):
"""Ends the background threads, and prevent this instance from
servicing further queries."""
if globals()['_GLOBAL_DONE'] == 0:
globals()['_GLOBAL_DONE'] = 1
self.notify_all()
self.engine.notify()
self.unregister_all_services()
for i in self.intf.values():
try:
# there are cases, when we start mDNS without network
i.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP,
socket.inet_aton(_MDNS_ADDR) + \
socket.inet_aton('0.0.0.0'))
except:
pass
i.close()
|
Ends the background threads, and prevents this instance from
servicing further queries.
|
entailment
|
def execute(self, identity_records: 'RDD', old_state_rdd: Optional['RDD'] = None) -> 'RDD':
"""
Executes Blurr BTS with the given records. old_state_rdd can be provided to load an older
state from a previous run.
:param identity_records: RDD of the form Tuple[Identity, List[TimeAndRecord]]
:param old_state_rdd: A previous streaming BTS state RDD as Tuple[Identity, Streaming BTS
State]
:return: RDD[Identity, Tuple[Streaming BTS State, List of Window BTS output]]
"""
identity_records_with_state = identity_records
if old_state_rdd:
identity_records_with_state = identity_records.fullOuterJoin(old_state_rdd)
return identity_records_with_state.map(lambda x: self._execute_per_identity_records(x))
|
Executes Blurr BTS with the given records. old_state_rdd can be provided to load an older
state from a previous run.
:param identity_records: RDD of the form Tuple[Identity, List[TimeAndRecord]]
:param old_state_rdd: A previous streaming BTS state RDD as Tuple[Identity, Streaming BTS
State]
:return: RDD[Identity, Tuple[Streaming BTS State, List of Window BTS output]]
|
entailment
|
def get_record_rdd_from_json_files(self,
json_files: List[str],
data_processor: DataProcessor = SimpleJsonDataProcessor(),
spark_session: Optional['SparkSession'] = None) -> 'RDD':
"""
Reads the data from the given json_files path and converts them into the `Record`s format for
processing. `data_processor` is used to process the per event data in those files to convert
them into `Record`.
:param json_files: List of json file paths. Regular Spark path wildcards are accepted.
:param data_processor: `DataProcessor` to process each event in the json files.
:param spark_session: `SparkSession` to use for execution. If None is provided then a basic
`SparkSession` is created.
:return: RDD containing Tuple[Identity, List[TimeAndRecord]] which can be used in
`execute()`
"""
spark_context = get_spark_session(spark_session).sparkContext
raw_records: 'RDD' = spark_context.union(
[spark_context.textFile(file) for file in json_files])
return raw_records.mapPartitions(
lambda x: self.get_per_identity_records(x, data_processor)).groupByKey().mapValues(list)
|
Reads the data from the given json_files path and converts them into the `Record`s format for
processing. `data_processor` is used to process the per event data in those files to convert
them into `Record`.
:param json_files: List of json file paths. Regular Spark path wildcards are accepted.
:param data_processor: `DataProcessor` to process each event in the json files.
:param spark_session: `SparkSession` to use for execution. If None is provided then a basic
`SparkSession` is created.
:return: RDD containing Tuple[Identity, List[TimeAndRecord]] which can be used in
`execute()`
|
entailment
|
def get_record_rdd_from_rdd(
self,
rdd: 'RDD',
data_processor: DataProcessor = SimpleDictionaryDataProcessor(),
) -> 'RDD':
"""
Converts a RDD of raw events into the `Record`s format for processing. `data_processor` is
used to process the per row data to convert them into `Record`.
:param rdd: RDD containing the raw events.
:param data_processor: `DataProcessor` to process each row in the given `rdd`.
:return: RDD containing Tuple[Identity, List[TimeAndRecord]] which can be used in
`execute()`
"""
return rdd.mapPartitions(
lambda x: self.get_per_identity_records(x, data_processor)).groupByKey().mapValues(list)
|
Converts a RDD of raw events into the `Record`s format for processing. `data_processor` is
used to process the per row data to convert them into `Record`.
:param rdd: RDD containing the raw events.
:param data_processor: `DataProcessor` to process each row in the given `rdd`.
:return: RDD containing Tuple[Identity, List[TimeAndRecord]] which can be used in
`execute()`
|
entailment
|
def write_output_file(self,
path: str,
per_identity_data: 'RDD',
spark_session: Optional['SparkSession'] = None) -> None:
"""
Basic helper function to persist data to disk.
        If a window BTS was provided then the window BTS output is written in CSV format; otherwise,
the streaming BTS output is written in JSON format to the `path` provided
:param path: Path where the output should be written.
:param per_identity_data: Output of the `execute()` call.
:param spark_session: `SparkSession` to use for execution. If None is provided then a basic
`SparkSession` is created.
:return:
"""
_spark_session_ = get_spark_session(spark_session)
if not self._window_bts:
per_identity_data.flatMap(
lambda x: [json.dumps(data, cls=BlurrJSONEncoder) for data in x[1][0].items()]
).saveAsTextFile(path)
else:
# Convert to a DataFrame first so that the data can be saved as a CSV
_spark_session_.createDataFrame(per_identity_data.flatMap(lambda x: x[1][1])).write.csv(
path, header=True)
|
Basic helper function to persist data to disk.
If a window BTS was provided then the window BTS output is written in CSV format; otherwise,
the streaming BTS output is written in JSON format to the `path` provided
:param path: Path where the output should be written.
:param per_identity_data: Output of the `execute()` call.
:param spark_session: `SparkSession` to use for execution. If None is provided then a basic
`SparkSession` is created.
:return:
|
entailment
|
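A hedged usage sketch tying together the Spark helpers above (`get_record_rdd_from_json_files`, `execute`, `write_output_file`); `runner` and the paths are placeholders for illustration, not names taken from the source.

```python
# Hypothetical wiring of the methods documented above; `runner` stands for an
# instance of the class exposing them, and all paths are placeholders.
json_files = ['s3://my-bucket/events/2019-01-*.json']
record_rdd = runner.get_record_rdd_from_json_files(json_files)
result_rdd = runner.execute(record_rdd, old_state_rdd=None)  # or pass a prior state RDD
runner.write_output_file('s3://my-bucket/output/run-001', result_rdd)
```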
def print_output(self, per_identity_data: 'RDD') -> None:
"""
        Basic helper function to write data to stdout. If a window BTS was provided then the window
        BTS output is written; otherwise, the streaming BTS output is written to stdout.
WARNING - For large datasets this will be extremely slow.
:param per_identity_data: Output of the `execute()` call.
"""
if not self._window_bts:
data = per_identity_data.flatMap(
lambda x: [json.dumps(data, cls=BlurrJSONEncoder) for data in x[1][0].items()])
else:
            # Serialize the window BTS output per identity as JSON
data = per_identity_data.map(
lambda x: json.dumps((x[0], x[1][1]), cls=BlurrJSONEncoder))
for row in data.collect():
print(row)
|
Basic helper function to write data to stdout. If a window BTS was provided then the window
BTS output is written; otherwise, the streaming BTS output is written to stdout.
WARNING - For large datasets this will be extremely slow.
:param per_identity_data: Output of the `execute()` call.
|
entailment
|
def find_executable(executable, path=None):
"""
As distutils.spawn.find_executable, but on Windows, look up
every extension declared in PATHEXT instead of just `.exe`
"""
if sys.platform != 'win32':
return distutils.spawn.find_executable(executable, path)
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
base, ext = os.path.splitext(executable)
if not os.path.isfile(executable):
for p in paths:
for ext in extensions:
f = os.path.join(p, base + ext)
if os.path.isfile(f):
return f
return None
else:
return executable
|
As distutils.spawn.find_executable, but on Windows, look up
every extension declared in PATHEXT instead of just `.exe`
|
entailment
|
def create_environment_dict(overrides):
"""
Create and return a copy of os.environ with the specified overrides
"""
result = os.environ.copy()
result.update(overrides or {})
return result
|
Create and return a copy of os.environ with the specified overrides
|
entailment
|
def get(self, server):
""" Retrieve credentials for `server`. If no credentials are found,
a `StoreError` will be raised.
"""
if not isinstance(server, six.binary_type):
server = server.encode('utf-8')
data = self._execute('get', server)
result = json.loads(data.decode('utf-8'))
        # docker-credential-pass will return an object for nonexistent servers
# whereas other helpers will exit with returncode != 0. For
# consistency, if no significant data is returned,
# raise CredentialsNotFound
if result['Username'] == '' and result['Secret'] == '':
raise errors.CredentialsNotFound(
'No matching credentials in {}'.format(self.program)
)
return result
|
Retrieve credentials for `server`. If no credentials are found,
a `StoreError` will be raised.
|
entailment
|
def store(self, server, username, secret):
""" Store credentials for `server`. Raises a `StoreError` if an error
occurs.
"""
data_input = json.dumps({
'ServerURL': server,
'Username': username,
'Secret': secret
}).encode('utf-8')
return self._execute('store', data_input)
|
Store credentials for `server`. Raises a `StoreError` if an error
occurs.
|
entailment
|
def erase(self, server):
""" Erase credentials for `server`. Raises a `StoreError` if an error
occurs.
"""
if not isinstance(server, six.binary_type):
server = server.encode('utf-8')
self._execute('erase', server)
|
Erase credentials for `server`. Raises a `StoreError` if an error
occurs.
|
entailment
|
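A hedged usage sketch for the `get`/`store`/`erase` helpers above; the `Store('docker-credential-pass')` construction and the registry URL are assumptions for illustration only.

```python
# Hypothetical usage; the constructor name and the helper program are assumptions.
store = Store('docker-credential-pass')
store.store('https://index.docker.io/v1/', username='alice', secret='s3cr3t')
creds = store.get('https://index.docker.io/v1/')   # {'Username': ..., 'Secret': ...}
store.erase('https://index.docker.io/v1/')
```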
def get_identity(self, record: Record) -> str:
"""
Evaluates and returns the identity as specified in the schema.
:param record: Record which is used to determine the identity.
:return: The evaluated identity
:raises: IdentityError if identity cannot be determined.
"""
context = self.schema_context.context
context.add_record(record)
identity = self.identity.evaluate(context)
if not identity:
raise IdentityError('Could not determine identity using {}. Record is {}'.format(
self.identity.code_string, record))
context.remove_record()
return identity
|
Evaluates and returns the identity as specified in the schema.
:param record: Record which is used to determine the identity.
:return: The evaluated identity
:raises: IdentityError if identity cannot be determined.
|
entailment
|
def run_evaluate(self, record: Record):
"""
Evaluates and updates data in the StreamingTransformer.
:param record: The 'source' record used for the update.
:raises: IdentityError if identity is different from the one used during
initialization.
"""
record_identity = self._schema.get_identity(record)
if self._identity != record_identity:
raise IdentityError(
'Identity in transformer ({}) and new record ({}) do not match'.format(
self._identity, record_identity))
# Add source record and time to the global context
self._evaluation_context.add_record(record)
self._evaluation_context.global_add(
'time',
DateTimeFieldSchema.sanitize_object(
self._schema.time.evaluate(self._evaluation_context)))
super().run_evaluate()
        # Clean up source and time from the context
self._evaluation_context.remove_record()
self._evaluation_context.global_remove('time')
|
Evaluates and updates data in the StreamingTransformer.
:param record: The 'source' record used for the update.
:raises: IdentityError if identity is different from the one used during
initialization.
|
entailment
|
def extend_schema_spec(self) -> None:
""" Injects the identity field """
super().extend_schema_spec()
identity_field = {
'Name': '_identity',
'Type': BtsType.STRING,
'Value': 'identity',
ATTRIBUTE_INTERNAL: True
}
if self.ATTRIBUTE_FIELDS in self._spec:
self._spec[self.ATTRIBUTE_FIELDS].insert(0, identity_field)
self.schema_loader.add_schema_spec(identity_field, self.fully_qualified_name)
|
Injects the identity field
|
entailment
|
def _persist(self) -> None:
"""
Persists the current data group
"""
if self._store:
self._store.save(self._key, self._snapshot)
|
Persists the current data group
|
entailment
|
def add_schema_spec(self, spec: Dict[str, Any],
fully_qualified_parent_name: str = None) -> Optional[str]:
"""
Add a schema dictionary to the schema loader. The given schema is stored
against fully_qualified_parent_name + ITEM_SEPARATOR('.') + schema.name.
:param spec: Schema specification.
:param fully_qualified_parent_name: Full qualified name of the parent.
If None is passed then the schema is stored against the schema name.
:return: The fully qualified name against which the spec is stored.
None is returned if the given spec is not a dictionary or the spec does not
contain a 'name' key.
"""
if not isinstance(spec, dict) or ATTRIBUTE_NAME not in spec:
return None
name = spec[ATTRIBUTE_NAME]
fully_qualified_name = name if fully_qualified_parent_name is None else self.get_fully_qualified_name(
fully_qualified_parent_name, name)
# Ensure that basic validation for each spec part is done before it is added to spec cache
if isinstance(spec, dict):
self._error_cache.add(
validate_required_attributes(fully_qualified_name, spec, ATTRIBUTE_NAME,
ATTRIBUTE_TYPE))
if ATTRIBUTE_TYPE in spec and not Type.contains(spec[ATTRIBUTE_TYPE]):
self._error_cache.add(
InvalidTypeError(fully_qualified_name, spec, ATTRIBUTE_TYPE,
InvalidTypeError.Reason.TYPE_NOT_DEFINED))
self._spec_cache[fully_qualified_name] = spec
for key, val in spec.items():
if isinstance(val, list):
for item in val:
self.add_schema_spec(item, fully_qualified_name)
self.add_schema_spec(val, fully_qualified_name)
return spec[ATTRIBUTE_NAME]
|
Add a schema dictionary to the schema loader. The given schema is stored
against fully_qualified_parent_name + ITEM_SEPARATOR('.') + schema.name.
:param spec: Schema specification.
:param fully_qualified_parent_name: Full qualified name of the parent.
If None is passed then the schema is stored against the schema name.
:return: The fully qualified name against which the spec is stored.
None is returned if the given spec is not a dictionary or the spec does not
contain a 'name' key.
|
entailment
|
def add_errors(self, *errors: Union[BaseSchemaError, SchemaErrorCollection]) -> None:
""" Adds errors to the error store for the schema """
for error in errors:
self._error_cache.add(error)
|
Adds errors to the error store for the schema
|
entailment
|
def get_schema_object(self, fully_qualified_name: str) -> 'BaseSchema':
"""
Used to generate a schema object from the given fully_qualified_name.
:param fully_qualified_name: The fully qualified name of the object needed.
:return: An initialized schema object
"""
if fully_qualified_name not in self._schema_cache:
spec = self.get_schema_spec(fully_qualified_name)
if spec:
try:
self._schema_cache[fully_qualified_name] = TypeLoader.load_schema(
spec.get(ATTRIBUTE_TYPE, None))(fully_qualified_name, self)
except TypeLoaderError as err:
self.add_errors(
InvalidTypeError(fully_qualified_name, spec, ATTRIBUTE_TYPE,
InvalidTypeError.Reason.TYPE_NOT_LOADED,
err.type_class_name))
return self._schema_cache.get(fully_qualified_name, None)
|
Used to generate a schema object from the given fully_qualified_name.
:param fully_qualified_name: The fully qualified name of the object needed.
:return: An initialized schema object
|
entailment
|
def get_store(self, fully_qualified_name: str) -> Optional['Store']:
"""
Used to generate a store object from the given fully_qualified_name.
:param fully_qualified_name: The fully qualified name of the store object needed.
:return: An initialized store object
"""
if fully_qualified_name not in self._store_cache:
schema = self.get_schema_object(fully_qualified_name)
if not schema:
return None
if Type.is_store_type(schema.type):
self._store_cache[fully_qualified_name] = TypeLoader.load_item(schema.type)(schema)
else:
self.add_errors(
InvalidTypeError(fully_qualified_name, {}, ATTRIBUTE_TYPE,
InvalidTypeError.Reason.INCORRECT_BASE, schema.type,
InvalidTypeError.BaseTypes.STORE))
return self._store_cache.get(fully_qualified_name, None)
|
Used to generate a store object from the given fully_qualified_name.
:param fully_qualified_name: The fully qualified name of the store object needed.
:return: An initialized store object
|
entailment
|
def get_nested_schema_object(self, fully_qualified_parent_name: str,
nested_item_name: str) -> Optional['BaseSchema']:
"""
Used to generate a schema object from the given fully_qualified_parent_name
and the nested_item_name.
:param fully_qualified_parent_name: The fully qualified name of the parent.
:param nested_item_name: The nested item name.
:return: An initialized schema object of the nested item.
"""
return self.get_schema_object(
self.get_fully_qualified_name(fully_qualified_parent_name, nested_item_name))
|
Used to generate a schema object from the given fully_qualified_parent_name
and the nested_item_name.
:param fully_qualified_parent_name: The fully qualified name of the parent.
:param nested_item_name: The nested item name.
:return: An initialized schema object of the nested item.
|
entailment
|
def get_fully_qualified_name(fully_qualified_parent_name: str, nested_item_name: str) -> str:
"""
Returns the fully qualified name by combining the fully_qualified_parent_name
and nested_item_name.
:param fully_qualified_parent_name: The fully qualified name of the parent.
:param nested_item_name: The nested item name.
:return: The fully qualified name of the nested item.
"""
return fully_qualified_parent_name + SchemaLoader.ITEM_SEPARATOR + nested_item_name
|
Returns the fully qualified name by combining the fully_qualified_parent_name
and nested_item_name.
:param fully_qualified_parent_name: The fully qualified name of the parent.
:param nested_item_name: The nested item name.
:return: The fully qualified name of the nested item.
|
entailment
|
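`get_fully_qualified_name` above simply joins the two names with the loader's separator. A tiny illustrative call, assuming it is exposed as a static method on `SchemaLoader` and that `ITEM_SEPARATOR` is `'.'` (as the `add_schema_spec` docstring above states); the names themselves are made up.

```python
# Names are illustrative; ITEM_SEPARATOR is assumed to be '.'.
assert SchemaLoader.get_fully_qualified_name('streaming_bts', 'user_state') == \
    'streaming_bts.user_state'
```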
def get_schema_spec(self, fully_qualified_name: str) -> Dict[str, Any]:
"""
Used to retrieve the specifications of the schema from the given
fully_qualified_name of schema.
:param fully_qualified_name: The fully qualified name of the schema needed.
:return: Schema dictionary.
"""
if fully_qualified_name not in self._spec_cache:
self.add_errors(SpecNotFoundError(fully_qualified_name, {}))
return self._spec_cache.get(fully_qualified_name, None)
|
Used to retrieve the specifications of the schema from the given
fully_qualified_name of schema.
:param fully_qualified_name: The fully qualified name of the schema needed.
:return: Schema dictionary.
|
entailment
|
def get_schema_specs_of_type(self, *schema_types: Type) -> Dict[str, Dict[str, Any]]:
"""
        Returns a dictionary of schema dictionaries keyed by fully qualified name for
        the schema types provided.
        :param schema_types: Schema types.
        :return: Dictionary mapping fully qualified names to schema dictionaries.
"""
return {
fq_name: schema
for fq_name, schema in self._spec_cache.items()
if Type.is_type_in(schema.get(ATTRIBUTE_TYPE, ''), list(schema_types))
}
|
Returns a dictionary of schema dictionaries keyed by fully qualified name for
the schema types provided.
:param schema_types: Schema types.
:return: Dictionary mapping fully qualified names to schema dictionaries.
|
entailment
|
def global_add(self, key: str, value: Any) -> None:
"""
Adds a key and value to the global dictionary
"""
self.global_context[key] = value
|
Adds a key and value to the global dictionary
|
entailment
|
def merge(self, evaluation_context: 'EvaluationContext') -> None:
"""
Merges the provided evaluation context to the current evaluation context.
:param evaluation_context: Evaluation context to merge.
"""
self.global_context.merge(evaluation_context.global_context)
self.local_context.merge(evaluation_context.local_context)
|
Merges the provided evaluation context to the current evaluation context.
:param evaluation_context: Evaluation context to merge.
|
entailment
|
def evaluate(self, evaluation_context: EvaluationContext) -> Any:
"""
Evaluates the expression with the context provided. If the execution
results in failure, an ExpressionEvaluationException encapsulating the
underlying exception is raised.
:param evaluation_context: Global and local context dictionary to be passed for evaluation
"""
try:
if self.type == ExpressionType.EVAL:
return eval(self.code_object, evaluation_context.global_context,
evaluation_context.local_context)
elif self.type == ExpressionType.EXEC:
return exec(self.code_object, evaluation_context.global_context,
evaluation_context.local_context)
except Exception as err:
# Evaluation exceptions are expected because of missing fields in the source 'Record'.
logging.debug('{} in evaluating expression {}. Error: {}'.format(
type(err).__name__, self.code_string, err))
# These should result in an exception being raised:
# NameError - Exceptions thrown because of using names in the expression which are not
# present in EvaluationContext. A common cause for this is typos in the BTS.
# MissingAttributeError - Exception thrown when a BTS nested item is used which does not
# exist. Should only happen for erroneous BTSs.
# ImportError - Thrown when there is a failure in importing other modules.
if isinstance(err, (NameError, MissingAttributeError, ImportError)):
raise err
return None
|
Evaluates the expression with the context provided. If the execution
results in failure, an ExpressionEvaluationException encapsulating the
underlying exception is raised.
:param evaluation_context: Global and local context dictionary to be passed for evaluation
|
entailment
|
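A plain-Python sketch of the EVAL/EXEC split used by `evaluate` above, with no Blurr classes involved: `eval` returns the value of an expression, while `exec` runs statements (such as assignments) against the shared global/local dictionaries and returns None.

```python
global_ctx, local_ctx = {}, {'count': 2}

# Expression: eval returns a value computed from the contexts.
assert eval(compile('count + 1', '<expr>', 'eval'), global_ctx, local_ctx) == 3

# Statement: exec returns None, but its side effects land in the dictionaries.
exec(compile('count = count + 1', '<stmt>', 'exec'), global_ctx, local_ctx)
assert local_ctx['count'] == 3
```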
def _copy_files(source, target):
"""
Copy all the files in source directory to target.
Ignores subdirectories.
"""
source_files = listdir(source)
if not exists(target):
makedirs(target)
for filename in source_files:
full_filename = join(source, filename)
if isfile(full_filename):
shutil.copy(full_filename, target)
|
Copy all the files in source directory to target.
Ignores subdirectories.
|
entailment
|
def create_copy(self):
"""
Initialises a temporary directory structure and copy of MAGICC
configuration files and binary.
"""
if self.executable is None or not isfile(self.executable):
raise FileNotFoundError(
"Could not find MAGICC{} executable: {}".format(
self.version, self.executable
)
)
if self.is_temp:
assert (
self.root_dir is None
), "A temp copy for this instance has already been created"
self.root_dir = mkdtemp(prefix="pymagicc-")
if exists(self.run_dir):
raise Exception("A copy of MAGICC has already been created.")
if not exists(self.root_dir):
makedirs(self.root_dir)
exec_dir = basename(self.original_dir)
# Copy a subset of folders from the MAGICC `original_dir`
# Also copy anything which is in the root of the MAGICC distribution
# Assumes that the MAGICC binary is in a folder one level below the root
# of the MAGICC distribution. i.e. /run/magicc.exe or /bin/magicc
dirs_to_copy = [".", "bin", "run"]
# Check that the executable is in a valid sub directory
assert exec_dir in dirs_to_copy, "binary must be in bin/ or run/ directory"
for d in dirs_to_copy:
source_dir = abspath(join(self.original_dir, "..", d))
if exists(source_dir):
_copy_files(source_dir, join(self.root_dir, d))
# Create an empty out dir
# MAGICC assumes that the 'out' directory already exists
makedirs(join(self.root_dir, "out"))
# Create basic configuration files so magicc can run
self.set_years()
self.set_config()
|
Initialises a temporary directory structure and copy of MAGICC
configuration files and binary.
|
entailment
|
def run(self, scenario=None, only=None, **kwargs):
"""
Run MAGICC and parse the output.
As a reminder, putting ``out_parameters=1`` will cause MAGICC to write out its
parameters into ``out/PARAMETERS.OUT`` and they will then be read into
``output.metadata["parameters"]`` where ``output`` is the returned object.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run. If None MAGICC will simply run with whatever config has
already been set.
only : list of str
If not None, only extract variables in this list.
kwargs
Other config values to pass to MAGICC for the run
Returns
-------
:obj:`pymagicc.io.MAGICCData`
MAGICCData object containing that data in its ``df`` attribute and
metadata and parameters (depending on the value of ``include_parameters``)
in its ``metadata`` attribute.
Raises
------
ValueError
If no output is found which matches the list specified in ``only``.
"""
if not exists(self.root_dir):
raise FileNotFoundError(self.root_dir)
if self.executable is None:
raise ValueError(
"MAGICC executable not found, try setting an environment variable `MAGICC_EXECUTABLE_{}=/path/to/binary`".format(
self.version
)
)
if scenario is not None:
kwargs = self.set_emission_scenario_setup(scenario, kwargs)
yr_config = {}
if "startyear" in kwargs:
yr_config["startyear"] = kwargs.pop("startyear")
if "endyear" in kwargs:
yr_config["endyear"] = kwargs.pop("endyear")
if yr_config:
self.set_years(**yr_config)
# should be able to do some other nice metadata stuff re how magicc was run
# etc. here
kwargs.setdefault("rundate", get_date_time_string())
self.update_config(**kwargs)
self.check_config()
exec_dir = basename(self.original_dir)
command = [join(self.root_dir, exec_dir, self.binary_name)]
if not IS_WINDOWS and self.binary_name.endswith(".exe"): # pragma: no cover
command.insert(0, "wine")
# On Windows shell=True is required.
subprocess.check_call(command, cwd=self.run_dir, shell=IS_WINDOWS)
outfiles = self._get_output_filenames()
read_cols = {"climate_model": ["MAGICC{}".format(self.version)]}
if scenario is not None:
read_cols["model"] = scenario["model"].unique().tolist()
read_cols["scenario"] = scenario["scenario"].unique().tolist()
else:
read_cols.setdefault("model", ["unspecified"])
read_cols.setdefault("scenario", ["unspecified"])
mdata = None
for filepath in outfiles:
try:
openscm_var = _get_openscm_var_from_filepath(filepath)
if only is None or openscm_var in only:
tempdata = MAGICCData(
join(self.out_dir, filepath), columns=deepcopy(read_cols)
)
mdata = mdata.append(tempdata) if mdata is not None else tempdata
except (NoReaderWriterError, InvalidTemporalResError):
continue
if mdata is None:
error_msg = "No output found for only={}".format(only)
raise ValueError(error_msg)
try:
run_paras = self.read_parameters()
self.config = run_paras
mdata.metadata["parameters"] = run_paras
except FileNotFoundError:
pass
return mdata
|
Run MAGICC and parse the output.
As a reminder, putting ``out_parameters=1`` will cause MAGICC to write out its
parameters into ``out/PARAMETERS.OUT`` and they will then be read into
``output.metadata["parameters"]`` where ``output`` is the returned object.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run. If None MAGICC will simply run with whatever config has
already been set.
only : list of str
If not None, only extract variables in this list.
kwargs
Other config values to pass to MAGICC for the run
Returns
-------
:obj:`pymagicc.io.MAGICCData`
MAGICCData object containing that data in its ``df`` attribute and
metadata and parameters (depending on the value of ``include_parameters``)
in its ``metadata`` attribute.
Raises
------
ValueError
If no output is found which matches the list specified in ``only``.
|
entailment
|
def check_config(self):
"""Check that our MAGICC ``.CFG`` files are set to safely work with PYMAGICC
For further detail about why this is required, please see :ref:`MAGICC flags`.
Raises
------
ValueError
If we are not certain that the config written by PYMAGICC will overwrite
all other config i.e. that there will be no unexpected behaviour. A
ValueError will also be raised if the user tries to use more than one
scenario file.
"""
cfg_error_msg = (
"PYMAGICC is not the only tuning model that will be used by "
"`MAGCFG_USER.CFG`: your run is likely to fail/do odd things"
)
emisscen_error_msg = (
"You have more than one `FILE_EMISSCEN_X` flag set. Using more than "
"one emissions scenario is hard to debug and unnecessary with "
"Pymagicc's dataframe scenario input. Please combine all your "
"scenarios into one dataframe with Pymagicc and pandas, then feed "
"this single Dataframe into Pymagicc's run API."
)
nml_to_check = "nml_allcfgs"
usr_cfg = read_cfg_file(join(self.run_dir, "MAGCFG_USER.CFG"))
for k in usr_cfg[nml_to_check]:
if k.startswith("file_tuningmodel"):
first_tuningmodel = k in ["file_tuningmodel", "file_tuningmodel_1"]
if first_tuningmodel:
if usr_cfg[nml_to_check][k] != "PYMAGICC":
raise ValueError(cfg_error_msg)
elif usr_cfg[nml_to_check][k] not in ["USER", ""]:
raise ValueError(cfg_error_msg)
elif k.startswith("file_emisscen_"):
if usr_cfg[nml_to_check][k] not in ["NONE", ""]:
raise ValueError(emisscen_error_msg)
|
Check that our MAGICC ``.CFG`` files are set to safely work with PYMAGICC
For further detail about why this is required, please see :ref:`MAGICC flags`.
Raises
------
ValueError
If we are not certain that the config written by PYMAGICC will overwrite
all other config i.e. that there will be no unexpected behaviour. A
ValueError will also be raised if the user tries to use more than one
scenario file.
|
entailment
|
def write(self, mdata, name):
"""Write an input file to disk
Parameters
----------
mdata : :obj:`pymagicc.io.MAGICCData`
A MAGICCData instance with the data to write
name : str
The name of the file to write. The file will be written to the MAGICC
instance's run directory i.e. ``self.run_dir``
"""
mdata.write(join(self.run_dir, name), self.version)
|
Write an input file to disk
Parameters
----------
mdata : :obj:`pymagicc.io.MAGICCData`
A MAGICCData instance with the data to write
name : str
The name of the file to write. The file will be written to the MAGICC
instance's run directory i.e. ``self.run_dir``
|
entailment
|
def read_parameters(self):
"""
Read a parameters.out file
Returns
-------
dict
A dictionary containing all the configuration used by MAGICC
"""
param_fname = join(self.out_dir, "PARAMETERS.OUT")
if not exists(param_fname):
raise FileNotFoundError("No PARAMETERS.OUT found")
with open(param_fname) as nml_file:
parameters = dict(f90nml.read(nml_file))
for group in ["nml_years", "nml_allcfgs", "nml_outputcfgs"]:
parameters[group] = dict(parameters[group])
for k, v in parameters[group].items():
parameters[group][k] = _clean_value(v)
parameters[group.replace("nml_", "")] = parameters.pop(group)
self.config = parameters
return parameters
|
Read a parameters.out file
Returns
-------
dict
A dictionary containing all the configuration used by MAGICC
|
entailment
|
def remove_temp_copy(self):
"""
Removes a temporary copy of the MAGICC version shipped with Pymagicc.
"""
if self.is_temp and self.root_dir is not None:
shutil.rmtree(self.root_dir)
self.root_dir = None
|
Removes a temporary copy of the MAGICC version shipped with Pymagicc.
|
entailment
|
def set_config(
self, filename="MAGTUNE_PYMAGICC.CFG", top_level_key="nml_allcfgs", **kwargs
):
"""
Create a configuration file for MAGICC.
Writes a fortran namelist in run_dir.
Parameters
----------
filename : str
Name of configuration file to write
top_level_key : str
Name of namelist to be written in the
configuration file
kwargs
Other parameters to pass to the configuration file. No
validation on the parameters is performed.
Returns
-------
dict
The contents of the namelist which was written to file
"""
kwargs = self._format_config(kwargs)
fname = join(self.run_dir, filename)
conf = {top_level_key: kwargs}
f90nml.write(conf, fname, force=True)
return conf
|
Create a configuration file for MAGICC.
Writes a fortran namelist in run_dir.
Parameters
----------
filename : str
Name of configuration file to write
top_level_key : str
Name of namelist to be written in the
configuration file
kwargs
Other parameters to pass to the configuration file. No
validation on the parameters is performed.
Returns
-------
dict
The contents of the namelist which was written to file
|
entailment
|
def update_config(
self, filename="MAGTUNE_PYMAGICC.CFG", top_level_key="nml_allcfgs", **kwargs
):
"""Updates a configuration file for MAGICC
Updates the contents of a fortran namelist in the run directory,
creating a new namelist if none exists.
Parameters
----------
filename : str
Name of configuration file to write
top_level_key : str
Name of namelist to be written in the
configuration file
kwargs
Other parameters to pass to the configuration file. No
validation on the parameters is performed.
Returns
-------
dict
The contents of the namelist which was written to file
"""
kwargs = self._format_config(kwargs)
fname = join(self.run_dir, filename)
if exists(fname):
conf = f90nml.read(fname)
else:
conf = {top_level_key: {}}
conf[top_level_key].update(kwargs)
f90nml.write(conf, fname, force=True)
return conf
|
Updates a configuration file for MAGICC
Updates the contents of a fortran namelist in the run directory,
creating a new namelist if none exists.
Parameters
----------
filename : str
Name of configuration file to write
top_level_key : str
Name of namelist to be written in the
configuration file
kwargs
Other parameters to pass to the configuration file. No
validation on the parameters is performed.
Returns
-------
dict
The contents of the namelist which was written to file
|
entailment
|
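A hedged usage sketch chaining the configuration helpers above with `run()`; `magicc` stands for an instance of the wrapper class, and the parameter names, values, and the variable filter are illustrative rather than taken from the source.

```python
# Hypothetical instance and illustrative parameter values only.
magicc.set_config(top_level_key='nml_allcfgs', rf_total_constantafteryr=2100)
magicc.update_config(rundate='2019-01-01 00:00:00')  # merged into the same namelist file
results = magicc.run(only=['Surface Temperature'])   # variable name is illustrative
print(results.metadata.get('parameters', {}))
```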
def set_zero_config(self):
"""Set config such that radiative forcing and temperature output will be zero
        This method is intended as a convenience only; it does not handle everything in
        an obvious way. Adjusting the parameter settings still requires great care and
        may behave unexpectedly.
"""
# zero_emissions is imported from scenarios module
zero_emissions.write(join(self.run_dir, self._scen_file_name), self.version)
time = zero_emissions.filter(variable="Emissions|CH4", region="World")[
"time"
].values
no_timesteps = len(time)
# value doesn't actually matter as calculations are done from difference but
        # choose a sensible value nonetheless
ch4_conc_pi = 722
ch4_conc = ch4_conc_pi * np.ones(no_timesteps)
ch4_conc_df = pd.DataFrame(
{
"time": time,
"scenario": "idealised",
"model": "unspecified",
"climate_model": "unspecified",
"variable": "Atmospheric Concentrations|CH4",
"unit": "ppb",
"todo": "SET",
"region": "World",
"value": ch4_conc,
}
)
ch4_conc_writer = MAGICCData(ch4_conc_df)
ch4_conc_filename = "HIST_CONSTANT_CH4_CONC.IN"
ch4_conc_writer.metadata = {
"header": "Constant pre-industrial CH4 concentrations"
}
ch4_conc_writer.write(join(self.run_dir, ch4_conc_filename), self.version)
fgas_conc_pi = 0
fgas_conc = fgas_conc_pi * np.ones(no_timesteps)
# MAGICC6 doesn't read this, so it's not a problem; for MAGICC7 we might have to
# write each file separately
varname = "FGAS_CONC"
fgas_conc_df = pd.DataFrame(
{
"time": time,
"scenario": "idealised",
"model": "unspecified",
"climate_model": "unspecified",
"variable": varname,
"unit": "ppt",
"todo": "SET",
"region": "World",
"value": fgas_conc,
}
)
fgas_conc_writer = MAGICCData(fgas_conc_df)
fgas_conc_filename = "HIST_ZERO_{}.IN".format(varname)
fgas_conc_writer.metadata = {"header": "Zero concentrations"}
fgas_conc_writer.write(join(self.run_dir, fgas_conc_filename), self.version)
emis_config = self._fix_any_backwards_emissions_scen_key_in_config(
{"file_emissionscenario": self._scen_file_name}
)
self.set_config(
**emis_config,
rf_initialization_method="ZEROSTARTSHIFT",
rf_total_constantafteryr=10000,
file_co2i_emis="",
file_co2b_emis="",
co2_switchfromconc2emis_year=1750,
file_ch4i_emis="",
file_ch4b_emis="",
file_ch4n_emis="",
file_ch4_conc=ch4_conc_filename,
ch4_switchfromconc2emis_year=10000,
file_n2oi_emis="",
file_n2ob_emis="",
file_n2on_emis="",
file_n2o_conc="",
n2o_switchfromconc2emis_year=1750,
file_noxi_emis="",
file_noxb_emis="",
file_noxi_ot="",
file_noxb_ot="",
file_noxt_rf="",
file_soxnb_ot="",
file_soxi_ot="",
file_soxt_rf="",
file_soxi_emis="",
file_soxb_emis="",
file_soxn_emis="",
file_oci_emis="",
file_ocb_emis="",
file_oci_ot="",
file_ocb_ot="",
file_oci_rf="",
file_ocb_rf="",
file_bci_emis="",
file_bcb_emis="",
file_bci_ot="",
file_bcb_ot="",
file_bci_rf="",
file_bcb_rf="",
bcoc_switchfromrf2emis_year=1750,
file_nh3i_emis="",
file_nh3b_emis="",
file_nmvoci_emis="",
file_nmvocb_emis="",
file_coi_emis="",
file_cob_emis="",
file_mineraldust_rf="",
file_landuse_rf="",
file_bcsnow_rf="",
# rf_fgassum_scale=0, # this appears to do nothing, hence the next two lines
file_fgas_conc=[fgas_conc_filename] * 12,
fgas_switchfromconc2emis_year=10000,
rf_mhalosum_scale=0,
mhalo_switch_conc2emis_yr=1750,
stratoz_o3scale=0,
rf_volcanic_scale=0,
rf_solar_scale=0,
)
|
Set config such that radiative forcing and temperature output will be zero
This method is intended as a convenience only; it does not handle everything in
an obvious way. Adjusting the parameter settings still requires great care and
may behave unexpectedly.
|
entailment
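A hedged sketch of how the method above might be exercised, reusing the run/only pattern that appears later in diagnose_tcr_ecs; whether the binary is available and how close the output is to zero depends on the MAGICC version shipped with pymagicc.
from pymagicc import MAGICC6

with MAGICC6() as magicc:
    magicc.set_zero_config()
    results = magicc.run(
        scenario=None,
        only=["Radiative Forcing", "Surface Temperature"],
    )
    # with this configuration both timeseries should be (close to) zero throughout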
|
def set_years(self, startyear=1765, endyear=2100):
"""
Set the start and end dates of the simulations.
Parameters
----------
startyear : int
Start year of the simulation
endyear : int
End year of the simulation
Returns
-------
dict
The contents of the namelist
"""
# TODO: test altering stepsperyear, I think 1, 2 and 24 should all work
return self.set_config(
"MAGCFG_NMLYEARS.CFG",
"nml_years",
endyear=endyear,
startyear=startyear,
stepsperyear=12,
)
|
Set the start and end dates of the simulations.
Parameters
----------
startyear : int
Start year of the simulation
endyear : int
End year of the simulation
Returns
-------
dict
The contents of the namelist
|
entailment
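A short hedged sketch under the same MAGICC6 assumptions; the return value follows directly from set_config returning the namelist it wrote.
from pymagicc import MAGICC6

with MAGICC6() as magicc:
    nml = magicc.set_years(startyear=1765, endyear=2005)
    print(nml["nml_years"]["endyear"])  # 2005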
|
def set_output_variables(self, write_ascii=True, write_binary=False, **kwargs):
"""Set the output configuration, minimising output as much as possible
There are a number of configuration parameters which control which variables
are written to file and in which format. Limiting the variables that are
written to file can greatly speed up the running of MAGICC. By default,
calling this function without specifying any variables will disable all output
by setting all of MAGICC's ``out_xx`` flags to ``0``.
This convenience function should not be confused with ``set_config`` or
``update_config`` which allow the user to set/update the configuration flags
directly, without the more convenient syntax and default behaviour provided by
this function.
Parameters
----------
write_ascii : bool
If True, MAGICC is configured to write output files as human-readable ASCII files.
write_binary : bool
If True, MAGICC is configured to write binary output files. These files are much faster
to process and write, but are not human-readable.
**kwargs:
List of variables to write out. A list of possible options is as follows. This
may not be a complete list.
'emissions',
'gwpemissions',
'sum_gwpemissions',
'concentrations',
'carboncycle',
'forcing',
'surfaceforcing',
'permafrost',
'temperature',
'sealevel',
'parameters',
'misc',
'lifetimes',
'timeseriesmix',
'rcpdata',
'summaryidx',
'inverseemis',
'tempoceanlayers',
'oceanarea',
'heatuptake',
'warnings',
'precipinput',
'aogcmtuning',
'ccycletuning',
'observationaltuning',
'keydata_1',
'keydata_2'
"""
assert (
write_ascii or write_binary
), "write_binary and/or write_ascii must be configured"
if write_binary and write_ascii:
ascii_binary = "BOTH"
elif write_ascii:
ascii_binary = "ASCII"
else:
ascii_binary = "BINARY"
# defaults
outconfig = {
"out_emissions": 0,
"out_gwpemissions": 0,
"out_sum_gwpemissions": 0,
"out_concentrations": 0,
"out_carboncycle": 0,
"out_forcing": 0,
"out_surfaceforcing": 0,
"out_permafrost": 0,
"out_temperature": 0,
"out_sealevel": 0,
"out_parameters": 0,
"out_misc": 0,
"out_timeseriesmix": 0,
"out_rcpdata": 0,
"out_summaryidx": 0,
"out_inverseemis": 0,
"out_tempoceanlayers": 0,
"out_heatuptake": 0,
"out_ascii_binary": ascii_binary,
"out_warnings": 0,
"out_precipinput": 0,
"out_aogcmtuning": 0,
"out_ccycletuning": 0,
"out_observationaltuning": 0,
"out_keydata_1": 0,
"out_keydata_2": 0,
}
if self.version == 7:
outconfig["out_oceanarea"] = 0
outconfig["out_lifetimes"] = 0
for kw in kwargs:
val = 1 if kwargs[kw] else 0 # convert values to 0/1 instead of booleans
outconfig["out_" + kw.lower()] = val
self.update_config(**outconfig)
|
Set the output configuration, minimising output as much as possible
There are a number of configuration parameters which control which variables
are written to file and in which format. Limiting the variables that are
written to file can greatly speed up the running of MAGICC. By default,
calling this function without specifying any variables will disable all output
by setting all of MAGICC's ``out_xx`` flags to ``0``.
This convenience function should not be confused with ``set_config`` or
``update_config`` which allow the user to set/update the configuration flags
directly, without the more convenient syntax and default behaviour provided by
this function.
Parameters
----------
write_ascii : bool
If True, MAGICC is configured to write output files as human-readable ASCII files.
write_binary : bool
If True, MAGICC is configured to write binary output files. These files are much faster
to process and write, but are not human-readable.
**kwargs:
List of variables to write out. A list of possible options is as follows. This
may not be a complete list.
'emissions',
'gwpemissions',
'sum_gwpemissions',
'concentrations',
'carboncycle',
'forcing',
'surfaceforcing',
'permafrost',
'temperature',
'sealevel',
'parameters',
'misc',
'lifetimes',
'timeseriesmix',
'rcpdata',
'summaryidx',
'inverseemis',
'tempoceanlayers',
'oceanarea',
'heatuptake',
'warnings',
'precipinput',
'aogcmtuning',
'ccycletuning',
'observationaltuning',
'keydata_1',
'keydata_2'
|
entailment
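A hedged usage sketch, same MAGICC6 assumptions as above: everything is switched off except the two named variables, and output is written as ASCII.
from pymagicc import MAGICC6

with MAGICC6() as magicc:
    # sets out_temperature=1 and out_forcing=1, all other out_xx flags stay 0
    magicc.set_output_variables(temperature=True, forcing=True, write_ascii=True)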
|
def diagnose_tcr_ecs(self, **kwargs):
"""Diagnose TCR and ECS
The transient climate response (TCR) is the global-mean temperature response
at the time at which atmospheric |CO2| concentrations double in a scenario where
atmospheric |CO2| concentrations are increased at 1% per year from
pre-industrial levels.
The equilibrium climate sensitivity (ECS) is the equilibrium global-mean
temperature response to an instantaneous doubling of atmospheric |CO2|
concentrations.
As MAGICC has no hysteresis in its equilibrium response to radiative forcing,
we can diagnose TCR and ECS with one experiment. However, please note that
sometimes the run length won't be long enough to allow MAGICC's oceans to
fully equilibrate and hence the ECS value might not be what you expect (it
should match the value of ``core_climatesensitivity``).
Parameters
----------
**kwargs
parameter values to use in the diagnosis e.g. ``core_climatesensitivity=4``
Returns
-------
dict
Dictionary with keys: "ecs" - the diagnosed ECS; "tcr" - the diagnosed
TCR; "timeseries" - the relevant model input and output timeseries used in
the experiment i.e. atmospheric |CO2| concentrations, total radiative
forcing and global-mean surface temperature
"""
if self.version == 7:
raise NotImplementedError("MAGICC7 cannot yet diagnose ECS and TCR")
self._diagnose_tcr_ecs_config_setup(**kwargs)
timeseries = self.run(
scenario=None,
only=[
"Atmospheric Concentrations|CO2",
"Radiative Forcing",
"Surface Temperature",
],
)
tcr, ecs = self._get_tcr_ecs_from_diagnosis_results(timeseries)
return {"tcr": tcr, "ecs": ecs, "timeseries": timeseries}
|
Diagnose TCR and ECS
The transient climate response (TCR) is the global-mean temperature response
at the time at which atmospheric |CO2| concentrations double in a scenario where
atmospheric |CO2| concentrations are increased at 1% per year from
pre-industrial levels.
The equilibrium climate sensitivity (ECS) is the equilibrium global-mean
temperature response to an instantaneous doubling of atmospheric |CO2|
concentrations.
As MAGICC has no hysteresis in its equilibrium response to radiative forcing,
we can diagnose TCR and ECS with one experiment. However, please note that
sometimes the run length won't be long enough to allow MAGICC's oceans to
fully equilibrate and hence the ECS value might not be what you expect (it
should match the value of ``core_climatesensitivity``).
Parameters
----------
**kwargs
parameter values to use in the diagnosis e.g. ``core_climatesensitivity=4``
Returns
-------
dict
Dictionary with keys: "ecs" - the diagnosed ECS; "tcr" - the diagnosed
TCR; "timeseries" - the relevant model input and output timeseries used in
the experiment i.e. atmospheric |CO2| concentrations, total radiative
forcing and global-mean surface temperature
|
entailment
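A hedged sketch based only on the return structure documented above; the numbers depend on the MAGICC6 binary bundled with pymagicc.
from pymagicc import MAGICC6

with MAGICC6() as magicc:
    res = magicc.diagnose_tcr_ecs(core_climatesensitivity=3.0)
    print(res["tcr"], res["ecs"])   # diagnosed warming at and after CO2 doubling
    timeseries = res["timeseries"]  # CO2 concentrations, forcing and temperature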
|
def set_emission_scenario_setup(self, scenario, config_dict):
"""Set the emissions flags correctly.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run.
config_dict : dict
Dictionary with current input configurations which is to be validated and
updated where necessary.
Returns
-------
dict
Updated configuration
"""
self.write(scenario, self._scen_file_name)
# can be lazy in this line as fix backwards key handles errors for us
config_dict["file_emissionscenario"] = self._scen_file_name
config_dict = self._fix_any_backwards_emissions_scen_key_in_config(config_dict)
return config_dict
|
Set the emissions flags correctly.
Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
Scenario to run.
config_dict : dict
Dictionary with current input configurations which is to be validated and
updated where necessary.
Returns
-------
dict
Updated configuration
|
entailment
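A hedged sketch; importing rcp26 as a ready-made MAGICCData scenario is an assumption (the text above only guarantees that zero_emissions lives in the scenarios module).
from pymagicc import MAGICC6
from pymagicc.scenarios import rcp26  # assumed scenario object; any MAGICCData works

with MAGICC6() as magicc:
    config = magicc.set_emission_scenario_setup(rcp26, {})
    # the scenario has been written to run_dir and the config points at it
    print(config["file_emissionscenario"])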
|
def contains(value: Union[str, 'Type']) -> bool:
""" Checks if a type is defined """
if isinstance(value, str):
return any(value.lower() == i.value for i in Type)
return any(value == i for i in Type)
|
Checks if a type is defined
|
entailment
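A self-contained sketch of the same pattern with a hypothetical string-backed Type enum; the members are placeholders, not the original enum.
from enum import Enum
from typing import Union

class Type(Enum):
    A = "a"
    CNAME = "cname"
    TXT = "txt"

def contains(value: Union[str, Type]) -> bool:
    # strings are matched case-insensitively against the enum values,
    # enum members are matched by identity
    if isinstance(value, str):
        return any(value.lower() == i.value for i in Type)
    return any(value == i for i in Type)

print(contains("CNAME"), contains(Type.A), contains("mx"))  # True True False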
|
def xml_replace(filename, **replacements):
"""Read the content of an XML template file (XMLT), apply the given
`replacements` to its substitution markers, and write the result into
an XML file with the same name but ending with `xml` instead of `xmlt`.
First, we write an XMLT file, containing a regular HTML comment, a
readily defined element `e1`, and some other elements with
substitution markers. Substitution markers are HTML comments
starting and ending with the `|` character:
>>> from hydpy import xml_replace, TestIO
>>> with TestIO():
... with open('test1.xmlt', 'w') as templatefile:
... _ = templatefile.write(
... '<!--a normal comment-->\\n'
... '<e1>element 1</e1>\\n'
... '<e2><!--|e2|--></e2>\\n'
... '<e3><!--|e3_|--></e3>\\n'
... '<e4><!--|e4=element 4|--></e4>\\n'
... '<e2><!--|e2|--></e2>')
Function |xml_replace| can be called both from within a Python session and
from the command line. We start with the first type of application.
Each substitution marker must be met by a keyword argument unless
it holds a default value (`e4`). All arguments are converted to
a |str| object (`e3`). Template files can use the same substitution
marker multiple times (`e2`):
>>> with TestIO():
... xml_replace('test1', e2='E2', e3_=3, e4='ELEMENT 4')
template file: test1.xmlt
target file: test1.xml
replacements:
e2 --> E2 (given argument)
e3_ --> 3 (given argument)
e4 --> ELEMENT 4 (given argument)
e2 --> E2 (given argument)
>>> with TestIO():
... with open('test1.xml') as targetfile:
... print(targetfile.read())
<!--a normal comment-->
<e1>element 1</e1>
<e2>E2</e2>
<e3>3</e3>
<e4>ELEMENT 4</e4>
<e2>E2</e2>
Without custom values, |xml_replace| applies predefined default
values, if available (`e4`):
>>> with TestIO():
... xml_replace('test1', e2='E2', e3_=3) # doctest: +ELLIPSIS
template file: test1.xmlt
target file: test1.xml
replacements:
e2 --> E2 (given argument)
e3_ --> 3 (given argument)
e4 --> element 4 (default argument)
e2 --> E2 (given argument)
>>> with TestIO():
... with open('test1.xml') as targetfile:
... print(targetfile.read())
<!--a normal comment-->
<e1>element 1</e1>
<e2>E2</e2>
<e3>3</e3>
<e4>element 4</e4>
<e2>E2</e2>
Missing and useless keyword arguments result in errors:
>>> with TestIO():
... xml_replace('test1', e2='E2')
Traceback (most recent call last):
...
RuntimeError: While trying to replace the markers `e2, e3_, and e4` \
of the XML template file `test1.xmlt` with the available keywords `e2`, \
the following error occurred: Marker `e3_` cannot be replaced.
>>> with TestIO():
... xml_replace('test1', e2='e2', e3_='E3', e4='e4', e5='e5')
Traceback (most recent call last):
...
RuntimeError: While trying to replace the markers `e2, e3_, and e4` \
of the XML template file `test1.xmlt` with the available keywords `e2, e3_, \
e4, and e5`, the following error occurred: Keyword(s) `e5` cannot be used.
Using different default values for the same substitution marker
is not allowed:
>>> from hydpy import pub, TestIO, xml_replace
>>> with TestIO():
... with open('test2.xmlt', 'w') as templatefile:
... _ = templatefile.write(
... '<e4><!--|e4=element 4|--></e4>\\n'
... '<e4><!--|e4=ELEMENT 4|--></e4>')
>>> with TestIO():
... xml_replace('test2', e4=4)
template file: test2.xmlt
target file: test2.xml
replacements:
e4 --> 4 (given argument)
e4 --> 4 (given argument)
>>> with TestIO():
... with open('test2.xml') as targetfile:
... print(targetfile.read())
<e4>4</e4>
<e4>4</e4>
>>> with TestIO():
... xml_replace('test2')
Traceback (most recent call last):
...
RuntimeError: Template file `test2.xmlt` defines different default values \
for marker `e4`.
As mentioned above, function |xml_replace| is registered as a "script
function" and can thus be used via command line:
>>> pub.scriptfunctions['xml_replace'].__name__
'xml_replace'
>>> pub.scriptfunctions['xml_replace'].__module__
'hydpy.exe.replacetools'
Use script |hyd| to execute function |xml_replace|:
>>> from hydpy import run_subprocess
>>> with TestIO():
... run_subprocess(
... 'hyd.py xml_replace test1 e2="Element 2" e3_=3')
template file: test1.xmlt
target file: test1.xml
replacements:
e2 --> Element 2 (given argument)
e3_ --> 3 (given argument)
e4 --> element 4 (default argument)
e2 --> Element 2 (given argument)
>>> with TestIO():
... with open('test1.xml') as targetfile:
... print(targetfile.read())
<!--a normal comment-->
<e1>element 1</e1>
<e2>Element 2</e2>
<e3>3</e3>
<e4>element 4</e4>
<e2>Element 2</e2>
"""
keywords = set(replacements.keys())
templatename = f'{filename}.xmlt'
targetname = f'{filename}.xml'
print(f'template file: {templatename}')
print(f'target file: {targetname}')
print('replacements:')
with open(templatename) as templatefile:
templatebody = templatefile.read()
parts = templatebody.replace('<!--|', '|-->').split('|-->')
defaults = {}
for idx, part in enumerate(parts):
if idx % 2:
subparts = part.partition('=')
if subparts[2]:
parts[idx] = subparts[0]
if subparts[0] not in replacements:
if ((subparts[0] in defaults) and
(defaults[subparts[0]] != str(subparts[2]))):
raise RuntimeError(
f'Template file `{templatename}` defines '
f'different default values for marker '
f'`{subparts[0]}`.')
defaults[subparts[0]] = str(subparts[2])
markers = parts[1::2]
try:
unused_keywords = keywords.copy()
for idx, part in enumerate(parts):
if idx % 2:
argument_info = 'given argument'
newpart = replacements.get(part)
if newpart is None:
argument_info = 'default argument'
newpart = defaults.get(part)
if newpart is None:
raise RuntimeError(
f'Marker `{part}` cannot be replaced.')
print(f' {part} --> {newpart} ({argument_info})')
parts[idx] = str(newpart)
unused_keywords.discard(part)
targetbody = ''.join(parts)
if unused_keywords:
raise RuntimeError(
f'Keyword(s) `{objecttools.enumeration(unused_keywords)}` '
f'cannot be used.')
with open(targetname, 'w') as targetfile:
targetfile.write(targetbody)
except BaseException:
objecttools.augment_excmessage(
f'While trying to replace the markers '
f'`{objecttools.enumeration(sorted(set(markers)))}` of the '
f'XML template file `{templatename}` with the available '
f'keywords `{objecttools.enumeration(sorted(keywords))}`')
|
Read the content of an XML template file (XMLT), apply the given
`replacements` to its substitution markers, and write the result into
an XML file with the same name but ending with `xml` instead of `xmlt`.
First, we write an XMLT file, containing a regular HTML comment, a
readily defined element `e1`, and some other elements with
substitution markers. Substitution markers are HTML comments
starting and ending with the `|` character:
>>> from hydpy import xml_replace, TestIO
>>> with TestIO():
... with open('test1.xmlt', 'w') as templatefile:
... _ = templatefile.write(
... '<!--a normal comment-->\\n'
... '<e1>element 1</e1>\\n'
... '<e2><!--|e2|--></e2>\\n'
... '<e3><!--|e3_|--></e3>\\n'
... '<e4><!--|e4=element 4|--></e4>\\n'
... '<e2><!--|e2|--></e2>')
Function |xml_replace| can be called both from within a Python session and
from the command line. We start with the first type of application.
Each substitution marker must be met by a keyword argument unless
it holds a default value (`e4`). All arguments are converted to
a |str| object (`e3`). Template files can use the same substitution
marker multiple times (`e2`):
>>> with TestIO():
... xml_replace('test1', e2='E2', e3_=3, e4='ELEMENT 4')
template file: test1.xmlt
target file: test1.xml
replacements:
e2 --> E2 (given argument)
e3_ --> 3 (given argument)
e4 --> ELEMENT 4 (given argument)
e2 --> E2 (given argument)
>>> with TestIO():
... with open('test1.xml') as targetfile:
... print(targetfile.read())
<!--a normal comment-->
<e1>element 1</e1>
<e2>E2</e2>
<e3>3</e3>
<e4>ELEMENT 4</e4>
<e2>E2</e2>
Without custom values, |xml_replace| applies predefined default
values, if available (`e4`):
>>> with TestIO():
... xml_replace('test1', e2='E2', e3_=3) # doctest: +ELLIPSIS
template file: test1.xmlt
target file: test1.xml
replacements:
e2 --> E2 (given argument)
e3_ --> 3 (given argument)
e4 --> element 4 (default argument)
e2 --> E2 (given argument)
>>> with TestIO():
... with open('test1.xml') as targetfile:
... print(targetfile.read())
<!--a normal comment-->
<e1>element 1</e1>
<e2>E2</e2>
<e3>3</e3>
<e4>element 4</e4>
<e2>E2</e2>
Missing and useless keyword arguments result in errors:
>>> with TestIO():
... xml_replace('test1', e2='E2')
Traceback (most recent call last):
...
RuntimeError: While trying to replace the markers `e2, e3_, and e4` \
of the XML template file `test1.xmlt` with the available keywords `e2`, \
the following error occurred: Marker `e3_` cannot be replaced.
>>> with TestIO():
... xml_replace('test1', e2='e2', e3_='E3', e4='e4', e5='e5')
Traceback (most recent call last):
...
RuntimeError: While trying to replace the markers `e2, e3_, and e4` \
of the XML template file `test1.xmlt` with the available keywords `e2, e3_, \
e4, and e5`, the following error occurred: Keyword(s) `e5` cannot be used.
Using different default values for the same substitution marker
is not allowed:
>>> from hydpy import pub, TestIO, xml_replace
>>> with TestIO():
... with open('test2.xmlt', 'w') as templatefile:
... _ = templatefile.write(
... '<e4><!--|e4=element 4|--></e4>\\n'
... '<e4><!--|e4=ELEMENT 4|--></e4>')
>>> with TestIO():
... xml_replace('test2', e4=4)
template file: test2.xmlt
target file: test2.xml
replacements:
e4 --> 4 (given argument)
e4 --> 4 (given argument)
>>> with TestIO():
... with open('test2.xml') as targetfile:
... print(targetfile.read())
<e4>4</e4>
<e4>4</e4>
>>> with TestIO():
... xml_replace('test2')
Traceback (most recent call last):
...
RuntimeError: Template file `test2.xmlt` defines different default values \
for marker `e4`.
As mentioned above, function |xml_replace| is registered as a "script
function" and can thus be used via command line:
>>> pub.scriptfunctions['xml_replace'].__name__
'xml_replace'
>>> pub.scriptfunctions['xml_replace'].__module__
'hydpy.exe.replacetools'
Use script |hyd| to execute function |xml_replace|:
>>> from hydpy import run_subprocess
>>> with TestIO():
... run_subprocess(
... 'hyd.py xml_replace test1 e2="Element 2" e3_=3')
template file: test1.xmlt
target file: test1.xml
replacements:
e2 --> Element 2 (given argument)
e3_ --> 3 (given argument)
e4 --> element 4 (default argument)
e2 --> Element 2 (given argument)
>>> with TestIO():
... with open('test1.xml') as targetfile:
... print(targetfile.read())
<!--a normal comment-->
<e1>element 1</e1>
<e2>Element 2</e2>
<e3>3</e3>
<e4>element 4</e4>
<e2>Element 2</e2>
|
entailment
|
def _calcidxs(func):
"""Return the required indexes based on the given lambda function
and the |Timegrids| object handled by module |pub|. Raise a
|RuntimeError| if the latter is not available.
"""
timegrids = hydpy.pub.get('timegrids')
if timegrids is None:
raise RuntimeError(
'An Indexer object has been asked for an %s array. Such an '
'array has neither been determined yet nor can it be '
'determined automatically at the moment. Either define an '
'%s array manually and pass it to the Indexer object, or make '
'a proper Timegrids object available within the pub module. '
'In usual HydPy applications, the latter is done '
'automatically.'
% (func.__name__, func.__name__))
idxs = numpy.empty(len(timegrids.init), dtype=int)
for jdx, date in enumerate(hydpy.pub.timegrids.init):
idxs[jdx] = func(date)
return idxs
|
Return the required indexes based on the given lambda function
and the |Timegrids| object handled by module |pub|. Raise a
|RuntimeError| if the latter is not available.
|
entailment
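A self-contained sketch of the indexing idea without hydpy: a plain list of dates stands in for the Timegrids init period, and the mapped function deliberately lacks the leap-year shift applied by dayofyear below.
import numpy
from datetime import date, timedelta

# stand-in for timegrids.init: four daily steps starting 27 February 2005
init = [date(2005, 2, 27) + timedelta(days=i) for i in range(4)]

def calcidxs(func):
    idxs = numpy.empty(len(init), dtype=int)
    for jdx, d in enumerate(init):
        idxs[jdx] = func(d)
    return idxs

print(calcidxs(lambda d: d.timetuple().tm_yday - 1))  # [57 58 59 60]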
|
def dayofyear(self):
"""Day of the year index (the first of January = 0...).
For reasons of consistency between leap years and non-leap years,
assuming a daily time step, index 59 is always associated with the
29th of February. Hence, it is missing in non-leap years:
>>> from hydpy import pub
>>> from hydpy.core.indextools import Indexer
>>> pub.timegrids = '27.02.2004', '3.03.2004', '1d'
>>> Indexer().dayofyear
array([57, 58, 59, 60, 61])
>>> pub.timegrids = '27.02.2005', '3.03.2005', '1d'
>>> Indexer().dayofyear
array([57, 58, 60, 61])
"""
def _dayofyear(date):
return (date.dayofyear-1 +
((date.month > 2) and (not date.leapyear)))
return _dayofyear
|
Day of the year index (the first of January = 0...).
For reasons of consistency between leap years and non-leap years,
assuming a daily time step, index 59 is always associated with the
29th of February. Hence, it is missing in non-leap years:
>>> from hydpy import pub
>>> from hydpy.core.indextools import Indexer
>>> pub.timegrids = '27.02.2004', '3.03.2004', '1d'
>>> Indexer().dayofyear
array([57, 58, 59, 60, 61])
>>> pub.timegrids = '27.02.2005', '3.03.2005', '1d'
>>> Indexer().dayofyear
array([57, 58, 60, 61])
|
entailment
|
def timeofyear(self):
"""Time of the year index (first simulation step of each year = 0...).
The property |Indexer.timeofyear| is best explained through
comparing it with property |Indexer.dayofyear|:
Let us reconsider one of the examples of the documentation on
property |Indexer.dayofyear|:
>>> from hydpy import pub
>>> from hydpy import Timegrids, Timegrid
>>> from hydpy.core.indextools import Indexer
>>> pub.timegrids = '27.02.2005', '3.03.2005', '1d'
Due to the simulation stepsize being one day, the index arrays
calculated by both properties are identical:
>>> Indexer().dayofyear
array([57, 58, 60, 61])
>>> Indexer().timeofyear
array([57, 58, 60, 61])
In the next example the step size is halved:
>>> pub.timegrids = '27.02.2005', '3.03.2005', '12h'
Now there are generally two subsequent simulation steps associated
with the same day:
>>> Indexer().dayofyear
array([57, 57, 58, 58, 60, 60, 61, 61])
However, the `timeofyear` array gives the index of the
respective simulation steps of the actual year:
>>> Indexer().timeofyear
array([114, 115, 116, 117, 120, 121, 122, 123])
Note the gap in the returned index array due to 2005 not being a
leap year.
"""
refgrid = timetools.Timegrid(
timetools.Date('2000.01.01'),
timetools.Date('2001.01.01'),
hydpy.pub.timegrids.stepsize)
def _timeofyear(date):
date = copy.deepcopy(date)
date.year = 2000
return refgrid[date]
return _timeofyear
|
Time of the year index (first simulation step of each year = 0...).
The property |Indexer.timeofyear| is best explained through
comparing it with property |Indexer.dayofyear|:
Let us reconsider one of the examples of the documentation on
property |Indexer.dayofyear|:
>>> from hydpy import pub
>>> from hydpy import Timegrids, Timegrid
>>> from hydpy.core.indextools import Indexer
>>> pub.timegrids = '27.02.2005', '3.03.2005', '1d'
Due to the simulation stepsize being one day, the index arrays
calculated by both properties are identical:
>>> Indexer().dayofyear
array([57, 58, 60, 61])
>>> Indexer().timeofyear
array([57, 58, 60, 61])
In the next example the step size is halved:
>>> pub.timegrids = '27.02.2005', '3.03.2005', '12h'
Now there are generally two subsequent simulation steps associated
with the same day:
>>> Indexer().dayofyear
array([57, 57, 58, 58, 60, 60, 61, 61])
However, the `timeofyear` array gives the index of the
respective simulation steps of the actual year:
>>> Indexer().timeofyear
array([114, 115, 116, 117, 120, 121, 122, 123])
Note the gap in the returned index array due to 2005 not being a
leap year.
|
entailment
|
def set_doc(self, doc: str):
"""Assign the given docstring to the property instance and, if
possible, to the `__test__` dictionary of the module of its
owner class."""
self.__doc__ = doc
if hasattr(self, 'module'):
ref = f'{self.objtype.__name__}.{self.name}'
self.module.__dict__['__test__'][ref] = doc
|
Assign the given docstring to the property instance and, if
possible, to the `__test__` dictionary of the module of its
owner class.
|
entailment
|
def getter_(self, fget) -> 'BaseProperty':
"""Add the given getter function and its docstring to the
property and return it."""
self.fget = fget
self.set_doc(fget.__doc__)
return self
|
Add the given getter function and its docstring to the
property and return it.
|
entailment
|
def isready(self, obj) -> bool:
"""Return |True| or |False| to indicate if the protected
property is ready for the given object. If the object is
unknown, |ProtectedProperty| returns |False|."""
return vars(obj).get(self.name, False)
|
Return |True| or |False| to indicate if the protected
property is ready for the given object. If the object is
unknown, |ProtectedProperty| returns |False|.
|
entailment
|
def allready(self, obj) -> bool:
"""Return |True| or |False| to indicate whether all protected
properties are ready or not."""
for prop in self.__properties:
if not prop.isready(obj):
return False
return True
|
Return |True| or |False| to indicate whether all protected
properties are ready or not.
|
entailment
|
def call_fget(self, obj) -> Any:
"""Return the predefined custom value when available, otherwise,
the value defined by the getter function."""
custom = vars(obj).get(self.name)
if custom is None:
return self.fget(obj)
return custom
|
Return the predefined custom value when available, otherwise,
the value defined by the getter function.
|
entailment
|
def call_fset(self, obj, value) -> None:
"""Store the given custom value and call the setter function."""
vars(obj)[self.name] = self.fset(obj, value)
|
Store the given custom value and call the setter function.
|
entailment
|
def call_fdel(self, obj) -> None:
"""Remove the predefined custom value and call the delete function."""
self.fdel(obj)
try:
del vars(obj)[self.name]
except KeyError:
pass
|
Remove the predefined custom value and call the delete function.
|
entailment
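A self-contained sketch of the custom-value mechanism shared by call_fget, call_fset and call_fdel above, with a plain function standing in for the property's getter; the names are illustrative only.
class Owner:
    pass

NAME = "threshold"  # attribute name under which the custom value is stored

def fget(obj):
    return 42.0  # stand-in for the "computed" default value

def call_fget(obj):
    custom = vars(obj).get(NAME)
    return fget(obj) if custom is None else custom

obj = Owner()
print(call_fget(obj))      # 42.0 - nothing stored yet, the getter is used
vars(obj)[NAME] = 7.0      # what call_fset effectively does with the setter's result
print(call_fget(obj))      # 7.0 - the stored custom value wins
vars(obj).pop(NAME, None)  # what call_fdel effectively does after calling fdel
print(call_fget(obj))      # 42.0 - back to the getter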
|
def trim(self, lower=None, upper=None):
"""Trim upper values in accordance with :math:`RelWB \\leq RelWZ`.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(3)
>>> lnk(ACKER)
>>> relwb.values = 0.5
>>> relwz(0.2, 0.5, 0.8)
>>> relwz
relwz(0.5, 0.5, 0.8)
"""
if lower is None:
lower = getattr(self.subpars.relwb, 'value', None)
lland_parameters.ParameterSoil.trim(self, lower, upper)
|
Trim lower values in accordance with :math:`RelWB \\leq RelWZ`.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(3)
>>> lnk(ACKER)
>>> relwb.values = 0.5
>>> relwz(0.2, 0.5, 0.8)
>>> relwz
relwz(0.5, 0.5, 0.8)
|
entailment
|
def trim(self, lower=None, upper=None):
"""Trim upper values in accordance with :math:`RelWB \\leq RelWZ`.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(3)
>>> lnk(ACKER)
>>> relwz.values = 0.5
>>> relwb(0.2, 0.5, 0.8)
>>> relwb
relwb(0.2, 0.5, 0.5)
"""
if upper is None:
upper = getattr(self.subpars.relwz, 'value', None)
lland_parameters.ParameterSoil.trim(self, lower, upper)
|
Trim upper values in accordance with :math:`RelWB \\leq RelWZ`.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(3)
>>> lnk(ACKER)
>>> relwz.values = 0.5
>>> relwb(0.2, 0.5, 0.8)
>>> relwb
relwb(0.2, 0.5, 0.5)
|
entailment
|
def trim(self, lower=None, upper=None):
"""Trim upper values in accordance with :math:`EQI1 \\leq EQB`.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> eqi1.value = 2.0
>>> eqb(1.0)
>>> eqb
eqb(2.0)
>>> eqb(2.0)
>>> eqb
eqb(2.0)
>>> eqb(3.0)
>>> eqb
eqb(3.0)
"""
if lower is None:
lower = getattr(self.subpars.eqi1, 'value', None)
super().trim(lower, upper)
|
Trim lower values in accordance with :math:`EQI1 \\leq EQB`.
>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> eqi1.value = 2.0
>>> eqb(1.0)
>>> eqb
eqb(2.0)
>>> eqb(2.0)
>>> eqb
eqb(2.0)
>>> eqb(3.0)
>>> eqb
eqb(3.0)
|
entailment
|