Dataset schema (column name, value type, and min/max size reported by the preview):

    repository_name            string, length 7–55
    func_path_in_repository    string, length 4–223
    func_name                  string, length 1–134
    whole_func_string          string, length 75–104k
    language                   string, 1 distinct value
    func_code_string           string, length 75–104k
    func_code_tokens           list, length 19–28.4k
    func_documentation_string  string, length 1–46.9k
    func_documentation_tokens  list, length 1–1.97k
    split_name                 string, 1 distinct value
    func_code_url              string, length 87–315
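For working with rows of this shape programmatically, a minimal sketch using the Hugging Face datasets library follows; the dataset path is a hypothetical placeholder, since the preview does not name the dataset.

    # Sketch only: the dataset path below is a hypothetical placeholder.
    from datasets import load_dataset

    ds = load_dataset("user/python-code-docstrings", split="train")  # hypothetical name
    for row in ds:
        print(row["repository_name"], row["func_name"])
        print(row["func_documentation_string"])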
svinota/mdns · mdns/zeroconf.py · DNSService.write · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L666-L671
docstring: Used in constructing an outgoing packet

    def write(self, out):
        """Used in constructing an outgoing packet"""
        out.write_short(self.priority)
        out.write_short(self.weight)
        out.write_short(self.port)
        out.write_name(self.server)
svinota/mdns · mdns/zeroconf.py · DNSIncoming.read_header · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L708-L721
docstring: Reads header portion of packet

    def read_header(self):
        """Reads header portion of packet"""
        format = '!HHHHHH'
        length = struct.calcsize(format)
        info = struct.unpack(format,
                             self.data[self.offset:self.offset + length])
        self.offset += length
        self.id = info[0]
        self.flags = info[1]
        self.num_questions = info[2]
        self.num_answers = info[3]
        self.num_authorities = info[4]
        self.num_additionals = info[5]
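As a standalone illustration of the header layout read_header expects, the sketch below packs and then unpacks a 12-byte DNS header with the same '!HHHHHH' format (id, flags, and the four section counts, all big-endian unsigned shorts); it is independent of the mdns classes.

    import struct

    # Build a 12-byte DNS header: id=0x1234, flags=0x8400,
    # 0 questions, 2 answers, 0 authorities, 1 additional.
    header = struct.pack('!HHHHHH', 0x1234, 0x8400, 0, 2, 0, 1)
    ident, flags, nq, nans, nauth, nadd = struct.unpack('!HHHHHH', header)
    assert (ident, nans, nadd) == (0x1234, 2, 1)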
svinota/mdns · mdns/zeroconf.py · DNSIncoming.read_questions · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L723-L734
docstring: Reads questions section of packet

    def read_questions(self):
        """Reads questions section of packet"""
        format = '!HH'
        length = struct.calcsize(format)
        for i in range(0, self.num_questions):
            name = self.read_name()
            info = struct.unpack(format,
                                 self.data[self.offset:self.offset + length])
            self.offset += length
            question = DNSQuestion(name, info[0], info[1])
            self.questions.append(question)
svinota/mdns · mdns/zeroconf.py · DNSIncoming.read_int · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L736-L743
docstring: Reads an integer from the packet

    def read_int(self):
        """Reads an integer from the packet"""
        format = '!I'
        length = struct.calcsize(format)
        info = struct.unpack(format,
                             self.data[self.offset:self.offset + length])
        self.offset += length
        return info[0]
svinota/mdns · mdns/zeroconf.py · DNSIncoming.read_character_string · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L745-L749
docstring: Reads a character string from the packet

    def read_character_string(self):
        """Reads a character string from the packet"""
        length = ord(self.data[self.offset])
        self.offset += 1
        return self.read_string(length)
svinota/mdns · mdns/zeroconf.py · DNSIncoming.read_string · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L751-L758
docstring: Reads a string of a given length from the packet

    def read_string(self, len):
        """Reads a string of a given length from the packet"""
        format = '!' + str(len) + 's'
        length = struct.calcsize(format)
        info = struct.unpack(format,
                             self.data[self.offset:self.offset + length])
        self.offset += length
        return info[0]
svinota/mdns · mdns/zeroconf.py · DNSIncoming.read_others · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L769-L828
docstring: Reads the answers, authorities and additionals section of the packet

    def read_others(self):
        """Reads the answers, authorities and additionals section of the
        packet"""
        format = '!HHiH'
        length = struct.calcsize(format)
        n = self.num_answers + self.num_authorities + self.num_additionals
        for i in range(0, n):
            domain = self.read_name()
            info = struct.unpack(format,
                                 self.data[self.offset:self.offset + length])
            self.offset += length
            rec = None
            if info[0] == _TYPE_A:
                rec = DNSAddress(domain, info[0], info[1], info[2],
                                 self.read_string(4))
            elif info[0] == _TYPE_CNAME or info[0] == _TYPE_PTR:
                rec = DNSPointer(domain, info[0], info[1], info[2],
                                 self.read_name())
            elif info[0] == _TYPE_TXT:
                rec = DNSText(domain, info[0], info[1], info[2],
                              self.read_string(info[3]))
            elif info[0] == _TYPE_SRV:
                rec = DNSService(domain, info[0], info[1], info[2],
                                 self.read_unsigned_short(),
                                 self.read_unsigned_short(),
                                 self.read_unsigned_short(),
                                 self.read_name())
            elif info[0] == _TYPE_HINFO:
                rec = DNSHinfo(domain, info[0], info[1], info[2],
                               self.read_character_string(),
                               self.read_character_string())
            elif info[0] == _TYPE_RRSIG:
                rec = DNSSignatureI(domain, info[0], info[1], info[2],
                                    self.read_string(18),
                                    self.read_name(),
                                    self.read_character_string())
            elif info[0] == _TYPE_AAAA:
                rec = DNSAddress(domain, info[0], info[1], info[2],
                                 self.read_string(16))
            else:
                # Try to ignore types we don't know about
                # this may mean the rest of the name is
                # unable to be parsed, and may show errors
                # so this is left for debugging.  New types
                # encountered need to be parsed properly.
                #
                #print "UNKNOWN TYPE = " + str(info[0])
                #raise BadTypeInNameException
                pass
            if rec is not None:
                self.answers.append(rec)
svinota/mdns · mdns/zeroconf.py · DNSIncoming.read_utf · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L838-L844
docstring: Reads a UTF-8 string of a given length from the packet

    def read_utf(self, offset, len):
        """Reads a UTF-8 string of a given length from the packet"""
        try:
            result = self.data[offset:offset + len].decode('utf-8')
        except UnicodeDecodeError:
            result = str('')
        return result
svinota/mdns · mdns/zeroconf.py · DNSIncoming.read_name · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L846-L878
docstring: Reads a domain name from the packet

    def read_name(self):
        """Reads a domain name from the packet"""
        result = ''
        off = self.offset
        next = -1
        first = off

        while 1:
            len = ord(self.data[off])
            off += 1
            if len == 0:
                break
            t = len & 0xC0
            if t == 0x00:
                result = ''.join((result, self.read_utf(off, len) + '.'))
                off += len
            elif t == 0xC0:
                if next < 0:
                    next = off + 1
                off = ((len & 0x3F) << 8) | ord(self.data[off])
                if off >= first:
                    raise Exception(
                        "Bad domain name (circular) at " + str(off))
                first = off
            else:
                raise Exception("Bad domain name at " + str(off))

        if next >= 0:
            self.offset = next
        else:
            self.offset = off

        return result
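The 0xC0 test in read_name implements RFC 1035 name compression: a length byte whose top two bits are set is not a label length but the high bits of a 14-bit pointer back into the packet. A small self-contained Python 3 decoder over the same wire format is sketched below; unlike the original, it omits the circular-reference guard, so it is illustration only.

    # Standalone sketch of RFC 1035 name decompression (not the mdns code).
    def decode_name(data, off):
        labels = []
        while True:
            length = data[off]
            off += 1
            if length == 0:
                break
            if length & 0xC0 == 0xC0:                # compression pointer
                off = ((length & 0x3F) << 8) | data[off]
                continue
            labels.append(data[off:off + length].decode('utf-8'))
            off += length
        return '.'.join(labels) + '.'

    # 'a.example.' stored once at offset 0, then referenced by a pointer.
    packet = b'\x01a\x07example\x00' + b'\xc0\x00'
    assert decode_name(packet, 11) == 'a.example.'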
svinota/mdns · mdns/zeroconf.py · DNSOutgoing.add_answer · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L902-L905
docstring: Adds an answer

    def add_answer(self, inp, record):
        """Adds an answer"""
        if not record.suppressed_by(inp):
            self.add_answer_at_time(record, 0)
svinota/mdns · mdns/zeroconf.py · DNSOutgoing.add_answer_at_time · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L907-L913
docstring: Adds an answer if it does not expire by a certain time

    def add_answer_at_time(self, record, now):
        """Adds an answer if it does not expire by a certain time"""
        if record is not None:
            if now == 0 or not record.is_expired(now):
                self.answers.append((record, now))
                if record.rrsig is not None:
                    self.answers.append((record.rrsig, now))
svinota/mdns · mdns/zeroconf.py · DNSOutgoing.write_byte · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L923-L927
docstring: Writes a single byte to the packet

    def write_byte(self, value):
        """Writes a single byte to the packet"""
        format = '!B'
        self.data.append(struct.pack(format, value))
        self.size += 1
svinota/mdns · mdns/zeroconf.py · DNSOutgoing.insert_short · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L935-L939
docstring: Inserts an unsigned short in a certain position in the packet

    def insert_short(self, index, value):
        """Inserts an unsigned short in a certain position in the packet"""
        format = '!H'
        self.data.insert(index, struct.pack(format, value))
        self.size += 2
svinota/mdns · mdns/zeroconf.py · DNSOutgoing.write_int · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L947-L951
docstring: Writes an unsigned integer to the packet

    def write_int(self, value):
        """Writes an unsigned integer to the packet"""
        format = '!I'
        self.data.append(struct.pack(format, int(value)))
        self.size += 4
svinota/mdns · mdns/zeroconf.py · DNSOutgoing.write_string · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L953-L957
docstring: Writes a string to the packet

    def write_string(self, value, length):
        """Writes a string to the packet"""
        format = '!' + str(length) + 's'
        self.data.append(struct.pack(format, value))
        self.size += length
svinota/mdns · mdns/zeroconf.py · DNSOutgoing.write_utf · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L959-L966
docstring: Writes a UTF-8 string of a given length to the packet

    def write_utf(self, s):
        """Writes a UTF-8 string of a given length to the packet"""
        utfstr = s.encode('utf-8')
        length = len(utfstr)
        if length > 64:
            raise NamePartTooLongException
        self.write_byte(length)
        self.write_string(utfstr, length)
svinota/mdns · mdns/zeroconf.py · DNSOutgoing.write_name · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L968-L992
docstring: Writes a domain name to the packet

    def write_name(self, name):
        """Writes a domain name to the packet"""
        try:
            # Find existing instance of this name in packet
            #
            index = self.names[name]
        except KeyError:
            # No record of this name already, so write it
            # out as normal, recording the location of the name
            # for future pointers to it.
            #
            self.names[name] = self.size
            parts = name.split('.')
            if parts[-1] == '':
                parts = parts[:-1]
            for part in parts:
                self.write_utf(part)
            self.write_byte(0)
            return

        # An index was found, so write a pointer to it
        #
        self.write_byte((index >> 8) | 0xC0)
        self.write_byte(index)
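write_name is the encoding side of the same compression scheme: the first occurrence of a name records its byte offset in self.names, and later occurrences are emitted as a two-byte pointer. A rough standalone sketch of that effect follows; the explicit & 0xFF mask on the low byte is an assumed simplification, not a line from the original.

    # Standalone sketch of the pointer-emission side (assumed simplification).
    names = {}          # name -> offset of its first occurrence
    out = bytearray()

    def write_name(name):
        if name in names:
            index = names[name]
            out.append((index >> 8) | 0xC0)   # pointer, high 6 bits
            out.append(index & 0xFF)          # pointer, low byte
            return
        names[name] = len(out)
        for part in name.rstrip('.').split('.'):
            label = part.encode('utf-8')
            out.append(len(label))
            out.extend(label)
        out.append(0)

    write_name('a.example.')   # written in full: 11 bytes
    write_name('a.example.')   # written as a 2-byte pointer
    assert len(out) == 13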
svinota/mdns · mdns/zeroconf.py · DNSOutgoing.write_question · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L994-L998
docstring: Writes a question to the packet

    def write_question(self, question):
        """Writes a question to the packet"""
        self.write_name(question.name)
        self.write_short(question.type)
        self.write_short(question.clazz)
svinota/mdns · mdns/zeroconf.py · DNSOutgoing.write_record · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1000-L1021
docstring: Writes a record (answer, authoritative answer, additional) to the packet

    def write_record(self, record, now):
        """Writes a record (answer, authoritative answer, additional)
        to the packet"""
        self.write_name(record.name)
        self.write_short(record.type)
        if record.unique and self.multicast:
            self.write_short(record.clazz | _CLASS_UNIQUE)
        else:
            self.write_short(record.clazz)
        if now == 0:
            self.write_int(record.ttl)
        else:
            self.write_int(record.get_remaining_ttl(now))
        index = len(self.data)
        # Adjust size for the short we will write before this record
        #
        self.size += 2
        record.write(self)
        self.size -= 2
        length = len(b''.join(self.data[index:]))
        self.insert_short(index, length)
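write_record relies on a small trick for the RDLENGTH field: it writes the rdata first, measures how many bytes record.write() produced, then inserts the 2-byte length in front of them; the temporary self.size += 2 / -= 2 keeps name-compression offsets consistent while the rdata is written. The same pattern in miniature:

    import struct

    data = []                       # list of byte chunks, as in DNSOutgoing
    index = len(data)               # remember where the rdata starts
    data.append(b'\x00\x05')        # pretend rdata, 5 bytes total
    data.append(b'abc')
    rdlength = len(b''.join(data[index:]))
    data.insert(index, struct.pack('!H', rdlength))   # RDLENGTH goes in front
    assert b''.join(data) == b'\x00\x05\x00\x05abc'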
svinota/mdns · mdns/zeroconf.py · DNSOutgoing.packet · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1023-L1048
docstring: Returns a string containing the packet's bytes. No further parts should be added to the packet once this is done.

    def packet(self):
        """Returns a string containing the packet's bytes

        No further parts should be added to the packet once this
        is done."""
        if not self.finished:
            self.finished = 1
            for question in self.questions:
                self.write_question(question)
            for answer, time in self.answers:
                self.write_record(answer, time)
            for authority in self.authorities:
                self.write_record(authority, 0)
            for additional in self.additionals:
                self.write_record(additional, 0)
            self.insert_short(0, len(self.additionals))
            self.insert_short(0, len(self.authorities))
            self.insert_short(0, len(self.answers))
            self.insert_short(0, len(self.questions))
            self.insert_short(0, self.flags)
            if self.multicast:
                self.insert_short(0, 0)
            else:
                self.insert_short(0, self.id)
        return b''.join(self.data)
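Because insert_short(0, ...) prepends, packet() pushes the header fields in reverse order: section counts first, then flags, then the id, so the finished buffer starts with id, flags, and the four counts in wire order. A quick check of that ordering:

    import struct

    chunks = [b'BODY']
    for value in (3, 2, 1, 0):     # additionals, authorities, answers, questions
        chunks.insert(0, struct.pack('!H', value))
    chunks.insert(0, struct.pack('!H', 0x8400))   # flags
    chunks.insert(0, struct.pack('!H', 0x1234))   # id
    assert b''.join(chunks).startswith(
        struct.pack('!HHHHHH', 0x1234, 0x8400, 0, 1, 2, 3))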
svinota/mdns · mdns/zeroconf.py · DNSCache.add · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1058-L1066
docstring: Adds an entry

    def add(self, entry):
        """Adds an entry"""
        if self.get(entry) is not None:
            return
        try:
            list = self.cache[entry.key]
        except:
            list = self.cache[entry.key] = []
        list.append(entry)
svinota/mdns · mdns/zeroconf.py · DNSCache.sign · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1068-L1077
docstring: Adds and signs an entry

    def sign(self, entry, signer=None):
        """Adds and signs an entry"""
        if (self.get(entry) is not None):
            return
        if (entry.rrsig is None) and (self.private is not None):
            entry.rrsig = DNSSignatureS(entry.name, _TYPE_RRSIG, _CLASS_IN,
                                        entry, self.private, signer)
        self.add(entry)
        if (self.private is not None):
            self.add(entry.rrsig)
svinota/mdns · mdns/zeroconf.py · DNSCache.remove · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1079-L1085
docstring: Removes an entry

    def remove(self, entry):
        """Removes an entry"""
        try:
            list = self.cache[entry.key]
            list.remove(entry)
        except:
            pass
svinota/mdns · mdns/zeroconf.py · DNSCache.get · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1087-L1094
docstring: Gets an entry by key. Will return None if there is no matching entry.

    def get(self, entry):
        """Gets an entry by key.  Will return None if there is
        no matching entry."""
        try:
            list = self.cache[entry.key]
            return list[list.index(entry)]
        except:
            return None
svinota/mdns · mdns/zeroconf.py · DNSCache.get_by_details · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1096-L1100
docstring: Gets an entry by details. Will return None if there is no matching entry.

    def get_by_details(self, name, type, clazz):
        """Gets an entry by details.  Will return None if there is
        no matching entry."""
        entry = DNSEntry(name, type, clazz)
        return self.get(entry)
svinota/mdns · mdns/zeroconf.py · DNSCache.entries · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1109-L1116
docstring: Returns a list of all entries

    def entries(self):
        """Returns a list of all entries"""
        def add(x, y):
            return x + y
        try:
            return reduce(add, list(self.cache.values()))
        except:
            return []
svinota/mdns · mdns/zeroconf.py · ServiceBrowser.update_record · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1273-L1298
docstring: Callback invoked by Zeroconf when new information arrives. Updates information required by browser in the Zeroconf cache.

    def update_record(self, zeroconf, now, record):
        """Callback invoked by Zeroconf when new information arrives.

        Updates information required by browser in the Zeroconf cache."""
        if record.type == _TYPE_PTR and record.name == self.type:
            expired = record.is_expired(now)
            try:
                oldrecord = self.services[record.alias.lower()]
                if not expired:
                    oldrecord.reset_ttl(record)
                else:
                    del(self.services[record.alias.lower()])
                    callback = lambda x: self.listener.remove_service(
                        x, self.type, record.alias)
                    self.list.append(callback)
                    return
            except:
                if not expired:
                    self.services[record.alias.lower()] = record
                    callback = lambda x: self.listener.add_service(
                        x, self.type, record.alias)
                    self.list.append(callback)

            expires = record.get_expiration_time(75)
            if expires < self.next_time:
                self.next_time = expires
svinota/mdns · mdns/zeroconf.py · ServiceInfo.set_properties · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1399-L1405
docstring: Sets properties and text of this info from a dictionary

    def set_properties(self, properties):
        """Sets properties and text of this info from a dictionary"""
        if isinstance(properties, dict):
            self.properties = properties
            self.sync_properties()
        else:
            self.text = properties
svinota/mdns · mdns/zeroconf.py · ServiceInfo.set_text · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1407-L1414
docstring: Sets properties and text given a text field

    def set_text(self, text):
        """Sets properties and text given a text field"""
        self.text = text
        try:
            self.properties = text_to_dict(text)
        except:
            traceback.print_exc()
            self.properties = None
svinota/mdns · mdns/zeroconf.py · ServiceInfo.get_name · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1420-L1424
docstring: Name accessor

    def get_name(self):
        """Name accessor"""
        if self.type is not None and self.name.endswith("." + self.type):
            return self.name[:len(self.name) - len(self.type) - 1]
        return self.name
svinota/mdns · mdns/zeroconf.py · ServiceInfo.update_record · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1454-L1473
docstring: Updates service information from a DNS record

    def update_record(self, zeroconf, now, record):
        """Updates service information from a DNS record"""
        if record is not None and not record.is_expired(now):
            if record.type == _TYPE_A:
                if record.name == self.name:
                    if not record.address in self.address:
                        self.address.append(record.address)
            elif record.type == _TYPE_SRV:
                if record.name == self.name:
                    self.server = record.server
                    self.port = record.port
                    self.weight = record.weight
                    self.priority = record.priority
                    self.address = []
                    self.update_record(
                        zeroconf, now,
                        zeroconf.cache.get_by_details(
                            self.server, _TYPE_A, _CLASS_IN))
            elif record.type == _TYPE_TXT:
                if record.name == self.name:
                    self.set_text(record.text)
svinota/mdns · mdns/zeroconf.py · ServiceInfo.request · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1475-L1520
docstring: Returns true if the service could be discovered on the network, and updates this object with details discovered.

    def request(self, zeroconf, timeout):
        """Returns true if the service could be discovered on the
        network, and updates this object with details discovered.
        """
        now = current_time_millis()
        delay = _LISTENER_TIME
        next = now + delay
        last = now + timeout
        result = 0
        try:
            zeroconf.add_listener(
                self, DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN))
            while self.server is None or \
                    len(self.address) == 0 or \
                    self.text is None:
                if last <= now:
                    return 0
                if next <= now:
                    out = DNSOutgoing(_FLAGS_QR_QUERY)
                    out.add_question(
                        DNSQuestion(self.name, _TYPE_SRV, _CLASS_IN))
                    out.add_answer_at_time(
                        zeroconf.cache.get_by_details(self.name, _TYPE_SRV,
                                                      _CLASS_IN), now)
                    out.add_question(
                        DNSQuestion(self.name, _TYPE_TXT, _CLASS_IN))
                    out.add_answer_at_time(
                        zeroconf.cache.get_by_details(self.name, _TYPE_TXT,
                                                      _CLASS_IN), now)
                    if self.server is not None:
                        out.add_question(
                            DNSQuestion(self.server, _TYPE_A, _CLASS_IN))
                        out.add_answer_at_time(
                            zeroconf.cache.get_by_details(self.server,
                                                          _TYPE_A,
                                                          _CLASS_IN), now)
                    zeroconf.send(out)
                    next = now + delay
                    delay = delay * 2
                zeroconf.wait(min(next, last) - now)
                now = current_time_millis()
            result = 1
        finally:
            zeroconf.remove_listener(self)
        return result
svinota/mdns · mdns/zeroconf.py · Heartbeat.wait · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1562-L1567
docstring: Calling thread waits for a given number of milliseconds or until notified.

    def wait(self, timeout):
        """Calling thread waits for a given number of milliseconds or
        until notified."""
        self.condition.acquire()
        self.condition.wait(timeout // 1000)
        self.condition.release()
svinota/mdns · mdns/zeroconf.py · Heartbeat.notify_all · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1569-L1577
docstring: Notifies all waiting threads

    def notify_all(self):
        """Notifies all waiting threads"""
        self.condition.acquire()
        # python 3.x
        try:
            self.condition.notify_all()
        except:
            self.condition.notifyAll()
        self.condition.release()
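The acquire/release pairs and the notify_all/notifyAll fallback above can be expressed more compactly with the context-manager form of threading.Condition available in modern Python; a standalone sketch of the equivalent, not the mdns code itself:

    import threading

    condition = threading.Condition()

    def wait_ms(timeout):
        # Condition.wait takes seconds. Note the original uses floor
        # division (timeout // 1000), which truncates sub-second
        # timeouts to zero; true division avoids that.
        with condition:
            condition.wait(timeout / 1000.0)

    def notify_all_waiters():
        with condition:
            condition.notify_all()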
svinota/mdns · mdns/zeroconf.py · Zeroconf.get_service_info · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1712-L1719
docstring: Returns network's service information for a particular name and type, or None if no service matches by the timeout, which defaults to 3 seconds.

    def get_service_info(self, type, name, timeout=3000):
        """Returns network's service information for a particular
        name and type, or None if no service matches by the timeout,
        which defaults to 3 seconds."""
        info = ServiceInfo(type, name)
        if info.request(self, timeout):
            return info
        return None
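Taken together, the methods above give the usual lookup flow. A hedged usage sketch against this module's API as shown in the rows here; the service type and instance name are made-up placeholders, and the constructor arguments and close() call are assumptions not confirmed by these rows:

    # Usage sketch; names are hypothetical placeholders.
    from mdns.zeroconf import Zeroconf

    zc = Zeroconf()
    info = zc.get_service_info('_http._tcp.local.',
                               'myprinter._http._tcp.local.',
                               timeout=3000)
    if info is not None:
        print(info.get_name(), info.address, info.port)
    zc.close()   # assumed cleanup method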
svinota/mdns · mdns/zeroconf.py · Zeroconf.add_serviceListener · python · train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1721-L1726
docstring: Adds a listener for a particular service type. This object will then have its update_record method called when information arrives for that type.

    def add_serviceListener(self, type, listener):
        """Adds a listener for a particular service type.  This object
        will then have its update_record method called when
        information arrives for that type."""
        self.remove_service_listener(listener)
        self.browsers.append(ServiceBrowser(self, type, listener))
svinota/mdns
mdns/zeroconf.py
Zeroconf.remove_service_listener
def remove_service_listener(self, listener):
        """Removes a listener from the set that is currently listening."""
        for browser in self.browsers:
            if browser.listener == listener:
                browser.cancel()
                del(browser)
python
def remove_service_listener(self, listener):
        """Removes a listener from the set that is currently listening."""
        for browser in self.browsers:
            if browser.listener == listener:
                browser.cancel()
                del(browser)
[ "def", "remove_service_listener", "(", "self", ",", "listener", ")", ":", "for", "browser", "in", "self", ".", "browsers", ":", "if", "browser", ".", "listener", "==", "listener", ":", "browser", ".", "cancel", "(", ")", "del", "(", "browser", ")" ]
Removes a listener from the set that is currently listening.
[ "Removes", "a", "listener", "from", "the", "set", "that", "is", "currently", "listening", "." ]
train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1728-L1733
svinota/mdns
mdns/zeroconf.py
Zeroconf.register_service
def register_service(self, info):
        """Registers service information to the network with a default TTL
        of 60 seconds.  Zeroconf will then respond to requests for
        information for that service.  The name of the service may be
        changed if needed to make it unique on the network."""
        self.check_service(info)
        self.services[info.name.lower()] = info
        # zone transfer
        self.transfer_zone(info.type)
        self.announce_service(info.name)
python
def register_service(self, info):
        """Registers service information to the network with a default TTL
        of 60 seconds.  Zeroconf will then respond to requests for
        information for that service.  The name of the service may be
        changed if needed to make it unique on the network."""
        self.check_service(info)
        self.services[info.name.lower()] = info
        # zone transfer
        self.transfer_zone(info.type)
        self.announce_service(info.name)
[ "def", "register_service", "(", "self", ",", "info", ")", ":", "self", ".", "check_service", "(", "info", ")", "self", ".", "services", "[", "info", ".", "name", ".", "lower", "(", ")", "]", "=", "info", "# zone transfer", "self", ".", "transfer_zone", "(", "info", ".", "type", ")", "self", ".", "announce_service", "(", "info", ".", "name", ")" ]
Registers service information to the network with a default TTL of 60 seconds. Zeroconf will then respond to requests for information for that service. The name of the service may be changed if needed to make it unique on the network.
[ "Registers", "service", "information", "to", "the", "network", "with", "a", "default", "TTL", "of", "60", "seconds", ".", "Zeroconf", "will", "then", "respond", "to", "requests", "for", "information", "for", "that", "service", ".", "The", "name", "of", "the", "service", "may", "be", "changed", "if", "needed", "to", "make", "it", "unique", "on", "the", "network", "." ]
train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1738-L1748
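A hedged registration sketch; the ServiceInfo arguments below are guesses based on the attributes (address, port, name, type) referenced elsewhere in these records, not a signature taken from the source:

import socket
from mdns.zeroconf import Zeroconf, ServiceInfo  # assumed import path

zc = Zeroconf()
info = ServiceInfo('_http._tcp.local.',         # hypothetical service type
                   'myhost._http._tcp.local.',  # hypothetical service name
                   address=[socket.inet_aton('192.168.1.10')],
                   port=8080)
zc.register_service(info)  # may rewrite info.name to keep it unique on the network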
svinota/mdns
mdns/zeroconf.py
Zeroconf.unregister_service
def unregister_service(self, info):
        """Unregister a service."""
        try:
            del(self.services[info.name.lower()])
        except:
            pass
        now = current_time_millis()
        next_time = now
        i = 0
        while i < 3:
            if now < next_time:
                self.wait(next_time - now)
                now = current_time_millis()
                continue
            out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
            out.add_answer_at_time(
                DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
            out.add_answer_at_time(
                DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0,
                           info.priority, info.weight, info.port,
                           info.name), 0)
            out.add_answer_at_time(
                DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
            for k in info.address:
                out.add_answer_at_time(
                    DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, k), 0)
            self.send(out)
            i += 1
            next_time += _UNREGISTER_TIME
python
def unregister_service(self, info):
        """Unregister a service."""
        try:
            del(self.services[info.name.lower()])
        except:
            pass
        now = current_time_millis()
        next_time = now
        i = 0
        while i < 3:
            if now < next_time:
                self.wait(next_time - now)
                now = current_time_millis()
                continue
            out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
            out.add_answer_at_time(
                DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0)
            out.add_answer_at_time(
                DNSService(info.name, _TYPE_SRV, _CLASS_IN, 0,
                           info.priority, info.weight, info.port,
                           info.name), 0)
            out.add_answer_at_time(
                DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0)
            for k in info.address:
                out.add_answer_at_time(
                    DNSAddress(info.server, _TYPE_A, _CLASS_IN, 0, k), 0)
            self.send(out)
            i += 1
            next_time += _UNREGISTER_TIME
[ "def", "unregister_service", "(", "self", ",", "info", ")", ":", "try", ":", "del", "(", "self", ".", "services", "[", "info", ".", "name", ".", "lower", "(", ")", "]", ")", "except", ":", "pass", "now", "=", "current_time_millis", "(", ")", "next_time", "=", "now", "i", "=", "0", "while", "i", "<", "3", ":", "if", "now", "<", "next_time", ":", "self", ".", "wait", "(", "next_time", "-", "now", ")", "now", "=", "current_time_millis", "(", ")", "continue", "out", "=", "DNSOutgoing", "(", "_FLAGS_QR_RESPONSE", "|", "_FLAGS_AA", ")", "out", ".", "add_answer_at_time", "(", "DNSPointer", "(", "info", ".", "type", ",", "_TYPE_PTR", ",", "_CLASS_IN", ",", "0", ",", "info", ".", "name", ")", ",", "0", ")", "out", ".", "add_answer_at_time", "(", "DNSService", "(", "info", ".", "name", ",", "_TYPE_SRV", ",", "_CLASS_IN", ",", "0", ",", "info", ".", "priority", ",", "info", ".", "weight", ",", "info", ".", "port", ",", "info", ".", "name", ")", ",", "0", ")", "out", ".", "add_answer_at_time", "(", "DNSText", "(", "info", ".", "name", ",", "_TYPE_TXT", ",", "_CLASS_IN", ",", "0", ",", "info", ".", "text", ")", ",", "0", ")", "for", "k", "in", "info", ".", "address", ":", "out", ".", "add_answer_at_time", "(", "DNSAddress", "(", "info", ".", "server", ",", "_TYPE_A", ",", "_CLASS_IN", ",", "0", ",", "k", ")", ",", "0", ")", "self", ".", "send", "(", "out", ")", "i", "+=", "1", "next_time", "+=", "_UNREGISTER_TIME" ]
Unregister a service.
[ "Unregister", "a", "service", "." ]
train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1806-L1835
svinota/mdns
mdns/zeroconf.py
Zeroconf.check_service
def check_service(self, info):
        """Checks the network for a unique service name, modifying the
        ServiceInfo passed in if it is not unique."""
        now = current_time_millis()
        next_time = now
        i = 0
        while i < 3:
            for record in self.cache.entries_with_name(info.type):
                if record.type == _TYPE_PTR and \
                        not record.is_expired(now) and \
                        record.alias == info.name:
                    if (info.name.find('.') < 0):
                        info.name = info.name + ".[" + \
                            info.address + \
                            ":" + info.port + \
                            "]." + info.type
                        self.check_service(info)
                        return
                    raise NonUniqueNameException
            if now < next_time:
                self.wait(next_time - now)
                now = current_time_millis()
                continue
            out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
            self.debug = out
            out.add_question(
                DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN))
            out.add_authorative_answer(
                DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, info.ttl, info.name))
            self.send(out)
            i += 1
            next_time += _CHECK_TIME
python
def check_service(self, info):
        """Checks the network for a unique service name, modifying the
        ServiceInfo passed in if it is not unique."""
        now = current_time_millis()
        next_time = now
        i = 0
        while i < 3:
            for record in self.cache.entries_with_name(info.type):
                if record.type == _TYPE_PTR and \
                        not record.is_expired(now) and \
                        record.alias == info.name:
                    if (info.name.find('.') < 0):
                        info.name = info.name + ".[" + \
                            info.address + \
                            ":" + info.port + \
                            "]." + info.type
                        self.check_service(info)
                        return
                    raise NonUniqueNameException
            if now < next_time:
                self.wait(next_time - now)
                now = current_time_millis()
                continue
            out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA)
            self.debug = out
            out.add_question(
                DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN))
            out.add_authorative_answer(
                DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, info.ttl, info.name))
            self.send(out)
            i += 1
            next_time += _CHECK_TIME
[ "def", "check_service", "(", "self", ",", "info", ")", ":", "now", "=", "current_time_millis", "(", ")", "next_time", "=", "now", "i", "=", "0", "while", "i", "<", "3", ":", "for", "record", "in", "self", ".", "cache", ".", "entries_with_name", "(", "info", ".", "type", ")", ":", "if", "record", ".", "type", "==", "_TYPE_PTR", "and", "not", "record", ".", "is_expired", "(", "now", ")", "and", "record", ".", "alias", "==", "info", ".", "name", ":", "if", "(", "info", ".", "name", ".", "find", "(", "'.'", ")", "<", "0", ")", ":", "info", ".", "name", "=", "info", ".", "name", "+", "\".[\"", "+", "info", ".", "address", "+", "\":\"", "+", "info", ".", "port", "+", "\"].\"", "+", "info", ".", "type", "self", ".", "check_service", "(", "info", ")", "return", "raise", "NonUniqueNameException", "if", "now", "<", "next_time", ":", "self", ".", "wait", "(", "next_time", "-", "now", ")", "now", "=", "current_time_millis", "(", ")", "continue", "out", "=", "DNSOutgoing", "(", "_FLAGS_QR_QUERY", "|", "_FLAGS_AA", ")", "self", ".", "debug", "=", "out", "out", ".", "add_question", "(", "DNSQuestion", "(", "info", ".", "type", ",", "_TYPE_PTR", ",", "_CLASS_IN", ")", ")", "out", ".", "add_authorative_answer", "(", "DNSPointer", "(", "info", ".", "type", ",", "_TYPE_PTR", ",", "_CLASS_IN", ",", "info", ".", "ttl", ",", "info", ".", "name", ")", ")", "self", ".", "send", "(", "out", ")", "i", "+=", "1", "next_time", "+=", "_CHECK_TIME" ]
Checks the network for a unique service name, modifying the ServiceInfo passed in if it is not unique.
[ "Checks", "the", "network", "for", "a", "unique", "service", "name", "modifying", "the", "ServiceInfo", "passed", "in", "if", "it", "is", "not", "unique", "." ]
train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1868-L1900
svinota/mdns
mdns/zeroconf.py
Zeroconf.add_listener
def add_listener(self, listener, question):
        """Adds a listener for a given question.  The listener will have
        its update_record method called when information is available to
        answer the question."""
        now = current_time_millis()
        self.listeners.append(listener)
        if question is not None:
            for record in self.cache.entries_with_name(question.name):
                if question.answered_by(record) and not record.is_expired(now):
                    listener.update_record(self, now, record)
        self.notify_all()
python
def add_listener(self, listener, question):
        """Adds a listener for a given question.  The listener will have
        its update_record method called when information is available to
        answer the question."""
        now = current_time_millis()
        self.listeners.append(listener)
        if question is not None:
            for record in self.cache.entries_with_name(question.name):
                if question.answered_by(record) and not record.is_expired(now):
                    listener.update_record(self, now, record)
        self.notify_all()
[ "def", "add_listener", "(", "self", ",", "listener", ",", "question", ")", ":", "now", "=", "current_time_millis", "(", ")", "self", ".", "listeners", ".", "append", "(", "listener", ")", "if", "question", "is", "not", "None", ":", "for", "record", "in", "self", ".", "cache", ".", "entries_with_name", "(", "question", ".", "name", ")", ":", "if", "question", ".", "answered_by", "(", "record", ")", "and", "not", "record", ".", "is_expired", "(", "now", ")", ":", "listener", ".", "update_record", "(", "self", ",", "now", ",", "record", ")", "self", ".", "notify_all", "(", ")" ]
Adds a listener for a given question. The listener will have its update_record method called when information is available to answer the question.
[ "Adds", "a", "listener", "for", "a", "given", "question", ".", "The", "listener", "will", "have", "its", "update_record", "method", "called", "when", "information", "is", "available", "to", "answer", "the", "question", "." ]
train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1906-L1916
svinota/mdns
mdns/zeroconf.py
Zeroconf.update_record
def update_record(self, now, rec):
        """Used to notify listeners of new information that has updated
        a record."""
        for listener in self.listeners:
            listener.update_record(self, now, rec)
        self.notify_all()
python
def update_record(self, now, rec):
        """Used to notify listeners of new information that has updated
        a record."""
        for listener in self.listeners:
            listener.update_record(self, now, rec)
        self.notify_all()
[ "def", "update_record", "(", "self", ",", "now", ",", "rec", ")", ":", "for", "listener", "in", "self", ".", "listeners", ":", "listener", ".", "update_record", "(", "self", ",", "now", ",", "rec", ")", "self", ".", "notify_all", "(", ")" ]
Used to notify listeners of new information that has updated a record.
[ "Used", "to", "notify", "listeners", "of", "new", "information", "that", "has", "updated", "a", "record", "." ]
train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1926-L1931
svinota/mdns
mdns/zeroconf.py
Zeroconf.handle_response
def handle_response(self, msg, address):
        """Deal with incoming response packets.  All answers
        are held in the cache, and listeners are notified."""
        now = current_time_millis()
        sigs = []
        precache = []
        for record in msg.answers:
            if isinstance(record, DNSSignature):
                sigs.append(record)
            else:
                precache.append(record)

        for e in precache:
            for s in sigs:
                if self.verify(e, s):
                    # print "DNS: %s verified with %s" % (e,s)
                    if self.adaptive and e.type == _TYPE_A:
                        if e.address == '\x00\x00\x00\x00':
                            e.address = socket.inet_aton(address)
                    if e in self.cache.entries():
                        if e.is_expired(now):
                            for i in self.hooks:
                                try:
                                    i.remove(e)
                                except:
                                    pass
                            self.cache.remove(e)
                            self.cache.remove(s)
                        else:
                            entry = self.cache.get(e)
                            sig = self.cache.get(s)
                            if (entry is not None) and (sig is not None):
                                for i in self.hooks:
                                    try:
                                        i.update(e)
                                    except:
                                        pass
                                entry.reset_ttl(e)
                                sig.reset_ttl(s)
                    else:
                        e.rrsig = s
                        self.cache.add(e)
                        self.cache.add(s)
                        for i in self.hooks:
                            try:
                                i.add(e)
                            except:
                                pass
                    precache.remove(e)
                    sigs.remove(s)
                    self.update_record(now, record)

        if self.bypass:
            for e in precache:
                if e in self.cache.entries():
                    if e.is_expired(now):
                        for i in self.hooks:
                            try:
                                i.remove(e)
                            except:
                                pass
                        self.cache.remove(e)
                    else:
                        entry = self.cache.get(e)
                        if (entry is not None):
                            for i in self.hooks:
                                try:
                                    i.update(e)
                                except:
                                    pass
                            entry.reset_ttl(e)
                else:
                    self.cache.add(e)
                    for i in self.hooks:
                        try:
                            i.add(e)
                        except:
                            pass
                self.update_record(now, record)
python
def handle_response(self, msg, address):
        """Deal with incoming response packets.  All answers
        are held in the cache, and listeners are notified."""
        now = current_time_millis()
        sigs = []
        precache = []
        for record in msg.answers:
            if isinstance(record, DNSSignature):
                sigs.append(record)
            else:
                precache.append(record)

        for e in precache:
            for s in sigs:
                if self.verify(e, s):
                    # print "DNS: %s verified with %s" % (e,s)
                    if self.adaptive and e.type == _TYPE_A:
                        if e.address == '\x00\x00\x00\x00':
                            e.address = socket.inet_aton(address)
                    if e in self.cache.entries():
                        if e.is_expired(now):
                            for i in self.hooks:
                                try:
                                    i.remove(e)
                                except:
                                    pass
                            self.cache.remove(e)
                            self.cache.remove(s)
                        else:
                            entry = self.cache.get(e)
                            sig = self.cache.get(s)
                            if (entry is not None) and (sig is not None):
                                for i in self.hooks:
                                    try:
                                        i.update(e)
                                    except:
                                        pass
                                entry.reset_ttl(e)
                                sig.reset_ttl(s)
                    else:
                        e.rrsig = s
                        self.cache.add(e)
                        self.cache.add(s)
                        for i in self.hooks:
                            try:
                                i.add(e)
                            except:
                                pass
                    precache.remove(e)
                    sigs.remove(s)
                    self.update_record(now, record)

        if self.bypass:
            for e in precache:
                if e in self.cache.entries():
                    if e.is_expired(now):
                        for i in self.hooks:
                            try:
                                i.remove(e)
                            except:
                                pass
                        self.cache.remove(e)
                    else:
                        entry = self.cache.get(e)
                        if (entry is not None):
                            for i in self.hooks:
                                try:
                                    i.update(e)
                                except:
                                    pass
                            entry.reset_ttl(e)
                else:
                    self.cache.add(e)
                    for i in self.hooks:
                        try:
                            i.add(e)
                        except:
                            pass
                self.update_record(now, record)
[ "def", "handle_response", "(", "self", ",", "msg", ",", "address", ")", ":", "now", "=", "current_time_millis", "(", ")", "sigs", "=", "[", "]", "precache", "=", "[", "]", "for", "record", "in", "msg", ".", "answers", ":", "if", "isinstance", "(", "record", ",", "DNSSignature", ")", ":", "sigs", ".", "append", "(", "record", ")", "else", ":", "precache", ".", "append", "(", "record", ")", "for", "e", "in", "precache", ":", "for", "s", "in", "sigs", ":", "if", "self", ".", "verify", "(", "e", ",", "s", ")", ":", "# print \"DNS: %s verified with %s\" % (e,s)", "if", "self", ".", "adaptive", "and", "e", ".", "type", "==", "_TYPE_A", ":", "if", "e", ".", "address", "==", "'\\x00\\x00\\x00\\x00'", ":", "e", ".", "address", "=", "socket", ".", "inet_aton", "(", "address", ")", "if", "e", "in", "self", ".", "cache", ".", "entries", "(", ")", ":", "if", "e", ".", "is_expired", "(", "now", ")", ":", "for", "i", "in", "self", ".", "hooks", ":", "try", ":", "i", ".", "remove", "(", "e", ")", "except", ":", "pass", "self", ".", "cache", ".", "remove", "(", "e", ")", "self", ".", "cache", ".", "remove", "(", "s", ")", "else", ":", "entry", "=", "self", ".", "cache", ".", "get", "(", "e", ")", "sig", "=", "self", ".", "cache", ".", "get", "(", "s", ")", "if", "(", "entry", "is", "not", "None", ")", "and", "(", "sig", "is", "not", "None", ")", ":", "for", "i", "in", "self", ".", "hooks", ":", "try", ":", "i", ".", "update", "(", "e", ")", "except", ":", "pass", "entry", ".", "reset_ttl", "(", "e", ")", "sig", ".", "reset_ttl", "(", "s", ")", "else", ":", "e", ".", "rrsig", "=", "s", "self", ".", "cache", ".", "add", "(", "e", ")", "self", ".", "cache", ".", "add", "(", "s", ")", "for", "i", "in", "self", ".", "hooks", ":", "try", ":", "i", ".", "add", "(", "e", ")", "except", ":", "pass", "precache", ".", "remove", "(", "e", ")", "sigs", ".", "remove", "(", "s", ")", "self", ".", "update_record", "(", "now", ",", "record", ")", "if", "self", ".", "bypass", ":", "for", "e", "in", "precache", ":", "if", "e", "in", "self", ".", "cache", ".", "entries", "(", ")", ":", "if", "e", ".", "is_expired", "(", "now", ")", ":", "for", "i", "in", "self", ".", "hooks", ":", "try", ":", "i", ".", "remove", "(", "e", ")", "except", ":", "pass", "self", ".", "cache", ".", "remove", "(", "e", ")", "else", ":", "entry", "=", "self", ".", "cache", ".", "get", "(", "e", ")", "if", "(", "entry", "is", "not", "None", ")", ":", "for", "i", "in", "self", ".", "hooks", ":", "try", ":", "i", ".", "update", "(", "e", ")", "except", ":", "pass", "entry", ".", "reset_ttl", "(", "e", ")", "else", ":", "self", ".", "cache", ".", "add", "(", "e", ")", "for", "i", "in", "self", ".", "hooks", ":", "try", ":", "i", ".", "add", "(", "e", ")", "except", ":", "pass", "self", ".", "update_record", "(", "now", ",", "record", ")" ]
Deal with incoming response packets. All answers are held in the cache, and listeners are notified.
[ "Deal", "with", "incoming", "response", "packets", ".", "All", "answers", "are", "held", "in", "the", "cache", "and", "listeners", "are", "notified", "." ]
train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L1952-L2036
svinota/mdns
mdns/zeroconf.py
Zeroconf.handle_query
def handle_query(self, msg, addr, port, orig):
        """
        Deal with incoming query packets.  Provides a response if possible.

        msg - message to process
        addr - dst addr
        port - dst port
        orig - originating address (for adaptive records)
        """
        out = None

        # Support unicast client responses
        #
        if port != _MDNS_PORT:
            out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0)
            for question in msg.questions:
                out.add_question(question)

        for question in msg.questions:
            if question.type == _TYPE_PTR:
                for service in self.services.values():
                    if question.name == service.type:
                        # FIXME: sometimes we just not in time filling cache
                        answer = self.cache.get(
                            DNSPointer(service.type, _TYPE_PTR, _CLASS_IN,
                                       service.ttl, service.name))
                        if out is None and answer is not None:
                            out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
                        if answer:
                            out.add_answer(msg, answer)

            if question.type == _TYPE_AXFR:
                if question.name in list(self.zones.keys()):
                    if out is None:
                        out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
                    for i in self.zones[question.name].services.values():
                        out.add_answer(msg, i)
            else:
                try:
                    if out is None:
                        out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
                    service = self.services.get(question.name.lower(), None)
                    try:
                        rs = service.records
                    except:
                        rs = []
                    # Answer A record queries for any service addresses we know
                    if (question.type == _TYPE_A or \
                            question.type == _TYPE_ANY) \
                            and (_TYPE_A in rs):
                        for service in self.services.values():
                            if service.server == question.name.lower():
                                for i in service.address:
                                    out.add_answer(msg, self.cache.get(
                                        DNSAddress(question.name, _TYPE_A,
                                                   _CLASS_IN | _CLASS_UNIQUE,
                                                   service.ttl, i)))
                    if not service:
                        continue
                    if (question.type == _TYPE_SRV or \
                            question.type == _TYPE_ANY) and (_TYPE_SRV in rs):
                        out.add_answer(msg, self.cache.get(
                            DNSService(question.name, _TYPE_SRV,
                                       _CLASS_IN | _CLASS_UNIQUE,
                                       service.ttl, service.priority,
                                       service.weight, service.port,
                                       service.server)))
                    if (question.type == _TYPE_TXT or \
                            question.type == _TYPE_ANY) and \
                            (_TYPE_TXT in rs):
                        out.add_answer(msg, self.cache.get(
                            DNSText(question.name, _TYPE_TXT,
                                    _CLASS_IN | _CLASS_UNIQUE,
                                    service.ttl, service.text)))
                    if (question.type == _TYPE_SRV) and (_TYPE_SRV in rs):
                        for i in service.address:
                            out.add_additional_answer(self.cache.get(
                                DNSAddress(service.server, _TYPE_A,
                                           _CLASS_IN | _CLASS_UNIQUE,
                                           service.ttl, i)))
                except:
                    traceback.print_exc()

        if out is not None and out.answers:
            out.id = msg.id
            self.send(out, addr, port)
python
def handle_query(self, msg, addr, port, orig):
        """
        Deal with incoming query packets.  Provides a response if possible.

        msg - message to process
        addr - dst addr
        port - dst port
        orig - originating address (for adaptive records)
        """
        out = None

        # Support unicast client responses
        #
        if port != _MDNS_PORT:
            out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0)
            for question in msg.questions:
                out.add_question(question)

        for question in msg.questions:
            if question.type == _TYPE_PTR:
                for service in self.services.values():
                    if question.name == service.type:
                        # FIXME: sometimes we just not in time filling cache
                        answer = self.cache.get(
                            DNSPointer(service.type, _TYPE_PTR, _CLASS_IN,
                                       service.ttl, service.name))
                        if out is None and answer is not None:
                            out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
                        if answer:
                            out.add_answer(msg, answer)

            if question.type == _TYPE_AXFR:
                if question.name in list(self.zones.keys()):
                    if out is None:
                        out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
                    for i in self.zones[question.name].services.values():
                        out.add_answer(msg, i)
            else:
                try:
                    if out is None:
                        out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
                    service = self.services.get(question.name.lower(), None)
                    try:
                        rs = service.records
                    except:
                        rs = []
                    # Answer A record queries for any service addresses we know
                    if (question.type == _TYPE_A or \
                            question.type == _TYPE_ANY) \
                            and (_TYPE_A in rs):
                        for service in self.services.values():
                            if service.server == question.name.lower():
                                for i in service.address:
                                    out.add_answer(msg, self.cache.get(
                                        DNSAddress(question.name, _TYPE_A,
                                                   _CLASS_IN | _CLASS_UNIQUE,
                                                   service.ttl, i)))
                    if not service:
                        continue
                    if (question.type == _TYPE_SRV or \
                            question.type == _TYPE_ANY) and (_TYPE_SRV in rs):
                        out.add_answer(msg, self.cache.get(
                            DNSService(question.name, _TYPE_SRV,
                                       _CLASS_IN | _CLASS_UNIQUE,
                                       service.ttl, service.priority,
                                       service.weight, service.port,
                                       service.server)))
                    if (question.type == _TYPE_TXT or \
                            question.type == _TYPE_ANY) and \
                            (_TYPE_TXT in rs):
                        out.add_answer(msg, self.cache.get(
                            DNSText(question.name, _TYPE_TXT,
                                    _CLASS_IN | _CLASS_UNIQUE,
                                    service.ttl, service.text)))
                    if (question.type == _TYPE_SRV) and (_TYPE_SRV in rs):
                        for i in service.address:
                            out.add_additional_answer(self.cache.get(
                                DNSAddress(service.server, _TYPE_A,
                                           _CLASS_IN | _CLASS_UNIQUE,
                                           service.ttl, i)))
                except:
                    traceback.print_exc()

        if out is not None and out.answers:
            out.id = msg.id
            self.send(out, addr, port)
[ "def", "handle_query", "(", "self", ",", "msg", ",", "addr", ",", "port", ",", "orig", ")", ":", "out", "=", "None", "# Support unicast client responses", "#", "if", "port", "!=", "_MDNS_PORT", ":", "out", "=", "DNSOutgoing", "(", "_FLAGS_QR_RESPONSE", "|", "_FLAGS_AA", ",", "0", ")", "for", "question", "in", "msg", ".", "questions", ":", "out", ".", "add_question", "(", "question", ")", "for", "question", "in", "msg", ".", "questions", ":", "if", "question", ".", "type", "==", "_TYPE_PTR", ":", "for", "service", "in", "self", ".", "services", ".", "values", "(", ")", ":", "if", "question", ".", "name", "==", "service", ".", "type", ":", "# FIXME: sometimes we just not in time filling cache", "answer", "=", "self", ".", "cache", ".", "get", "(", "DNSPointer", "(", "service", ".", "type", ",", "_TYPE_PTR", ",", "_CLASS_IN", ",", "service", ".", "ttl", ",", "service", ".", "name", ")", ")", "if", "out", "is", "None", "and", "answer", "is", "not", "None", ":", "out", "=", "DNSOutgoing", "(", "_FLAGS_QR_RESPONSE", "|", "_FLAGS_AA", ")", "if", "answer", ":", "out", ".", "add_answer", "(", "msg", ",", "answer", ")", "if", "question", ".", "type", "==", "_TYPE_AXFR", ":", "if", "question", ".", "name", "in", "list", "(", "self", ".", "zones", ".", "keys", "(", ")", ")", ":", "if", "out", "is", "None", ":", "out", "=", "DNSOutgoing", "(", "_FLAGS_QR_RESPONSE", "|", "_FLAGS_AA", ")", "for", "i", "in", "self", ".", "zones", "[", "question", ".", "name", "]", ".", "services", ".", "values", "(", ")", ":", "out", ".", "add_answer", "(", "msg", ",", "i", ")", "else", ":", "try", ":", "if", "out", "is", "None", ":", "out", "=", "DNSOutgoing", "(", "_FLAGS_QR_RESPONSE", "|", "_FLAGS_AA", ")", "service", "=", "self", ".", "services", ".", "get", "(", "question", ".", "name", ".", "lower", "(", ")", ",", "None", ")", "try", ":", "rs", "=", "service", ".", "records", "except", ":", "rs", "=", "[", "]", "# Answer A record queries for any service addresses we know", "if", "(", "question", ".", "type", "==", "_TYPE_A", "or", "question", ".", "type", "==", "_TYPE_ANY", ")", "and", "(", "_TYPE_A", "in", "rs", ")", ":", "for", "service", "in", "self", ".", "services", ".", "values", "(", ")", ":", "if", "service", ".", "server", "==", "question", ".", "name", ".", "lower", "(", ")", ":", "for", "i", "in", "service", ".", "address", ":", "out", ".", "add_answer", "(", "msg", ",", "self", ".", "cache", ".", "get", "(", "DNSAddress", "(", "question", ".", "name", ",", "_TYPE_A", ",", "_CLASS_IN", "|", "_CLASS_UNIQUE", ",", "service", ".", "ttl", ",", "i", ")", ")", ")", "if", "not", "service", ":", "continue", "if", "(", "question", ".", "type", "==", "_TYPE_SRV", "or", "question", ".", "type", "==", "_TYPE_ANY", ")", "and", "(", "_TYPE_SRV", "in", "rs", ")", ":", "out", ".", "add_answer", "(", "msg", ",", "self", ".", "cache", ".", "get", "(", "DNSService", "(", "question", ".", "name", ",", "_TYPE_SRV", ",", "_CLASS_IN", "|", "_CLASS_UNIQUE", ",", "service", ".", "ttl", ",", "service", ".", "priority", ",", "service", ".", "weight", ",", "service", ".", "port", ",", "service", ".", "server", ")", ")", ")", "if", "(", "question", ".", "type", "==", "_TYPE_TXT", "or", "question", ".", "type", "==", "_TYPE_ANY", ")", "and", "(", "_TYPE_TXT", "in", "rs", ")", ":", "out", ".", "add_answer", "(", "msg", ",", "self", ".", "cache", ".", "get", "(", "DNSText", "(", "question", ".", "name", ",", "_TYPE_TXT", ",", "_CLASS_IN", "|", "_CLASS_UNIQUE", ",", "service", ".", "ttl", ",", "service", ".", "text", ")", ")", ")", 
"if", "(", "question", ".", "type", "==", "_TYPE_SRV", ")", "and", "(", "_TYPE_SRV", "in", "rs", ")", ":", "for", "i", "in", "service", ".", "address", ":", "out", ".", "add_additional_answer", "(", "self", ".", "cache", ".", "get", "(", "DNSAddress", "(", "service", ".", "server", ",", "_TYPE_A", ",", "_CLASS_IN", "|", "_CLASS_UNIQUE", ",", "service", ".", "ttl", ",", "i", ")", ")", ")", "except", ":", "traceback", ".", "print_exc", "(", ")", "if", "out", "is", "not", "None", "and", "out", ".", "answers", ":", "out", ".", "id", "=", "msg", ".", "id", "self", ".", "send", "(", "out", ",", "addr", ",", "port", ")" ]
Deal with incoming query packets. Provides a response if possible. msg - message to process addr - dst addr port - dst port orig - originating address (for adaptive records)
[ "Deal", "with", "incoming", "query", "packets", ".", "Provides", "a", "response", "if", "possible", "." ]
train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L2040-L2128
svinota/mdns
mdns/zeroconf.py
Zeroconf.send
def send(self, out, addr=_MDNS_ADDR, port=_MDNS_PORT):
        """Sends an outgoing packet."""
        # This is a quick test to see if we can parse the packets we generate
        #temp = DNSIncoming(out.packet())
        for i in self.intf.values():
            try:
                return i.sendto(out.packet(), 0, (addr, port))
            except:
                traceback.print_exc()
                # Ignore this, it may be a temporary loss of network connection
        return -1
python
def send(self, out, addr=_MDNS_ADDR, port=_MDNS_PORT):
        """Sends an outgoing packet."""
        # This is a quick test to see if we can parse the packets we generate
        #temp = DNSIncoming(out.packet())
        for i in self.intf.values():
            try:
                return i.sendto(out.packet(), 0, (addr, port))
            except:
                traceback.print_exc()
                # Ignore this, it may be a temporary loss of network connection
        return -1
[ "def", "send", "(", "self", ",", "out", ",", "addr", "=", "_MDNS_ADDR", ",", "port", "=", "_MDNS_PORT", ")", ":", "# This is a quick test to see if we can parse the packets we generate", "#temp = DNSIncoming(out.packet())", "for", "i", "in", "self", ".", "intf", ".", "values", "(", ")", ":", "try", ":", "return", "i", ".", "sendto", "(", "out", ".", "packet", "(", ")", ",", "0", ",", "(", "addr", ",", "port", ")", ")", "except", ":", "traceback", ".", "print_exc", "(", ")", "# Ignore this, it may be a temporary loss of network connection", "return", "-", "1" ]
Sends an outgoing packet.
[ "Sends", "an", "outgoing", "packet", "." ]
train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L2130-L2140
svinota/mdns
mdns/zeroconf.py
Zeroconf.close
def close(self):
        """Ends the background threads, and prevents this instance
        from servicing further queries."""
        if globals()['_GLOBAL_DONE'] == 0:
            globals()['_GLOBAL_DONE'] = 1
            self.notify_all()
            self.engine.notify()
            self.unregister_all_services()
            for i in self.intf.values():
                try:
                    # there are cases, when we start mDNS without network
                    i.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP,
                                 socket.inet_aton(_MDNS_ADDR) + \
                                 socket.inet_aton('0.0.0.0'))
                except:
                    pass
                i.close()
python
def close(self):
        """Ends the background threads, and prevents this instance
        from servicing further queries."""
        if globals()['_GLOBAL_DONE'] == 0:
            globals()['_GLOBAL_DONE'] = 1
            self.notify_all()
            self.engine.notify()
            self.unregister_all_services()
            for i in self.intf.values():
                try:
                    # there are cases, when we start mDNS without network
                    i.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP,
                                 socket.inet_aton(_MDNS_ADDR) + \
                                 socket.inet_aton('0.0.0.0'))
                except:
                    pass
                i.close()
[ "def", "close", "(", "self", ")", ":", "if", "globals", "(", ")", "[", "'_GLOBAL_DONE'", "]", "==", "0", ":", "globals", "(", ")", "[", "'_GLOBAL_DONE'", "]", "=", "1", "self", ".", "notify_all", "(", ")", "self", ".", "engine", ".", "notify", "(", ")", "self", ".", "unregister_all_services", "(", ")", "for", "i", "in", "self", ".", "intf", ".", "values", "(", ")", ":", "try", ":", "# there are cases, when we start mDNS without network", "i", ".", "setsockopt", "(", "socket", ".", "SOL_IP", ",", "socket", ".", "IP_DROP_MEMBERSHIP", ",", "socket", ".", "inet_aton", "(", "_MDNS_ADDR", ")", "+", "socket", ".", "inet_aton", "(", "'0.0.0.0'", ")", ")", "except", ":", "pass", "i", ".", "close", "(", ")" ]
Ends the background threads, and prevents this instance from servicing further queries.
[ "Ends", "the", "background", "threads", "and", "prevent", "this", "instance", "from", "servicing", "further", "queries", "." ]
train
https://github.com/svinota/mdns/blob/295f6407132616a0ff7401124b9057d89555f91d/mdns/zeroconf.py#L2142-L2158
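Because close() drops multicast membership and stops the background threads, a try/finally lifecycle keeps sockets from leaking; a minimal sketch assuming the import path below:

from mdns.zeroconf import Zeroconf  # assumed import path

zc = Zeroconf()
try:
    pass  # register services, browse, or query here
finally:
    zc.close()  # safe to call once; guarded by the _GLOBAL_DONE flag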
productml/blurr
blurr/runner/spark_runner.py
SparkRunner.execute
def execute(self, identity_records: 'RDD',
                old_state_rdd: Optional['RDD'] = None) -> 'RDD':
        """
        Executes Blurr BTS with the given records. old_state_rdd can be
        provided to load an older state from a previous run.

        :param identity_records: RDD of the form Tuple[Identity, List[TimeAndRecord]]
        :param old_state_rdd: A previous streaming BTS state RDD as
            Tuple[Identity, Streaming BTS State]
        :return: RDD[Identity, Tuple[Streaming BTS State, List of Window BTS output]]
        """
        identity_records_with_state = identity_records
        if old_state_rdd:
            identity_records_with_state = identity_records.fullOuterJoin(old_state_rdd)

        return identity_records_with_state.map(lambda x: self._execute_per_identity_records(x))
python
def execute(self, identity_records: 'RDD',
                old_state_rdd: Optional['RDD'] = None) -> 'RDD':
        """
        Executes Blurr BTS with the given records. old_state_rdd can be
        provided to load an older state from a previous run.

        :param identity_records: RDD of the form Tuple[Identity, List[TimeAndRecord]]
        :param old_state_rdd: A previous streaming BTS state RDD as
            Tuple[Identity, Streaming BTS State]
        :return: RDD[Identity, Tuple[Streaming BTS State, List of Window BTS output]]
        """
        identity_records_with_state = identity_records
        if old_state_rdd:
            identity_records_with_state = identity_records.fullOuterJoin(old_state_rdd)

        return identity_records_with_state.map(lambda x: self._execute_per_identity_records(x))
[ "def", "execute", "(", "self", ",", "identity_records", ":", "'RDD'", ",", "old_state_rdd", ":", "Optional", "[", "'RDD'", "]", "=", "None", ")", "->", "'RDD'", ":", "identity_records_with_state", "=", "identity_records", "if", "old_state_rdd", ":", "identity_records_with_state", "=", "identity_records", ".", "fullOuterJoin", "(", "old_state_rdd", ")", "return", "identity_records_with_state", ".", "map", "(", "lambda", "x", ":", "self", ".", "_execute_per_identity_records", "(", "x", ")", ")" ]
Executes Blurr BTS with the given records. old_state_rdd can be provided to load an older state from a previous run. :param identity_records: RDD of the form Tuple[Identity, List[TimeAndRecord]] :param old_state_rdd: A previous streaming BTS state RDD as Tuple[Identity, Streaming BTS State] :return: RDD[Identity, Tuple[Streaming BTS State, List of Window BTS output]]
[ "Executes", "Blurr", "BTS", "with", "the", "given", "records", ".", "old_state_rdd", "can", "be", "provided", "to", "load", "an", "older", "state", "from", "a", "previous", "run", "." ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/runner/spark_runner.py#L80-L93
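A hedged sketch of reusing state across runs; the SparkRunner constructor, the `spark` session variable, and the state path are assumptions, none of which appear in these records:

runner = SparkRunner('streaming_bts.yml', 'window_bts.yml')  # hypothetical constructor arguments
identity_records = runner.get_record_rdd_from_json_files(['events/2019-01-01/*.json'])
old_state = spark.sparkContext.pickleFile('state/2018-12-31')  # hypothetical persisted Tuple[Identity, state] RDD
# fullOuterJoin means identities present in either RDD are processed.
result = runner.execute(identity_records, old_state)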
productml/blurr
blurr/runner/spark_runner.py
SparkRunner.get_record_rdd_from_json_files
def get_record_rdd_from_json_files(self,
                                       json_files: List[str],
                                       data_processor: DataProcessor = SimpleJsonDataProcessor(),
                                       spark_session: Optional['SparkSession'] = None) -> 'RDD':
        """
        Reads the data from the given json_files path and converts them into
        the `Record`s format for processing. `data_processor` is used to
        process the per event data in those files to convert them into
        `Record`.

        :param json_files: List of json file paths. Regular Spark path
            wildcards are accepted.
        :param data_processor: `DataProcessor` to process each event in the
            json files.
        :param spark_session: `SparkSession` to use for execution. If None is
            provided then a basic `SparkSession` is created.
        :return: RDD containing Tuple[Identity, List[TimeAndRecord]] which can
            be used in `execute()`
        """
        spark_context = get_spark_session(spark_session).sparkContext
        raw_records: 'RDD' = spark_context.union(
            [spark_context.textFile(file) for file in json_files])
        return raw_records.mapPartitions(
            lambda x: self.get_per_identity_records(x, data_processor)).groupByKey().mapValues(list)
python
def get_record_rdd_from_json_files(self,
                                       json_files: List[str],
                                       data_processor: DataProcessor = SimpleJsonDataProcessor(),
                                       spark_session: Optional['SparkSession'] = None) -> 'RDD':
        """
        Reads the data from the given json_files path and converts them into
        the `Record`s format for processing. `data_processor` is used to
        process the per event data in those files to convert them into
        `Record`.

        :param json_files: List of json file paths. Regular Spark path
            wildcards are accepted.
        :param data_processor: `DataProcessor` to process each event in the
            json files.
        :param spark_session: `SparkSession` to use for execution. If None is
            provided then a basic `SparkSession` is created.
        :return: RDD containing Tuple[Identity, List[TimeAndRecord]] which can
            be used in `execute()`
        """
        spark_context = get_spark_session(spark_session).sparkContext
        raw_records: 'RDD' = spark_context.union(
            [spark_context.textFile(file) for file in json_files])
        return raw_records.mapPartitions(
            lambda x: self.get_per_identity_records(x, data_processor)).groupByKey().mapValues(list)
[ "def", "get_record_rdd_from_json_files", "(", "self", ",", "json_files", ":", "List", "[", "str", "]", ",", "data_processor", ":", "DataProcessor", "=", "SimpleJsonDataProcessor", "(", ")", ",", "spark_session", ":", "Optional", "[", "'SparkSession'", "]", "=", "None", ")", "->", "'RDD'", ":", "spark_context", "=", "get_spark_session", "(", "spark_session", ")", ".", "sparkContext", "raw_records", ":", "'RDD'", "=", "spark_context", ".", "union", "(", "[", "spark_context", ".", "textFile", "(", "file", ")", "for", "file", "in", "json_files", "]", ")", "return", "raw_records", ".", "mapPartitions", "(", "lambda", "x", ":", "self", ".", "get_per_identity_records", "(", "x", ",", "data_processor", ")", ")", ".", "groupByKey", "(", ")", ".", "mapValues", "(", "list", ")" ]
Reads the data from the given json_files path and converts them into the `Record`s format for processing. `data_processor` is used to process the per event data in those files to convert them into `Record`. :param json_files: List of json file paths. Regular Spark path wildcards are accepted. :param data_processor: `DataProcessor` to process each event in the json files. :param spark_session: `SparkSession` to use for execution. If None is provided then a basic `SparkSession` is created. :return: RDD containing Tuple[Identity, List[TimeAndRecord]] which can be used in `execute()`
[ "Reads", "the", "data", "from", "the", "given", "json_files", "path", "and", "converts", "them", "into", "the", "Record", "s", "format", "for", "processing", ".", "data_processor", "is", "used", "to", "process", "the", "per", "event", "data", "in", "those", "files", "to", "convert", "them", "into", "Record", "." ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/runner/spark_runner.py#L95-L115
productml/blurr
blurr/runner/spark_runner.py
SparkRunner.get_record_rdd_from_rdd
def get_record_rdd_from_rdd(
            self,
            rdd: 'RDD',
            data_processor: DataProcessor = SimpleDictionaryDataProcessor(),
    ) -> 'RDD':
        """
        Converts a RDD of raw events into the `Record`s format for processing.
        `data_processor` is used to process the per row data to convert them
        into `Record`.

        :param rdd: RDD containing the raw events.
        :param data_processor: `DataProcessor` to process each row in the
            given `rdd`.
        :return: RDD containing Tuple[Identity, List[TimeAndRecord]] which can
            be used in `execute()`
        """
        return rdd.mapPartitions(
            lambda x: self.get_per_identity_records(x, data_processor)).groupByKey().mapValues(list)
python
def get_record_rdd_from_rdd(
            self,
            rdd: 'RDD',
            data_processor: DataProcessor = SimpleDictionaryDataProcessor(),
    ) -> 'RDD':
        """
        Converts a RDD of raw events into the `Record`s format for processing.
        `data_processor` is used to process the per row data to convert them
        into `Record`.

        :param rdd: RDD containing the raw events.
        :param data_processor: `DataProcessor` to process each row in the
            given `rdd`.
        :return: RDD containing Tuple[Identity, List[TimeAndRecord]] which can
            be used in `execute()`
        """
        return rdd.mapPartitions(
            lambda x: self.get_per_identity_records(x, data_processor)).groupByKey().mapValues(list)
[ "def", "get_record_rdd_from_rdd", "(", "self", ",", "rdd", ":", "'RDD'", ",", "data_processor", ":", "DataProcessor", "=", "SimpleDictionaryDataProcessor", "(", ")", ",", ")", "->", "'RDD'", ":", "return", "rdd", ".", "mapPartitions", "(", "lambda", "x", ":", "self", ".", "get_per_identity_records", "(", "x", ",", "data_processor", ")", ")", ".", "groupByKey", "(", ")", ".", "mapValues", "(", "list", ")" ]
Converts a RDD of raw events into the `Record`s format for processing. `data_processor` is used to process the per row data to convert them into `Record`. :param rdd: RDD containing the raw events. :param data_processor: `DataProcessor` to process each row in the given `rdd`. :return: RDD containing Tuple[Identity, List[TimeAndRecord]] which can be used in `execute()`
[ "Converts", "a", "RDD", "of", "raw", "events", "into", "the", "Record", "s", "format", "for", "processing", ".", "data_processor", "is", "used", "to", "process", "the", "per", "row", "data", "to", "convert", "them", "into", "Record", "." ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/runner/spark_runner.py#L117-L132
productml/blurr
blurr/runner/spark_runner.py
SparkRunner.write_output_file
def write_output_file(self,
                          path: str,
                          per_identity_data: 'RDD',
                          spark_session: Optional['SparkSession'] = None) -> None:
        """
        Basic helper function to persist data to disk.

        If window BTS was provided then the window BTS output is written in
        csv format, otherwise, the streaming BTS output is written in JSON
        format to the `path` provided.

        :param path: Path where the output should be written.
        :param per_identity_data: Output of the `execute()` call.
        :param spark_session: `SparkSession` to use for execution. If None is
            provided then a basic `SparkSession` is created.
        :return:
        """
        _spark_session_ = get_spark_session(spark_session)
        if not self._window_bts:
            per_identity_data.flatMap(
                lambda x: [json.dumps(data, cls=BlurrJSONEncoder) for data in x[1][0].items()]
            ).saveAsTextFile(path)
        else:
            # Convert to a DataFrame first so that the data can be saved as a CSV
            _spark_session_.createDataFrame(per_identity_data.flatMap(lambda x: x[1][1])).write.csv(
                path, header=True)
python
def write_output_file(self,
                          path: str,
                          per_identity_data: 'RDD',
                          spark_session: Optional['SparkSession'] = None) -> None:
        """
        Basic helper function to persist data to disk.

        If window BTS was provided then the window BTS output is written in
        csv format, otherwise, the streaming BTS output is written in JSON
        format to the `path` provided.

        :param path: Path where the output should be written.
        :param per_identity_data: Output of the `execute()` call.
        :param spark_session: `SparkSession` to use for execution. If None is
            provided then a basic `SparkSession` is created.
        :return:
        """
        _spark_session_ = get_spark_session(spark_session)
        if not self._window_bts:
            per_identity_data.flatMap(
                lambda x: [json.dumps(data, cls=BlurrJSONEncoder) for data in x[1][0].items()]
            ).saveAsTextFile(path)
        else:
            # Convert to a DataFrame first so that the data can be saved as a CSV
            _spark_session_.createDataFrame(per_identity_data.flatMap(lambda x: x[1][1])).write.csv(
                path, header=True)
[ "def", "write_output_file", "(", "self", ",", "path", ":", "str", ",", "per_identity_data", ":", "'RDD'", ",", "spark_session", ":", "Optional", "[", "'SparkSession'", "]", "=", "None", ")", "->", "None", ":", "_spark_session_", "=", "get_spark_session", "(", "spark_session", ")", "if", "not", "self", ".", "_window_bts", ":", "per_identity_data", ".", "flatMap", "(", "lambda", "x", ":", "[", "json", ".", "dumps", "(", "data", ",", "cls", "=", "BlurrJSONEncoder", ")", "for", "data", "in", "x", "[", "1", "]", "[", "0", "]", ".", "items", "(", ")", "]", ")", ".", "saveAsTextFile", "(", "path", ")", "else", ":", "# Convert to a DataFrame first so that the data can be saved as a CSV", "_spark_session_", ".", "createDataFrame", "(", "per_identity_data", ".", "flatMap", "(", "lambda", "x", ":", "x", "[", "1", "]", "[", "1", "]", ")", ")", ".", "write", ".", "csv", "(", "path", ",", "header", "=", "True", ")" ]
Basic helper function to persist data to disk. If window BTS was provided then the window BTS output is written in csv format, otherwise, the streaming BTS output is written in JSON format to the `path` provided :param path: Path where the output should be written. :param per_identity_data: Output of the `execute()` call. :param spark_session: `SparkSession` to use for execution. If None is provided then a basic `SparkSession` is created. :return:
[ "Basic", "helper", "function", "to", "persist", "data", "to", "disk", "." ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/runner/spark_runner.py#L134-L158
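Continuing the hypothetical pipeline sketched after the execute() record above: the output format is decided by whether a window BTS was configured, not by the path given.

runner.write_output_file('output/sessions', result)  # hypothetical output path
# With a window BTS: CSV part files with a header row.
# Without one: text part files of JSON-encoded streaming BTS state.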
productml/blurr
blurr/runner/spark_runner.py
SparkRunner.print_output
def print_output(self, per_identity_data: 'RDD') -> None:
        """
        Basic helper function to write data to stdout. If window BTS was
        provided then the window BTS output is written, otherwise, the
        streaming BTS output is written to stdout.

        WARNING - For large datasets this will be extremely slow.

        :param per_identity_data: Output of the `execute()` call.
        """
        if not self._window_bts:
            data = per_identity_data.flatMap(
                lambda x: [json.dumps(data, cls=BlurrJSONEncoder) for data in x[1][0].items()])
        else:
            # Convert to a DataFrame first so that the data can be saved as a CSV
            data = per_identity_data.map(
                lambda x: json.dumps((x[0], x[1][1]), cls=BlurrJSONEncoder))
        for row in data.collect():
            print(row)
python
def print_output(self, per_identity_data: 'RDD') -> None:
        """
        Basic helper function to write data to stdout. If window BTS was
        provided then the window BTS output is written, otherwise, the
        streaming BTS output is written to stdout.

        WARNING - For large datasets this will be extremely slow.

        :param per_identity_data: Output of the `execute()` call.
        """
        if not self._window_bts:
            data = per_identity_data.flatMap(
                lambda x: [json.dumps(data, cls=BlurrJSONEncoder) for data in x[1][0].items()])
        else:
            # Convert to a DataFrame first so that the data can be saved as a CSV
            data = per_identity_data.map(
                lambda x: json.dumps((x[0], x[1][1]), cls=BlurrJSONEncoder))
        for row in data.collect():
            print(row)
[ "def", "print_output", "(", "self", ",", "per_identity_data", ":", "'RDD'", ")", "->", "None", ":", "if", "not", "self", ".", "_window_bts", ":", "data", "=", "per_identity_data", ".", "flatMap", "(", "lambda", "x", ":", "[", "json", ".", "dumps", "(", "data", ",", "cls", "=", "BlurrJSONEncoder", ")", "for", "data", "in", "x", "[", "1", "]", "[", "0", "]", ".", "items", "(", ")", "]", ")", "else", ":", "# Convert to a DataFrame first so that the data can be saved as a CSV", "data", "=", "per_identity_data", ".", "map", "(", "lambda", "x", ":", "json", ".", "dumps", "(", "(", "x", "[", "0", "]", ",", "x", "[", "1", "]", "[", "1", "]", ")", ",", "cls", "=", "BlurrJSONEncoder", ")", ")", "for", "row", "in", "data", ".", "collect", "(", ")", ":", "print", "(", "row", ")" ]
Basic helper function to write data to stdout. If window BTS was provided then the window BTS output is written, otherwise, the streaming BTS output is written to stdout. WARNING - For large datasets this will be extremely slow. :param per_identity_data: Output of the `execute()` call.
[ "Basic", "helper", "function", "to", "write", "data", "to", "stdout", ".", "If", "window", "BTS", "was", "provided", "then", "the", "window", "BTS", "output", "is", "written", "otherwise", "the", "streaming", "BTS", "output", "is", "written", "to", "stdout", "." ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/runner/spark_runner.py#L160-L177
shin-/dockerpy-creds
dockerpycreds/utils.py
find_executable
def find_executable(executable, path=None):
    """
    As distutils.spawn.find_executable, but on Windows, look up
    every extension declared in PATHEXT instead of just `.exe`
    """
    if sys.platform != 'win32':
        return distutils.spawn.find_executable(executable, path)

    if path is None:
        path = os.environ['PATH']

    paths = path.split(os.pathsep)
    extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
    base, ext = os.path.splitext(executable)

    if not os.path.isfile(executable):
        for p in paths:
            for ext in extensions:
                f = os.path.join(p, base + ext)
                if os.path.isfile(f):
                    return f
        return None
    else:
        return executable
python
def find_executable(executable, path=None):
    """
    As distutils.spawn.find_executable, but on Windows, look up
    every extension declared in PATHEXT instead of just `.exe`
    """
    if sys.platform != 'win32':
        return distutils.spawn.find_executable(executable, path)

    if path is None:
        path = os.environ['PATH']

    paths = path.split(os.pathsep)
    extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
    base, ext = os.path.splitext(executable)

    if not os.path.isfile(executable):
        for p in paths:
            for ext in extensions:
                f = os.path.join(p, base + ext)
                if os.path.isfile(f):
                    return f
        return None
    else:
        return executable
[ "def", "find_executable", "(", "executable", ",", "path", "=", "None", ")", ":", "if", "sys", ".", "platform", "!=", "'win32'", ":", "return", "distutils", ".", "spawn", ".", "find_executable", "(", "executable", ",", "path", ")", "if", "path", "is", "None", ":", "path", "=", "os", ".", "environ", "[", "'PATH'", "]", "paths", "=", "path", ".", "split", "(", "os", ".", "pathsep", ")", "extensions", "=", "os", ".", "environ", ".", "get", "(", "'PATHEXT'", ",", "'.exe'", ")", ".", "split", "(", "os", ".", "pathsep", ")", "base", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "executable", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "executable", ")", ":", "for", "p", "in", "paths", ":", "for", "ext", "in", "extensions", ":", "f", "=", "os", ".", "path", ".", "join", "(", "p", ",", "base", "+", "ext", ")", "if", "os", ".", "path", ".", "isfile", "(", "f", ")", ":", "return", "f", "return", "None", "else", ":", "return", "executable" ]
As distutils.spawn.find_executable, but on Windows, look up every extension declared in PATHEXT instead of just `.exe`
[ "As", "distutils", ".", "spawn", ".", "find_executable", "but", "on", "Windows", "look", "up", "every", "extension", "declared", "in", "PATHEXT", "instead", "of", "just", ".", "exe" ]
train
https://github.com/shin-/dockerpy-creds/blob/9c0b66d2e445a838e1518f2c3273df7ddc7ec0d4/dockerpycreds/utils.py#L6-L29
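A quick illustration of the behavior difference; the executable name and resolved path are illustrative:

from dockerpycreds.utils import find_executable

# On Windows, every PATHEXT extension (e.g. '.COM;.EXE;.BAT;.CMD') is tried,
# so find_executable('git') can resolve to something like
# 'C:\\Program Files\\Git\\cmd\\git.exe'; on other platforms the call simply
# defers to distutils.spawn.find_executable.
print(find_executable('git'))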
shin-/dockerpy-creds
dockerpycreds/utils.py
create_environment_dict
def create_environment_dict(overrides):
    """ Create and return a copy of os.environ with the specified overrides
    """
    result = os.environ.copy()
    result.update(overrides or {})
    return result
python
def create_environment_dict(overrides):
    """ Create and return a copy of os.environ with the specified overrides
    """
    result = os.environ.copy()
    result.update(overrides or {})
    return result
[ "def", "create_environment_dict", "(", "overrides", ")", ":", "result", "=", "os", ".", "environ", ".", "copy", "(", ")", "result", ".", "update", "(", "overrides", "or", "{", "}", ")", "return", "result" ]
Create and return a copy of os.environ with the specified overrides
[ "Create", "and", "return", "a", "copy", "of", "os", ".", "environ", "with", "the", "specified", "overrides" ]
train
https://github.com/shin-/dockerpy-creds/blob/9c0b66d2e445a838e1518f2c3273df7ddc7ec0d4/dockerpycreds/utils.py#L32-L38
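A small usage sketch; the override key is illustrative:

from dockerpycreds.utils import create_environment_dict

env = create_environment_dict({'DOCKER_CERT_PATH': '/tmp/certs'})
assert env['DOCKER_CERT_PATH'] == '/tmp/certs'
# os.environ itself is untouched; only the returned copy carries the override.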
shin-/dockerpy-creds
dockerpycreds/store.py
Store.get
def get(self, server):
        """ Retrieve credentials for `server`. If no credentials are found,
        a `StoreError` will be raised.
        """
        if not isinstance(server, six.binary_type):
            server = server.encode('utf-8')
        data = self._execute('get', server)
        result = json.loads(data.decode('utf-8'))

        # docker-credential-pass will return an object for inexistent servers
        # whereas other helpers will exit with returncode != 0. For
        # consistency, if no significant data is returned,
        # raise CredentialsNotFound
        if result['Username'] == '' and result['Secret'] == '':
            raise errors.CredentialsNotFound(
                'No matching credentials in {}'.format(self.program)
            )

        return result
python
def get(self, server):
        """ Retrieve credentials for `server`. If no credentials are found,
        a `StoreError` will be raised.
        """
        if not isinstance(server, six.binary_type):
            server = server.encode('utf-8')
        data = self._execute('get', server)
        result = json.loads(data.decode('utf-8'))

        # docker-credential-pass will return an object for inexistent servers
        # whereas other helpers will exit with returncode != 0. For
        # consistency, if no significant data is returned,
        # raise CredentialsNotFound
        if result['Username'] == '' and result['Secret'] == '':
            raise errors.CredentialsNotFound(
                'No matching credentials in {}'.format(self.program)
            )

        return result
[ "def", "get", "(", "self", ",", "server", ")", ":", "if", "not", "isinstance", "(", "server", ",", "six", ".", "binary_type", ")", ":", "server", "=", "server", ".", "encode", "(", "'utf-8'", ")", "data", "=", "self", ".", "_execute", "(", "'get'", ",", "server", ")", "result", "=", "json", ".", "loads", "(", "data", ".", "decode", "(", "'utf-8'", ")", ")", "# docker-credential-pass will return an object for inexistent servers", "# whereas other helpers will exit with returncode != 0. For", "# consistency, if no significant data is returned,", "# raise CredentialsNotFound", "if", "result", "[", "'Username'", "]", "==", "''", "and", "result", "[", "'Secret'", "]", "==", "''", ":", "raise", "errors", ".", "CredentialsNotFound", "(", "'No matching credentials in {}'", ".", "format", "(", "self", ".", "program", ")", ")", "return", "result" ]
Retrieve credentials for `server`. If no credentials are found, a `StoreError` will be raised.
[ "Retrieve", "credentials", "for", "server", ".", "If", "no", "credentials", "are", "found", "a", "StoreError", "will", "be", "raised", "." ]
train
https://github.com/shin-/dockerpy-creds/blob/9c0b66d2e445a838e1518f2c3273df7ddc7ec0d4/dockerpycreds/store.py#L29-L47
shin-/dockerpy-creds
dockerpycreds/store.py
Store.store
def store(self, server, username, secret):
        """ Store credentials for `server`. Raises a `StoreError` if an error
        occurs.
        """
        data_input = json.dumps({
            'ServerURL': server,
            'Username': username,
            'Secret': secret
        }).encode('utf-8')
        return self._execute('store', data_input)
python
def store(self, server, username, secret):
        """ Store credentials for `server`. Raises a `StoreError` if an error
        occurs.
        """
        data_input = json.dumps({
            'ServerURL': server,
            'Username': username,
            'Secret': secret
        }).encode('utf-8')
        return self._execute('store', data_input)
[ "def", "store", "(", "self", ",", "server", ",", "username", ",", "secret", ")", ":", "data_input", "=", "json", ".", "dumps", "(", "{", "'ServerURL'", ":", "server", ",", "'Username'", ":", "username", ",", "'Secret'", ":", "secret", "}", ")", ".", "encode", "(", "'utf-8'", ")", "return", "self", ".", "_execute", "(", "'store'", ",", "data_input", ")" ]
Store credentials for `server`. Raises a `StoreError` if an error occurs.
[ "Store", "credentials", "for", "server", ".", "Raises", "a", "StoreError", "if", "an", "error", "occurs", "." ]
train
https://github.com/shin-/dockerpy-creds/blob/9c0b66d2e445a838e1518f2c3273df7ddc7ec0d4/dockerpycreds/store.py#L49-L58
shin-/dockerpy-creds
dockerpycreds/store.py
Store.erase
def erase(self, server):
        """ Erase credentials for `server`. Raises a `StoreError` if an error
        occurs.
        """
        if not isinstance(server, six.binary_type):
            server = server.encode('utf-8')
        self._execute('erase', server)
python
def erase(self, server):
        """ Erase credentials for `server`. Raises a `StoreError` if an error
        occurs.
        """
        if not isinstance(server, six.binary_type):
            server = server.encode('utf-8')
        self._execute('erase', server)
[ "def", "erase", "(", "self", ",", "server", ")", ":", "if", "not", "isinstance", "(", "server", ",", "six", ".", "binary_type", ")", ":", "server", "=", "server", ".", "encode", "(", "'utf-8'", ")", "self", ".", "_execute", "(", "'erase'", ",", "server", ")" ]
Erase credentials for `server`. Raises a `StoreError` if an error occurs.
[ "Erase", "credentials", "for", "server", ".", "Raises", "a", "StoreError", "if", "an", "error", "occurs", "." ]
train
https://github.com/shin-/dockerpy-creds/blob/9c0b66d2e445a838e1518f2c3273df7ddc7ec0d4/dockerpycreds/store.py#L60-L66
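A round-trip sketch over the three Store methods above; the helper name is hypothetical and the Store constructor signature is an assumption, since it is not shown in these records:

from dockerpycreds.store import Store
from dockerpycreds import errors

store = Store('docker-credential-desktop')  # assumed: constructor takes the helper executable's name; it must be on PATH
store.store('https://index.docker.io/v1/', 'alice', 's3cr3t')
creds = store.get('https://index.docker.io/v1/')
print(creds['Username'], creds['Secret'])  # keys shown in the get() record above
try:
    store.erase('https://index.docker.io/v1/')
    store.get('https://index.docker.io/v1/')
except errors.CredentialsNotFound:
    pass  # some helpers return empty Username/Secret after erase; others exit non-zero (StoreError)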
productml/blurr
blurr/core/transformer_streaming.py
StreamingTransformerSchema.get_identity
def get_identity(self, record: Record) -> str:
        """
        Evaluates and returns the identity as specified in the schema.
        :param record: Record which is used to determine the identity.
        :return: The evaluated identity
        :raises: IdentityError if identity cannot be determined.
        """
        context = self.schema_context.context
        context.add_record(record)
        identity = self.identity.evaluate(context)

        if not identity:
            raise IdentityError('Could not determine identity using {}. Record is {}'.format(
                self.identity.code_string, record))

        context.remove_record()
        return identity
python
def get_identity(self, record: Record) -> str:
        """
        Evaluates and returns the identity as specified in the schema.
        :param record: Record which is used to determine the identity.
        :return: The evaluated identity
        :raises: IdentityError if identity cannot be determined.
        """
        context = self.schema_context.context
        context.add_record(record)
        identity = self.identity.evaluate(context)

        if not identity:
            raise IdentityError('Could not determine identity using {}. Record is {}'.format(
                self.identity.code_string, record))

        context.remove_record()
        return identity
[ "def", "get_identity", "(", "self", ",", "record", ":", "Record", ")", "->", "str", ":", "context", "=", "self", ".", "schema_context", ".", "context", "context", ".", "add_record", "(", "record", ")", "identity", "=", "self", ".", "identity", ".", "evaluate", "(", "context", ")", "if", "not", "identity", ":", "raise", "IdentityError", "(", "'Could not determine identity using {}. Record is {}'", ".", "format", "(", "self", ".", "identity", ".", "code_string", ",", "record", ")", ")", "context", ".", "remove_record", "(", ")", "return", "identity" ]
Evaluates and returns the identity as specified in the schema. :param record: Record which is used to determine the identity. :return: The evaluated identity :raises: IdentityError if identity cannot be determined.
[ "Evaluates", "and", "returns", "the", "identity", "as", "specified", "in", "the", "schema", ".", ":", "param", "record", ":", "Record", "which", "is", "used", "to", "determine", "the", "identity", ".", ":", "return", ":", "The", "evaluated", "identity", ":", "raises", ":", "IdentityError", "if", "identity", "cannot", "be", "determined", "." ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/transformer_streaming.py#L29-L43
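get_identity is a small push-evaluate-pop pattern around the evaluation context. A standalone sketch of the same pattern, with blurr's Expression/Context machinery replaced by plain eval(); exposing the record under the name 'source' is an assumption about the BTS convention, not something shown in these records:

class IdentityError(Exception):
    pass

def resolve_identity(record: dict, identity_code: str) -> str:
    # Stand-in for identity.evaluate(context): make the record visible
    # to the expression, evaluate it, and fail loudly on a falsy result.
    identity = eval(identity_code, {}, {'source': record})
    if not identity:
        raise IdentityError('Could not determine identity using {}. Record is {}'.format(
            identity_code, record))
    return identity

print(resolve_identity({'user_id': 'u-42'}, "source['user_id']"))  # u-42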
productml/blurr
blurr/core/transformer_streaming.py
StreamingTransformer.run_evaluate
def run_evaluate(self, record: Record): """ Evaluates and updates data in the StreamingTransformer. :param record: The 'source' record used for the update. :raises: IdentityError if identity is different from the one used during initialization. """ record_identity = self._schema.get_identity(record) if self._identity != record_identity: raise IdentityError( 'Identity in transformer ({}) and new record ({}) do not match'.format( self._identity, record_identity)) # Add source record and time to the global context self._evaluation_context.add_record(record) self._evaluation_context.global_add( 'time', DateTimeFieldSchema.sanitize_object( self._schema.time.evaluate(self._evaluation_context))) super().run_evaluate() # Cleanup source and time from the context self._evaluation_context.remove_record() self._evaluation_context.global_remove('time')
python
def run_evaluate(self, record: Record): """ Evaluates and updates data in the StreamingTransformer. :param record: The 'source' record used for the update. :raises: IdentityError if identity is different from the one used during initialization. """ record_identity = self._schema.get_identity(record) if self._identity != record_identity: raise IdentityError( 'Identity in transformer ({}) and new record ({}) do not match'.format( self._identity, record_identity)) # Add source record and time to the global context self._evaluation_context.add_record(record) self._evaluation_context.global_add( 'time', DateTimeFieldSchema.sanitize_object( self._schema.time.evaluate(self._evaluation_context))) super().run_evaluate() # Cleanup source and time from the context self._evaluation_context.remove_record() self._evaluation_context.global_remove('time')
[ "def", "run_evaluate", "(", "self", ",", "record", ":", "Record", ")", ":", "record_identity", "=", "self", ".", "_schema", ".", "get_identity", "(", "record", ")", "if", "self", ".", "_identity", "!=", "record_identity", ":", "raise", "IdentityError", "(", "'Identity in transformer ({}) and new record ({}) do not match'", ".", "format", "(", "self", ".", "_identity", ",", "record_identity", ")", ")", "# Add source record and time to the global context", "self", ".", "_evaluation_context", ".", "add_record", "(", "record", ")", "self", ".", "_evaluation_context", ".", "global_add", "(", "'time'", ",", "DateTimeFieldSchema", ".", "sanitize_object", "(", "self", ".", "_schema", ".", "time", ".", "evaluate", "(", "self", ".", "_evaluation_context", ")", ")", ")", "super", "(", ")", ".", "run_evaluate", "(", ")", "# Cleanup source and time form the context", "self", ".", "_evaluation_context", ".", "remove_record", "(", ")", "self", ".", "_evaluation_context", ".", "global_remove", "(", "'time'", ")" ]
Evaluates and updates data in the StreamingTransformer. :param record: The 'source' record used for the update. :raises: IdentityError if identity is different from the one used during initialization.
[ "Evaluates", "and", "updates", "data", "in", "the", "StreamingTransformer", ".", ":", "param", "record", ":", "The", "source", "record", "used", "for", "the", "update", ".", ":", "raises", ":", "IdentityError", "if", "identity", "is", "different", "from", "the", "one", "used", "during", "initialization", "." ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/transformer_streaming.py#L65-L88
productml/blurr
blurr/core/aggregate.py
AggregateSchema.extend_schema_spec
def extend_schema_spec(self) -> None: """ Injects the identity field """ super().extend_schema_spec() identity_field = { 'Name': '_identity', 'Type': BtsType.STRING, 'Value': 'identity', ATTRIBUTE_INTERNAL: True } if self.ATTRIBUTE_FIELDS in self._spec: self._spec[self.ATTRIBUTE_FIELDS].insert(0, identity_field) self.schema_loader.add_schema_spec(identity_field, self.fully_qualified_name)
python
def extend_schema_spec(self) -> None: """ Injects the identity field """ super().extend_schema_spec() identity_field = { 'Name': '_identity', 'Type': BtsType.STRING, 'Value': 'identity', ATTRIBUTE_INTERNAL: True } if self.ATTRIBUTE_FIELDS in self._spec: self._spec[self.ATTRIBUTE_FIELDS].insert(0, identity_field) self.schema_loader.add_schema_spec(identity_field, self.fully_qualified_name)
[ "def", "extend_schema_spec", "(", "self", ")", "->", "None", ":", "super", "(", ")", ".", "extend_schema_spec", "(", ")", "identity_field", "=", "{", "'Name'", ":", "'_identity'", ",", "'Type'", ":", "BtsType", ".", "STRING", ",", "'Value'", ":", "'identity'", ",", "ATTRIBUTE_INTERNAL", ":", "True", "}", "if", "self", ".", "ATTRIBUTE_FIELDS", "in", "self", ".", "_spec", ":", "self", ".", "_spec", "[", "self", ".", "ATTRIBUTE_FIELDS", "]", ".", "insert", "(", "0", ",", "identity_field", ")", "self", ".", "schema_loader", ".", "add_schema_spec", "(", "identity_field", ",", "self", ".", "fully_qualified_name", ")" ]
Injects the identity field
[ "Injects", "the", "identity", "field" ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/aggregate.py#L38-L51
productml/blurr
blurr/core/aggregate.py
Aggregate._persist
def _persist(self) -> None: """ Persists the current data group """ if self._store: self._store.save(self._key, self._snapshot)
python
def _persist(self) -> None: """ Persists the current data group """ if self._store: self._store.save(self._key, self._snapshot)
[ "def", "_persist", "(", "self", ")", "->", "None", ":", "if", "self", ".", "_store", ":", "self", ".", "_store", ".", "save", "(", "self", ".", "_key", ",", "self", ".", "_snapshot", ")" ]
Persists the current data group
[ "Persists", "the", "current", "data", "group" ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/aggregate.py#L97-L102
productml/blurr
blurr/core/schema_loader.py
SchemaLoader.add_schema_spec
def add_schema_spec(self, spec: Dict[str, Any], fully_qualified_parent_name: str = None) -> Optional[str]: """ Add a schema dictionary to the schema loader. The given schema is stored against fully_qualified_parent_name + ITEM_SEPARATOR('.') + schema.name. :param spec: Schema specification. :param fully_qualified_parent_name: Fully qualified name of the parent. If None is passed then the schema is stored against the schema name. :return: The fully qualified name against which the spec is stored. None is returned if the given spec is not a dictionary or the spec does not contain a 'name' key. """ if not isinstance(spec, dict) or ATTRIBUTE_NAME not in spec: return None name = spec[ATTRIBUTE_NAME] fully_qualified_name = name if fully_qualified_parent_name is None else self.get_fully_qualified_name( fully_qualified_parent_name, name) # Ensure that basic validation for each spec part is done before it is added to spec cache if isinstance(spec, dict): self._error_cache.add( validate_required_attributes(fully_qualified_name, spec, ATTRIBUTE_NAME, ATTRIBUTE_TYPE)) if ATTRIBUTE_TYPE in spec and not Type.contains(spec[ATTRIBUTE_TYPE]): self._error_cache.add( InvalidTypeError(fully_qualified_name, spec, ATTRIBUTE_TYPE, InvalidTypeError.Reason.TYPE_NOT_DEFINED)) self._spec_cache[fully_qualified_name] = spec for key, val in spec.items(): if isinstance(val, list): for item in val: self.add_schema_spec(item, fully_qualified_name) self.add_schema_spec(val, fully_qualified_name) return spec[ATTRIBUTE_NAME]
python
def add_schema_spec(self, spec: Dict[str, Any], fully_qualified_parent_name: str = None) -> Optional[str]: """ Add a schema dictionary to the schema loader. The given schema is stored against fully_qualified_parent_name + ITEM_SEPARATOR('.') + schema.name. :param spec: Schema specification. :param fully_qualified_parent_name: Fully qualified name of the parent. If None is passed then the schema is stored against the schema name. :return: The fully qualified name against which the spec is stored. None is returned if the given spec is not a dictionary or the spec does not contain a 'name' key. """ if not isinstance(spec, dict) or ATTRIBUTE_NAME not in spec: return None name = spec[ATTRIBUTE_NAME] fully_qualified_name = name if fully_qualified_parent_name is None else self.get_fully_qualified_name( fully_qualified_parent_name, name) # Ensure that basic validation for each spec part is done before it is added to spec cache if isinstance(spec, dict): self._error_cache.add( validate_required_attributes(fully_qualified_name, spec, ATTRIBUTE_NAME, ATTRIBUTE_TYPE)) if ATTRIBUTE_TYPE in spec and not Type.contains(spec[ATTRIBUTE_TYPE]): self._error_cache.add( InvalidTypeError(fully_qualified_name, spec, ATTRIBUTE_TYPE, InvalidTypeError.Reason.TYPE_NOT_DEFINED)) self._spec_cache[fully_qualified_name] = spec for key, val in spec.items(): if isinstance(val, list): for item in val: self.add_schema_spec(item, fully_qualified_name) self.add_schema_spec(val, fully_qualified_name) return spec[ATTRIBUTE_NAME]
[ "def", "add_schema_spec", "(", "self", ",", "spec", ":", "Dict", "[", "str", ",", "Any", "]", ",", "fully_qualified_parent_name", ":", "str", "=", "None", ")", "->", "Optional", "[", "str", "]", ":", "if", "not", "isinstance", "(", "spec", ",", "dict", ")", "or", "ATTRIBUTE_NAME", "not", "in", "spec", ":", "return", "None", "name", "=", "spec", "[", "ATTRIBUTE_NAME", "]", "fully_qualified_name", "=", "name", "if", "fully_qualified_parent_name", "is", "None", "else", "self", ".", "get_fully_qualified_name", "(", "fully_qualified_parent_name", ",", "name", ")", "# Ensure that basic validation for each spec part is done before it is added to spec cache", "if", "isinstance", "(", "spec", ",", "dict", ")", ":", "self", ".", "_error_cache", ".", "add", "(", "validate_required_attributes", "(", "fully_qualified_name", ",", "spec", ",", "ATTRIBUTE_NAME", ",", "ATTRIBUTE_TYPE", ")", ")", "if", "ATTRIBUTE_TYPE", "in", "spec", "and", "not", "Type", ".", "contains", "(", "spec", "[", "ATTRIBUTE_TYPE", "]", ")", ":", "self", ".", "_error_cache", ".", "add", "(", "InvalidTypeError", "(", "fully_qualified_name", ",", "spec", ",", "ATTRIBUTE_TYPE", ",", "InvalidTypeError", ".", "Reason", ".", "TYPE_NOT_DEFINED", ")", ")", "self", ".", "_spec_cache", "[", "fully_qualified_name", "]", "=", "spec", "for", "key", ",", "val", "in", "spec", ".", "items", "(", ")", ":", "if", "isinstance", "(", "val", ",", "list", ")", ":", "for", "item", "in", "val", ":", "self", ".", "add_schema_spec", "(", "item", ",", "fully_qualified_name", ")", "self", ".", "add_schema_spec", "(", "val", ",", "fully_qualified_name", ")", "return", "spec", "[", "ATTRIBUTE_NAME", "]" ]
Add a schema dictionary to the schema loader. The given schema is stored against fully_qualified_parent_name + ITEM_SEPARATOR('.') + schema.name. :param spec: Schema specification. :param fully_qualified_parent_name: Fully qualified name of the parent. If None is passed then the schema is stored against the schema name. :return: The fully qualified name against which the spec is stored. None is returned if the given spec is not a dictionary or the spec does not contain a 'name' key.
[ "Add", "a", "schema", "dictionary", "to", "the", "schema", "loader", ".", "The", "given", "schema", "is", "stored", "against", "fully_qualified_parent_name", "+", "ITEM_SEPARATOR", "(", ".", ")", "+", "schema", ".", "name", ".", ":", "param", "spec", ":", "Schema", "specification", ".", ":", "param", "fully_qualified_parent_name", ":", "Full", "qualified", "name", "of", "the", "parent", ".", "If", "None", "is", "passed", "then", "the", "schema", "is", "stored", "against", "the", "schema", "name", ".", ":", "return", ":", "The", "fully", "qualified", "name", "against", "which", "the", "spec", "is", "stored", ".", "None", "is", "returned", "if", "the", "given", "spec", "is", "not", "a", "dictionary", "or", "the", "spec", "does", "not", "contain", "a", "name", "key", "." ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/schema_loader.py#L22-L58
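The load-bearing part of add_schema_spec is the recursive naming scheme: every named dict in a nested spec is cached under parent + '.' + name. A minimal sketch of just that recursion, with validation and error caching stripped out (flatten_spec is a hypothetical helper, not part of blurr):

def flatten_spec(spec, parent=None, out=None):
    # Mirrors the recursion in add_schema_spec: dicts without a
    # 'Name' key are ignored, lists are walked item by item.
    out = {} if out is None else out
    if not isinstance(spec, dict) or 'Name' not in spec:
        return out
    fq_name = spec['Name'] if parent is None else parent + '.' + spec['Name']
    out[fq_name] = spec
    for value in spec.values():
        if isinstance(value, list):
            for item in value:
                flatten_spec(item, fq_name, out)
        else:
            flatten_spec(value, fq_name, out)
    return out

spec = {'Name': 'session', 'Type': 'Blurr:Aggregate',
        'Fields': [{'Name': 'events', 'Type': 'integer'}]}
print(sorted(flatten_spec(spec)))  # ['session', 'session.events']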
productml/blurr
blurr/core/schema_loader.py
SchemaLoader.add_errors
def add_errors(self, *errors: Union[BaseSchemaError, SchemaErrorCollection]) -> None: """ Adds errors to the error store for the schema """ for error in errors: self._error_cache.add(error)
python
def add_errors(self, *errors: Union[BaseSchemaError, SchemaErrorCollection]) -> None: """ Adds errors to the error store for the schema """ for error in errors: self._error_cache.add(error)
[ "def", "add_errors", "(", "self", ",", "*", "errors", ":", "Union", "[", "BaseSchemaError", ",", "SchemaErrorCollection", "]", ")", "->", "None", ":", "for", "error", "in", "errors", ":", "self", ".", "_error_cache", ".", "add", "(", "error", ")" ]
Adds errors to the error store for the schema
[ "Adds", "errors", "to", "the", "error", "store", "for", "the", "schema" ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/schema_loader.py#L60-L63
productml/blurr
blurr/core/schema_loader.py
SchemaLoader.get_schema_object
def get_schema_object(self, fully_qualified_name: str) -> 'BaseSchema': """ Used to generate a schema object from the given fully_qualified_name. :param fully_qualified_name: The fully qualified name of the object needed. :return: An initialized schema object """ if fully_qualified_name not in self._schema_cache: spec = self.get_schema_spec(fully_qualified_name) if spec: try: self._schema_cache[fully_qualified_name] = TypeLoader.load_schema( spec.get(ATTRIBUTE_TYPE, None))(fully_qualified_name, self) except TypeLoaderError as err: self.add_errors( InvalidTypeError(fully_qualified_name, spec, ATTRIBUTE_TYPE, InvalidTypeError.Reason.TYPE_NOT_LOADED, err.type_class_name)) return self._schema_cache.get(fully_qualified_name, None)
python
def get_schema_object(self, fully_qualified_name: str) -> 'BaseSchema': """ Used to generate a schema object from the given fully_qualified_name. :param fully_qualified_name: The fully qualified name of the object needed. :return: An initialized schema object """ if fully_qualified_name not in self._schema_cache: spec = self.get_schema_spec(fully_qualified_name) if spec: try: self._schema_cache[fully_qualified_name] = TypeLoader.load_schema( spec.get(ATTRIBUTE_TYPE, None))(fully_qualified_name, self) except TypeLoaderError as err: self.add_errors( InvalidTypeError(fully_qualified_name, spec, ATTRIBUTE_TYPE, InvalidTypeError.Reason.TYPE_NOT_LOADED, err.type_class_name)) return self._schema_cache.get(fully_qualified_name, None)
[ "def", "get_schema_object", "(", "self", ",", "fully_qualified_name", ":", "str", ")", "->", "'BaseSchema'", ":", "if", "fully_qualified_name", "not", "in", "self", ".", "_schema_cache", ":", "spec", "=", "self", ".", "get_schema_spec", "(", "fully_qualified_name", ")", "if", "spec", ":", "try", ":", "self", ".", "_schema_cache", "[", "fully_qualified_name", "]", "=", "TypeLoader", ".", "load_schema", "(", "spec", ".", "get", "(", "ATTRIBUTE_TYPE", ",", "None", ")", ")", "(", "fully_qualified_name", ",", "self", ")", "except", "TypeLoaderError", "as", "err", ":", "self", ".", "add_errors", "(", "InvalidTypeError", "(", "fully_qualified_name", ",", "spec", ",", "ATTRIBUTE_TYPE", ",", "InvalidTypeError", ".", "Reason", ".", "TYPE_NOT_LOADED", ",", "err", ".", "type_class_name", ")", ")", "return", "self", ".", "_schema_cache", ".", "get", "(", "fully_qualified_name", ",", "None", ")" ]
Used to generate a schema object from the given fully_qualified_name. :param fully_qualified_name: The fully qualified name of the object needed. :return: An initialized schema object
[ "Used", "to", "generate", "a", "schema", "object", "from", "the", "given", "fully_qualified_name", ".", ":", "param", "fully_qualified_name", ":", "The", "fully", "qualified", "name", "of", "the", "object", "needed", ".", ":", "return", ":", "An", "initialized", "schema", "object" ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/schema_loader.py#L80-L100
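get_schema_object is a lazy, memoised factory: at most one schema object is built per fully qualified name, and a failed build leaves the cache empty so later lookups return None instead of raising again. The same shape in isolation (LazyRegistry is a stand-in class, not blurr's):

class LazyRegistry:
    def __init__(self, factory):
        self._factory = factory  # name -> object, may raise
        self._cache = {}

    def get(self, name):
        # Build at most once per name; a failed build records the error
        # elsewhere and leaves the cache empty, so repeated lookups
        # return None rather than raising again here.
        if name not in self._cache:
            try:
                self._cache[name] = self._factory(name)
            except ValueError:
                pass
        return self._cache.get(name)

registry = LazyRegistry(lambda name: {'fq_name': name})
assert registry.get('bts.anchor') is registry.get('bts.anchor')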
productml/blurr
blurr/core/schema_loader.py
SchemaLoader.get_store
def get_store(self, fully_qualified_name: str) -> Optional['Store']: """ Used to generate a store object from the given fully_qualified_name. :param fully_qualified_name: The fully qualified name of the store object needed. :return: An initialized store object """ if fully_qualified_name not in self._store_cache: schema = self.get_schema_object(fully_qualified_name) if not schema: return None if Type.is_store_type(schema.type): self._store_cache[fully_qualified_name] = TypeLoader.load_item(schema.type)(schema) else: self.add_errors( InvalidTypeError(fully_qualified_name, {}, ATTRIBUTE_TYPE, InvalidTypeError.Reason.INCORRECT_BASE, schema.type, InvalidTypeError.BaseTypes.STORE)) return self._store_cache.get(fully_qualified_name, None)
python
def get_store(self, fully_qualified_name: str) -> Optional['Store']: """ Used to generate a store object from the given fully_qualified_name. :param fully_qualified_name: The fully qualified name of the store object needed. :return: An initialized store object """ if fully_qualified_name not in self._store_cache: schema = self.get_schema_object(fully_qualified_name) if not schema: return None if Type.is_store_type(schema.type): self._store_cache[fully_qualified_name] = TypeLoader.load_item(schema.type)(schema) else: self.add_errors( InvalidTypeError(fully_qualified_name, {}, ATTRIBUTE_TYPE, InvalidTypeError.Reason.INCORRECT_BASE, schema.type, InvalidTypeError.BaseTypes.STORE)) return self._store_cache.get(fully_qualified_name, None)
[ "def", "get_store", "(", "self", ",", "fully_qualified_name", ":", "str", ")", "->", "Optional", "[", "'Store'", "]", ":", "if", "fully_qualified_name", "not", "in", "self", ".", "_store_cache", ":", "schema", "=", "self", ".", "get_schema_object", "(", "fully_qualified_name", ")", "if", "not", "schema", ":", "return", "None", "if", "Type", ".", "is_store_type", "(", "schema", ".", "type", ")", ":", "self", ".", "_store_cache", "[", "fully_qualified_name", "]", "=", "TypeLoader", ".", "load_item", "(", "schema", ".", "type", ")", "(", "schema", ")", "else", ":", "self", ".", "add_errors", "(", "InvalidTypeError", "(", "fully_qualified_name", ",", "{", "}", ",", "ATTRIBUTE_TYPE", ",", "InvalidTypeError", ".", "Reason", ".", "INCORRECT_BASE", ",", "schema", ".", "type", ",", "InvalidTypeError", ".", "BaseTypes", ".", "STORE", ")", ")", "return", "self", ".", "_store_cache", ".", "get", "(", "fully_qualified_name", ",", "None", ")" ]
Used to generate a store object from the given fully_qualified_name. :param fully_qualified_name: The fully qualified name of the store object needed. :return: An initialized store object
[ "Used", "to", "generate", "a", "store", "object", "from", "the", "given", "fully_qualified_name", ".", ":", "param", "fully_qualified_name", ":", "The", "fully", "qualified", "name", "of", "the", "store", "object", "needed", ".", ":", "return", ":", "An", "initialized", "store", "object" ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/schema_loader.py#L102-L122
productml/blurr
blurr/core/schema_loader.py
SchemaLoader.get_nested_schema_object
def get_nested_schema_object(self, fully_qualified_parent_name: str, nested_item_name: str) -> Optional['BaseSchema']: """ Used to generate a schema object from the given fully_qualified_parent_name and the nested_item_name. :param fully_qualified_parent_name: The fully qualified name of the parent. :param nested_item_name: The nested item name. :return: An initialized schema object of the nested item. """ return self.get_schema_object( self.get_fully_qualified_name(fully_qualified_parent_name, nested_item_name))
python
def get_nested_schema_object(self, fully_qualified_parent_name: str, nested_item_name: str) -> Optional['BaseSchema']: """ Used to generate a schema object from the given fully_qualified_parent_name and the nested_item_name. :param fully_qualified_parent_name: The fully qualified name of the parent. :param nested_item_name: The nested item name. :return: An initialized schema object of the nested item. """ return self.get_schema_object( self.get_fully_qualified_name(fully_qualified_parent_name, nested_item_name))
[ "def", "get_nested_schema_object", "(", "self", ",", "fully_qualified_parent_name", ":", "str", ",", "nested_item_name", ":", "str", ")", "->", "Optional", "[", "'BaseSchema'", "]", ":", "return", "self", ".", "get_schema_object", "(", "self", ".", "get_fully_qualified_name", "(", "fully_qualified_parent_name", ",", "nested_item_name", ")", ")" ]
Used to generate a schema object from the given fully_qualified_parent_name and the nested_item_name. :param fully_qualified_parent_name: The fully qualified name of the parent. :param nested_item_name: The nested item name. :return: An initialized schema object of the nested item.
[ "Used", "to", "generate", "a", "schema", "object", "from", "the", "given", "fully_qualified_parent_name", "and", "the", "nested_item_name", ".", ":", "param", "fully_qualified_parent_name", ":", "The", "fully", "qualified", "name", "of", "the", "parent", ".", ":", "param", "nested_item_name", ":", "The", "nested", "item", "name", ".", ":", "return", ":", "An", "initialized", "schema", "object", "of", "the", "nested", "item", "." ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/schema_loader.py#L130-L140
productml/blurr
blurr/core/schema_loader.py
SchemaLoader.get_fully_qualified_name
def get_fully_qualified_name(fully_qualified_parent_name: str, nested_item_name: str) -> str: """ Returns the fully qualified name by combining the fully_qualified_parent_name and nested_item_name. :param fully_qualified_parent_name: The fully qualified name of the parent. :param nested_item_name: The nested item name. :return: The fully qualified name of the nested item. """ return fully_qualified_parent_name + SchemaLoader.ITEM_SEPARATOR + nested_item_name
python
def get_fully_qualified_name(fully_qualified_parent_name: str, nested_item_name: str) -> str: """ Returns the fully qualified name by combining the fully_qualified_parent_name and nested_item_name. :param fully_qualified_parent_name: The fully qualified name of the parent. :param nested_item_name: The nested item name. :return: The fully qualified name of the nested item. """ return fully_qualified_parent_name + SchemaLoader.ITEM_SEPARATOR + nested_item_name
[ "def", "get_fully_qualified_name", "(", "fully_qualified_parent_name", ":", "str", ",", "nested_item_name", ":", "str", ")", "->", "str", ":", "return", "fully_qualified_parent_name", "+", "SchemaLoader", ".", "ITEM_SEPARATOR", "+", "nested_item_name" ]
Returns the fully qualified name by combining the fully_qualified_parent_name and nested_item_name. :param fully_qualified_parent_name: The fully qualified name of the parent. :param nested_item_name: The nested item name. :return: The fully qualified name of the nested item.
[ "Returns", "the", "fully", "qualified", "name", "by", "combining", "the", "fully_qualified_parent_name", "and", "nested_item_name", ".", ":", "param", "fully_qualified_parent_name", ":", "The", "fully", "qualified", "name", "of", "the", "parent", ".", ":", "param", "nested_item_name", ":", "The", "nested", "item", "name", ".", ":", "return", ":", "The", "fully", "qualified", "name", "of", "the", "nested", "item", "." ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/schema_loader.py#L143-L151
productml/blurr
blurr/core/schema_loader.py
SchemaLoader.get_schema_spec
def get_schema_spec(self, fully_qualified_name: str) -> Dict[str, Any]: """ Used to retrieve the specifications of the schema from the given fully_qualified_name of the schema. :param fully_qualified_name: The fully qualified name of the schema needed. :return: Schema dictionary. """ if fully_qualified_name not in self._spec_cache: self.add_errors(SpecNotFoundError(fully_qualified_name, {})) return self._spec_cache.get(fully_qualified_name, None)
python
def get_schema_spec(self, fully_qualified_name: str) -> Dict[str, Any]: """ Used to retrieve the specifications of the schema from the given fully_qualified_name of the schema. :param fully_qualified_name: The fully qualified name of the schema needed. :return: Schema dictionary. """ if fully_qualified_name not in self._spec_cache: self.add_errors(SpecNotFoundError(fully_qualified_name, {})) return self._spec_cache.get(fully_qualified_name, None)
[ "def", "get_schema_spec", "(", "self", ",", "fully_qualified_name", ":", "str", ")", "->", "Dict", "[", "str", ",", "Any", "]", ":", "if", "fully_qualified_name", "not", "in", "self", ".", "_spec_cache", ":", "self", ".", "add_errors", "(", "SpecNotFoundError", "(", "fully_qualified_name", ",", "{", "}", ")", ")", "return", "self", ".", "_spec_cache", ".", "get", "(", "fully_qualified_name", ",", "None", ")" ]
Used to retrieve the specifications of the schema from the given fully_qualified_name of the schema. :param fully_qualified_name: The fully qualified name of the schema needed. :return: Schema dictionary.
[ "Used", "to", "retrieve", "the", "specifications", "of", "the", "schema", "from", "the", "given", "fully_qualified_name", "of", "schema", ".", ":", "param", "fully_qualified_name", ":", "The", "fully", "qualified", "name", "of", "the", "schema", "needed", ".", ":", "return", ":", "Schema", "dictionary", "." ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/schema_loader.py#L153-L164
productml/blurr
blurr/core/schema_loader.py
SchemaLoader.get_schema_specs_of_type
def get_schema_specs_of_type(self, *schema_types: Type) -> Dict[str, Dict[str, Any]]: """ Returns a dictionary of fully qualified names to schema dictionaries for the schema types provided. :param schema_types: Schema types. :return: Dictionary of fully qualified names to schema dictionaries. """ return { fq_name: schema for fq_name, schema in self._spec_cache.items() if Type.is_type_in(schema.get(ATTRIBUTE_TYPE, ''), list(schema_types)) }
python
def get_schema_specs_of_type(self, *schema_types: Type) -> Dict[str, Dict[str, Any]]: """ Returns a dictionary of fully qualified names to schema dictionaries for the schema types provided. :param schema_types: Schema types. :return: Dictionary of fully qualified names to schema dictionaries. """ return { fq_name: schema for fq_name, schema in self._spec_cache.items() if Type.is_type_in(schema.get(ATTRIBUTE_TYPE, ''), list(schema_types)) }
[ "def", "get_schema_specs_of_type", "(", "self", ",", "*", "schema_types", ":", "Type", ")", "->", "Dict", "[", "str", ",", "Dict", "[", "str", ",", "Any", "]", "]", ":", "return", "{", "fq_name", ":", "schema", "for", "fq_name", ",", "schema", "in", "self", ".", "_spec_cache", ".", "items", "(", ")", "if", "Type", ".", "is_type_in", "(", "schema", ".", "get", "(", "ATTRIBUTE_TYPE", ",", "''", ")", ",", "list", "(", "schema_types", ")", ")", "}" ]
Returns a dictionary of fully qualified names to schema dictionaries for the schema types provided. :param schema_types: Schema types. :return: Dictionary of fully qualified names to schema dictionaries.
[ "Returns", "a", "list", "of", "fully", "qualified", "names", "and", "schema", "dictionary", "tuples", "for", "the", "schema", "types", "provided", ".", ":", "param", "schema_types", ":", "Schema", "types", ".", ":", "return", ":", "List", "of", "fully", "qualified", "names", "and", "schema", "dictionary", "tuples", "." ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/schema_loader.py#L175-L187
productml/blurr
blurr/core/evaluation.py
EvaluationContext.global_add
def global_add(self, key: str, value: Any) -> None: """ Adds a key and value to the global dictionary """ self.global_context[key] = value
python
def global_add(self, key: str, value: Any) -> None: """ Adds a key and value to the global dictionary """ self.global_context[key] = value
[ "def", "global_add", "(", "self", ",", "key", ":", "str", ",", "value", ":", "Any", ")", "->", "None", ":", "self", ".", "global_context", "[", "key", "]", "=", "value" ]
Adds a key and value to the global dictionary
[ "Adds", "a", "key", "and", "value", "to", "the", "global", "dictionary" ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/evaluation.py#L68-L72
productml/blurr
blurr/core/evaluation.py
EvaluationContext.merge
def merge(self, evaluation_context: 'EvaluationContext') -> None: """ Merges the provided evaluation context to the current evaluation context. :param evaluation_context: Evaluation context to merge. """ self.global_context.merge(evaluation_context.global_context) self.local_context.merge(evaluation_context.local_context)
python
def merge(self, evaluation_context: 'EvaluationContext') -> None: """ Merges the provided evaluation context to the current evaluation context. :param evaluation_context: Evaluation context to merge. """ self.global_context.merge(evaluation_context.global_context) self.local_context.merge(evaluation_context.local_context)
[ "def", "merge", "(", "self", ",", "evaluation_context", ":", "'EvaluationContext'", ")", "->", "None", ":", "self", ".", "global_context", ".", "merge", "(", "evaluation_context", ".", "global_context", ")", "self", ".", "local_context", ".", "merge", "(", "evaluation_context", ".", "local_context", ")" ]
Merges the provided evaluation context to the current evaluation context. :param evaluation_context: Evaluation context to merge.
[ "Merges", "the", "provided", "evaluation", "context", "to", "the", "current", "evaluation", "context", ".", ":", "param", "evaluation_context", ":", "Evaluation", "context", "to", "merge", "." ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/evaluation.py#L91-L97
productml/blurr
blurr/core/evaluation.py
Expression.evaluate
def evaluate(self, evaluation_context: EvaluationContext) -> Any: """ Evaluates the expression with the context provided. Most evaluation failures are logged and result in None; NameError, MissingAttributeError and ImportError are re-raised to the caller. :param evaluation_context: Global and local context dictionary to be passed for evaluation """ try: if self.type == ExpressionType.EVAL: return eval(self.code_object, evaluation_context.global_context, evaluation_context.local_context) elif self.type == ExpressionType.EXEC: return exec(self.code_object, evaluation_context.global_context, evaluation_context.local_context) except Exception as err: # Evaluation exceptions are expected because of missing fields in the source 'Record'. logging.debug('{} in evaluating expression {}. Error: {}'.format( type(err).__name__, self.code_string, err)) # These should result in an exception being raised: # NameError - Exceptions thrown because of using names in the expression which are not # present in EvaluationContext. A common cause for this is typos in the BTS. # MissingAttributeError - Exception thrown when a BTS nested item is used which does not # exist. Should only happen for erroneous BTSs. # ImportError - Thrown when there is a failure in importing other modules. if isinstance(err, (NameError, MissingAttributeError, ImportError)): raise err return None
python
def evaluate(self, evaluation_context: EvaluationContext) -> Any: """ Evaluates the expression with the context provided. Most evaluation failures are logged and result in None; NameError, MissingAttributeError and ImportError are re-raised to the caller. :param evaluation_context: Global and local context dictionary to be passed for evaluation """ try: if self.type == ExpressionType.EVAL: return eval(self.code_object, evaluation_context.global_context, evaluation_context.local_context) elif self.type == ExpressionType.EXEC: return exec(self.code_object, evaluation_context.global_context, evaluation_context.local_context) except Exception as err: # Evaluation exceptions are expected because of missing fields in the source 'Record'. logging.debug('{} in evaluating expression {}. Error: {}'.format( type(err).__name__, self.code_string, err)) # These should result in an exception being raised: # NameError - Exceptions thrown because of using names in the expression which are not # present in EvaluationContext. A common cause for this is typos in the BTS. # MissingAttributeError - Exception thrown when a BTS nested item is used which does not # exist. Should only happen for erroneous BTSs. # ImportError - Thrown when there is a failure in importing other modules. if isinstance(err, (NameError, MissingAttributeError, ImportError)): raise err return None
[ "def", "evaluate", "(", "self", ",", "evaluation_context", ":", "EvaluationContext", ")", "->", "Any", ":", "try", ":", "if", "self", ".", "type", "==", "ExpressionType", ".", "EVAL", ":", "return", "eval", "(", "self", ".", "code_object", ",", "evaluation_context", ".", "global_context", ",", "evaluation_context", ".", "local_context", ")", "elif", "self", ".", "type", "==", "ExpressionType", ".", "EXEC", ":", "return", "exec", "(", "self", ".", "code_object", ",", "evaluation_context", ".", "global_context", ",", "evaluation_context", ".", "local_context", ")", "except", "Exception", "as", "err", ":", "# Evaluation exceptions are expected because of missing fields in the source 'Record'.", "logging", ".", "debug", "(", "'{} in evaluating expression {}. Error: {}'", ".", "format", "(", "type", "(", "err", ")", ".", "__name__", ",", "self", ".", "code_string", ",", "err", ")", ")", "# These should result in an exception being raised:", "# NameError - Exceptions thrown because of using names in the expression which are not", "# present in EvaluationContext. A common cause for this is typos in the BTS.", "# MissingAttributeError - Exception thrown when a BTS nested item is used which does not", "# exist. Should only happen for erroneous BTSs.", "# ImportError - Thrown when there is a failure in importing other modules.", "if", "isinstance", "(", "err", ",", "(", "NameError", ",", "MissingAttributeError", ",", "ImportError", ")", ")", ":", "raise", "err", "return", "None" ]
Evaluates the expression with the context provided. Most evaluation failures are logged and result in None; NameError, MissingAttributeError and ImportError are re-raised to the caller. :param evaluation_context: Global and local context dictionary to be passed for evaluation
[ "Evaluates", "the", "expression", "with", "the", "context", "provided", ".", "If", "the", "execution", "results", "in", "failure", "an", "ExpressionEvaluationException", "encapsulating", "the", "underlying", "exception", "is", "raised", ".", ":", "param", "evaluation_context", ":", "Global", "and", "local", "context", "dictionary", "to", "be", "passed", "for", "evaluation" ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/evaluation.py#L120-L149
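Underneath the Expression class this is plain Python: a code object compiled in 'eval' mode yields a value, one compiled in 'exec' mode runs statements for their side effects, and both execute against a globals/locals pair. A minimal standalone sketch, including the selective re-raise of NameError:

# 'eval' mode: the code object is an expression and returns a value.
local_ctx = {'count': 41}
expr = compile('count + 1', '<expression>', 'eval')
print(eval(expr, {}, local_ctx))   # 42

# 'exec' mode: statements mutate the local context instead.
stmt = compile('total = count * 2', '<expression>', 'exec')
exec(stmt, {}, local_ctx)
print(local_ctx['total'])          # 82

# Names missing from both contexts raise NameError, which the class
# above deliberately re-raises instead of swallowing.
try:
    eval(compile('missing_name', '<expression>', 'eval'), {}, local_ctx)
except NameError as err:
    print('re-raised:', err)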
openclimatedata/pymagicc
pymagicc/core.py
_copy_files
def _copy_files(source, target): """ Copy all the files in the source directory to target. Ignores subdirectories. """ source_files = listdir(source) if not exists(target): makedirs(target) for filename in source_files: full_filename = join(source, filename) if isfile(full_filename): shutil.copy(full_filename, target)
python
def _copy_files(source, target): """ Copy all the files in the source directory to target. Ignores subdirectories. """ source_files = listdir(source) if not exists(target): makedirs(target) for filename in source_files: full_filename = join(source, filename) if isfile(full_filename): shutil.copy(full_filename, target)
[ "def", "_copy_files", "(", "source", ",", "target", ")", ":", "source_files", "=", "listdir", "(", "source", ")", "if", "not", "exists", "(", "target", ")", ":", "makedirs", "(", "target", ")", "for", "filename", "in", "source_files", ":", "full_filename", "=", "join", "(", "source", ",", "filename", ")", "if", "isfile", "(", "full_filename", ")", ":", "shutil", ".", "copy", "(", "full_filename", ",", "target", ")" ]
Copy all the files in the source directory to target. Ignores subdirectories.
[ "Copy", "all", "the", "files", "in", "source", "directory", "to", "target", "." ]
train
https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/core.py#L35-L47
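shutil.copytree is recursive, which is why this flat one-level copy is hand-rolled. An equivalent sketch using pathlib, shown for comparison only (copy_flat is illustrative, not part of pymagicc):

import shutil
from pathlib import Path

def copy_flat(source: str, target: str) -> None:
    # Same contract as _copy_files: create target if missing, copy
    # regular files only, and skip subdirectories entirely.
    Path(target).mkdir(parents=True, exist_ok=True)
    for entry in Path(source).iterdir():
        if entry.is_file():
            shutil.copy(entry, target)

# copy_flat('magicc-dist/run', '/tmp/pymagicc-demo/run')  # illustrative paths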
openclimatedata/pymagicc
pymagicc/core.py
MAGICCBase.create_copy
def create_copy(self): """ Initialises a temporary directory structure and copy of MAGICC configuration files and binary. """ if self.executable is None or not isfile(self.executable): raise FileNotFoundError( "Could not find MAGICC{} executable: {}".format( self.version, self.executable ) ) if self.is_temp: assert ( self.root_dir is None ), "A temp copy for this instance has already been created" self.root_dir = mkdtemp(prefix="pymagicc-") if exists(self.run_dir): raise Exception("A copy of MAGICC has already been created.") if not exists(self.root_dir): makedirs(self.root_dir) exec_dir = basename(self.original_dir) # Copy a subset of folders from the MAGICC `original_dir` # Also copy anything which is in the root of the MAGICC distribution # Assumes that the MAGICC binary is in a folder one level below the root # of the MAGICC distribution. i.e. /run/magicc.exe or /bin/magicc dirs_to_copy = [".", "bin", "run"] # Check that the executable is in a valid sub directory assert exec_dir in dirs_to_copy, "binary must be in bin/ or run/ directory" for d in dirs_to_copy: source_dir = abspath(join(self.original_dir, "..", d)) if exists(source_dir): _copy_files(source_dir, join(self.root_dir, d)) # Create an empty out dir # MAGICC assumes that the 'out' directory already exists makedirs(join(self.root_dir, "out")) # Create basic configuration files so magicc can run self.set_years() self.set_config()
python
def create_copy(self): """ Initialises a temporary directory structure and copy of MAGICC configuration files and binary. """ if self.executable is None or not isfile(self.executable): raise FileNotFoundError( "Could not find MAGICC{} executable: {}".format( self.version, self.executable ) ) if self.is_temp: assert ( self.root_dir is None ), "A temp copy for this instance has already been created" self.root_dir = mkdtemp(prefix="pymagicc-") if exists(self.run_dir): raise Exception("A copy of MAGICC has already been created.") if not exists(self.root_dir): makedirs(self.root_dir) exec_dir = basename(self.original_dir) # Copy a subset of folders from the MAGICC `original_dir` # Also copy anything which is in the root of the MAGICC distribution # Assumes that the MAGICC binary is in a folder one level below the root # of the MAGICC distribution. i.e. /run/magicc.exe or /bin/magicc dirs_to_copy = [".", "bin", "run"] # Check that the executable is in a valid sub directory assert exec_dir in dirs_to_copy, "binary must be in bin/ or run/ directory" for d in dirs_to_copy: source_dir = abspath(join(self.original_dir, "..", d)) if exists(source_dir): _copy_files(source_dir, join(self.root_dir, d)) # Create an empty out dir # MAGICC assumes that the 'out' directory already exists makedirs(join(self.root_dir, "out")) # Create basic configuration files so magicc can run self.set_years() self.set_config()
[ "def", "create_copy", "(", "self", ")", ":", "if", "self", ".", "executable", "is", "None", "or", "not", "isfile", "(", "self", ".", "executable", ")", ":", "raise", "FileNotFoundError", "(", "\"Could not find MAGICC{} executable: {}\"", ".", "format", "(", "self", ".", "version", ",", "self", ".", "executable", ")", ")", "if", "self", ".", "is_temp", ":", "assert", "(", "self", ".", "root_dir", "is", "None", ")", ",", "\"A temp copy for this instance has already been created\"", "self", ".", "root_dir", "=", "mkdtemp", "(", "prefix", "=", "\"pymagicc-\"", ")", "if", "exists", "(", "self", ".", "run_dir", ")", ":", "raise", "Exception", "(", "\"A copy of MAGICC has already been created.\"", ")", "if", "not", "exists", "(", "self", ".", "root_dir", ")", ":", "makedirs", "(", "self", ".", "root_dir", ")", "exec_dir", "=", "basename", "(", "self", ".", "original_dir", ")", "# Copy a subset of folders from the MAGICC `original_dir`", "# Also copy anything which is in the root of the MAGICC distribution", "# Assumes that the MAGICC binary is in a folder one level below the root", "# of the MAGICC distribution. i.e. /run/magicc.exe or /bin/magicc", "dirs_to_copy", "=", "[", "\".\"", ",", "\"bin\"", ",", "\"run\"", "]", "# Check that the executable is in a valid sub directory", "assert", "exec_dir", "in", "dirs_to_copy", ",", "\"binary must be in bin/ or run/ directory\"", "for", "d", "in", "dirs_to_copy", ":", "source_dir", "=", "abspath", "(", "join", "(", "self", ".", "original_dir", ",", "\"..\"", ",", "d", ")", ")", "if", "exists", "(", "source_dir", ")", ":", "_copy_files", "(", "source_dir", ",", "join", "(", "self", ".", "root_dir", ",", "d", ")", ")", "# Create an empty out dir", "# MAGICC assumes that the 'out' directory already exists", "makedirs", "(", "join", "(", "self", ".", "root_dir", ",", "\"out\"", ")", ")", "# Create basic configuration files so magicc can run", "self", ".", "set_years", "(", ")", "self", ".", "set_config", "(", ")" ]
Initialises a temporary directory structure and copy of MAGICC configuration files and binary.
[ "Initialises", "a", "temporary", "directory", "structure", "and", "copy", "of", "MAGICC", "configuration", "files", "and", "binary", "." ]
train
https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/core.py#L105-L148
openclimatedata/pymagicc
pymagicc/core.py
MAGICCBase.run
def run(self, scenario=None, only=None, **kwargs): """ Run MAGICC and parse the output. As a reminder, putting ``out_parameters=1`` will cause MAGICC to write out its parameters into ``out/PARAMETERS.OUT`` and they will then be read into ``output.metadata["parameters"]`` where ``output`` is the returned object. Parameters ---------- scenario : :obj:`pymagicc.io.MAGICCData` Scenario to run. If None MAGICC will simply run with whatever config has already been set. only : list of str If not None, only extract variables in this list. kwargs Other config values to pass to MAGICC for the run Returns ------- :obj:`pymagicc.io.MAGICCData` MAGICCData object containing that data in its ``df`` attribute and metadata and parameters (depending on the value of ``include_parameters``) in its ``metadata`` attribute. Raises ------ ValueError If no output is found which matches the list specified in ``only``. """ if not exists(self.root_dir): raise FileNotFoundError(self.root_dir) if self.executable is None: raise ValueError( "MAGICC executable not found, try setting an environment variable `MAGICC_EXECUTABLE_{}=/path/to/binary`".format( self.version ) ) if scenario is not None: kwargs = self.set_emission_scenario_setup(scenario, kwargs) yr_config = {} if "startyear" in kwargs: yr_config["startyear"] = kwargs.pop("startyear") if "endyear" in kwargs: yr_config["endyear"] = kwargs.pop("endyear") if yr_config: self.set_years(**yr_config) # should be able to do some other nice metadata stuff re how magicc was run # etc. here kwargs.setdefault("rundate", get_date_time_string()) self.update_config(**kwargs) self.check_config() exec_dir = basename(self.original_dir) command = [join(self.root_dir, exec_dir, self.binary_name)] if not IS_WINDOWS and self.binary_name.endswith(".exe"): # pragma: no cover command.insert(0, "wine") # On Windows shell=True is required. subprocess.check_call(command, cwd=self.run_dir, shell=IS_WINDOWS) outfiles = self._get_output_filenames() read_cols = {"climate_model": ["MAGICC{}".format(self.version)]} if scenario is not None: read_cols["model"] = scenario["model"].unique().tolist() read_cols["scenario"] = scenario["scenario"].unique().tolist() else: read_cols.setdefault("model", ["unspecified"]) read_cols.setdefault("scenario", ["unspecified"]) mdata = None for filepath in outfiles: try: openscm_var = _get_openscm_var_from_filepath(filepath) if only is None or openscm_var in only: tempdata = MAGICCData( join(self.out_dir, filepath), columns=deepcopy(read_cols) ) mdata = mdata.append(tempdata) if mdata is not None else tempdata except (NoReaderWriterError, InvalidTemporalResError): continue if mdata is None: error_msg = "No output found for only={}".format(only) raise ValueError(error_msg) try: run_paras = self.read_parameters() self.config = run_paras mdata.metadata["parameters"] = run_paras except FileNotFoundError: pass return mdata
python
def run(self, scenario=None, only=None, **kwargs): """ Run MAGICC and parse the output. As a reminder, putting ``out_parameters=1`` will cause MAGICC to write out its parameters into ``out/PARAMETERS.OUT`` and they will then be read into ``output.metadata["parameters"]`` where ``output`` is the returned object. Parameters ---------- scenario : :obj:`pymagicc.io.MAGICCData` Scenario to run. If None MAGICC will simply run with whatever config has already been set. only : list of str If not None, only extract variables in this list. kwargs Other config values to pass to MAGICC for the run Returns ------- :obj:`pymagicc.io.MAGICCData` MAGICCData object containing that data in its ``df`` attribute and metadata and parameters (depending on the value of ``include_parameters``) in its ``metadata`` attribute. Raises ------ ValueError If no output is found which matches the list specified in ``only``. """ if not exists(self.root_dir): raise FileNotFoundError(self.root_dir) if self.executable is None: raise ValueError( "MAGICC executable not found, try setting an environment variable `MAGICC_EXECUTABLE_{}=/path/to/binary`".format( self.version ) ) if scenario is not None: kwargs = self.set_emission_scenario_setup(scenario, kwargs) yr_config = {} if "startyear" in kwargs: yr_config["startyear"] = kwargs.pop("startyear") if "endyear" in kwargs: yr_config["endyear"] = kwargs.pop("endyear") if yr_config: self.set_years(**yr_config) # should be able to do some other nice metadata stuff re how magicc was run # etc. here kwargs.setdefault("rundate", get_date_time_string()) self.update_config(**kwargs) self.check_config() exec_dir = basename(self.original_dir) command = [join(self.root_dir, exec_dir, self.binary_name)] if not IS_WINDOWS and self.binary_name.endswith(".exe"): # pragma: no cover command.insert(0, "wine") # On Windows shell=True is required. subprocess.check_call(command, cwd=self.run_dir, shell=IS_WINDOWS) outfiles = self._get_output_filenames() read_cols = {"climate_model": ["MAGICC{}".format(self.version)]} if scenario is not None: read_cols["model"] = scenario["model"].unique().tolist() read_cols["scenario"] = scenario["scenario"].unique().tolist() else: read_cols.setdefault("model", ["unspecified"]) read_cols.setdefault("scenario", ["unspecified"]) mdata = None for filepath in outfiles: try: openscm_var = _get_openscm_var_from_filepath(filepath) if only is None or openscm_var in only: tempdata = MAGICCData( join(self.out_dir, filepath), columns=deepcopy(read_cols) ) mdata = mdata.append(tempdata) if mdata is not None else tempdata except (NoReaderWriterError, InvalidTemporalResError): continue if mdata is None: error_msg = "No output found for only={}".format(only) raise ValueError(error_msg) try: run_paras = self.read_parameters() self.config = run_paras mdata.metadata["parameters"] = run_paras except FileNotFoundError: pass return mdata
[ "def", "run", "(", "self", ",", "scenario", "=", "None", ",", "only", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "not", "exists", "(", "self", ".", "root_dir", ")", ":", "raise", "FileNotFoundError", "(", "self", ".", "root_dir", ")", "if", "self", ".", "executable", "is", "None", ":", "raise", "ValueError", "(", "\"MAGICC executable not found, try setting an environment variable `MAGICC_EXECUTABLE_{}=/path/to/binary`\"", ".", "format", "(", "self", ".", "version", ")", ")", "if", "scenario", "is", "not", "None", ":", "kwargs", "=", "self", ".", "set_emission_scenario_setup", "(", "scenario", ",", "kwargs", ")", "yr_config", "=", "{", "}", "if", "\"startyear\"", "in", "kwargs", ":", "yr_config", "[", "\"startyear\"", "]", "=", "kwargs", ".", "pop", "(", "\"startyear\"", ")", "if", "\"endyear\"", "in", "kwargs", ":", "yr_config", "[", "\"endyear\"", "]", "=", "kwargs", ".", "pop", "(", "\"endyear\"", ")", "if", "yr_config", ":", "self", ".", "set_years", "(", "*", "*", "yr_config", ")", "# should be able to do some other nice metadata stuff re how magicc was run", "# etc. here", "kwargs", ".", "setdefault", "(", "\"rundate\"", ",", "get_date_time_string", "(", ")", ")", "self", ".", "update_config", "(", "*", "*", "kwargs", ")", "self", ".", "check_config", "(", ")", "exec_dir", "=", "basename", "(", "self", ".", "original_dir", ")", "command", "=", "[", "join", "(", "self", ".", "root_dir", ",", "exec_dir", ",", "self", ".", "binary_name", ")", "]", "if", "not", "IS_WINDOWS", "and", "self", ".", "binary_name", ".", "endswith", "(", "\".exe\"", ")", ":", "# pragma: no cover", "command", ".", "insert", "(", "0", ",", "\"wine\"", ")", "# On Windows shell=True is required.", "subprocess", ".", "check_call", "(", "command", ",", "cwd", "=", "self", ".", "run_dir", ",", "shell", "=", "IS_WINDOWS", ")", "outfiles", "=", "self", ".", "_get_output_filenames", "(", ")", "read_cols", "=", "{", "\"climate_model\"", ":", "[", "\"MAGICC{}\"", ".", "format", "(", "self", ".", "version", ")", "]", "}", "if", "scenario", "is", "not", "None", ":", "read_cols", "[", "\"model\"", "]", "=", "scenario", "[", "\"model\"", "]", ".", "unique", "(", ")", ".", "tolist", "(", ")", "read_cols", "[", "\"scenario\"", "]", "=", "scenario", "[", "\"scenario\"", "]", ".", "unique", "(", ")", ".", "tolist", "(", ")", "else", ":", "read_cols", ".", "setdefault", "(", "\"model\"", ",", "[", "\"unspecified\"", "]", ")", "read_cols", ".", "setdefault", "(", "\"scenario\"", ",", "[", "\"unspecified\"", "]", ")", "mdata", "=", "None", "for", "filepath", "in", "outfiles", ":", "try", ":", "openscm_var", "=", "_get_openscm_var_from_filepath", "(", "filepath", ")", "if", "only", "is", "None", "or", "openscm_var", "in", "only", ":", "tempdata", "=", "MAGICCData", "(", "join", "(", "self", ".", "out_dir", ",", "filepath", ")", ",", "columns", "=", "deepcopy", "(", "read_cols", ")", ")", "mdata", "=", "mdata", ".", "append", "(", "tempdata", ")", "if", "mdata", "is", "not", "None", "else", "tempdata", "except", "(", "NoReaderWriterError", ",", "InvalidTemporalResError", ")", ":", "continue", "if", "mdata", "is", "None", ":", "error_msg", "=", "\"No output found for only={}\"", ".", "format", "(", "only", ")", "raise", "ValueError", "(", "error_msg", ")", "try", ":", "run_paras", "=", "self", ".", "read_parameters", "(", ")", "self", ".", "config", "=", "run_paras", "mdata", ".", "metadata", "[", "\"parameters\"", "]", "=", "run_paras", "except", "FileNotFoundError", ":", "pass", "return", "mdata" ]
Run MAGICC and parse the output. As a reminder, putting ``out_parameters=1`` will cause MAGICC to write out its parameters into ``out/PARAMETERS.OUT`` and they will then be read into ``output.metadata["parameters"]`` where ``output`` is the returned object. Parameters ---------- scenario : :obj:`pymagicc.io.MAGICCData` Scenario to run. If None MAGICC will simply run with whatever config has already been set. only : list of str If not None, only extract variables in this list. kwargs Other config values to pass to MAGICC for the run Returns ------- :obj:`pymagicc.io.MAGICCData` MAGICCData object containing that data in its ``df`` attribute and metadata and parameters (depending on the value of ``include_parameters``) in its ``metadata`` attribute. Raises ------ ValueError If no output is found which matches the list specified in ``only``.
[ "Run", "MAGICC", "and", "parse", "the", "output", "." ]
train
https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/core.py#L170-L274
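A minimal end-to-end sketch of the run API above. It assumes MAGICC6 is the concrete subclass the package exports, that instances work as context managers (creating the temporary copy on entry and cleaning it up on exit), and that rcp26 is a bundled scenario; these are plausible for pymagicc but treated as assumptions here:

from pymagicc import MAGICC6, rcp26

with MAGICC6() as magicc:
    # Scenario in, MAGICCData out. Per the docstring above,
    # out_parameters=1 makes MAGICC dump its full config, which
    # run() folds into results.metadata['parameters'].
    results = magicc.run(rcp26, endyear=2100, out_parameters=1)

print(sorted(results.metadata['parameters']))  # e.g. ['allcfgs', 'outputcfgs', 'years']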
openclimatedata/pymagicc
pymagicc/core.py
MAGICCBase.check_config
def check_config(self): """Check that our MAGICC ``.CFG`` files are set to safely work with PYMAGICC For further detail about why this is required, please see :ref:`MAGICC flags`. Raises ------ ValueError If we are not certain that the config written by PYMAGICC will overwrite all other config i.e. that there will be no unexpected behaviour. A ValueError will also be raised if the user tries to use more than one scenario file. """ cfg_error_msg = ( "PYMAGICC is not the only tuning model that will be used by " "`MAGCFG_USER.CFG`: your run is likely to fail/do odd things" ) emisscen_error_msg = ( "You have more than one `FILE_EMISSCEN_X` flag set. Using more than " "one emissions scenario is hard to debug and unnecessary with " "Pymagicc's dataframe scenario input. Please combine all your " "scenarios into one dataframe with Pymagicc and pandas, then feed " "this single Dataframe into Pymagicc's run API." ) nml_to_check = "nml_allcfgs" usr_cfg = read_cfg_file(join(self.run_dir, "MAGCFG_USER.CFG")) for k in usr_cfg[nml_to_check]: if k.startswith("file_tuningmodel"): first_tuningmodel = k in ["file_tuningmodel", "file_tuningmodel_1"] if first_tuningmodel: if usr_cfg[nml_to_check][k] != "PYMAGICC": raise ValueError(cfg_error_msg) elif usr_cfg[nml_to_check][k] not in ["USER", ""]: raise ValueError(cfg_error_msg) elif k.startswith("file_emisscen_"): if usr_cfg[nml_to_check][k] not in ["NONE", ""]: raise ValueError(emisscen_error_msg)
python
def check_config(self): """Check that our MAGICC ``.CFG`` files are set to safely work with PYMAGICC For further detail about why this is required, please see :ref:`MAGICC flags`. Raises ------ ValueError If we are not certain that the config written by PYMAGICC will overwrite all other config i.e. that there will be no unexpected behaviour. A ValueError will also be raised if the user tries to use more than one scenario file. """ cfg_error_msg = ( "PYMAGICC is not the only tuning model that will be used by " "`MAGCFG_USER.CFG`: your run is likely to fail/do odd things" ) emisscen_error_msg = ( "You have more than one `FILE_EMISSCEN_X` flag set. Using more than " "one emissions scenario is hard to debug and unnecessary with " "Pymagicc's dataframe scenario input. Please combine all your " "scenarios into one dataframe with Pymagicc and pandas, then feed " "this single Dataframe into Pymagicc's run API." ) nml_to_check = "nml_allcfgs" usr_cfg = read_cfg_file(join(self.run_dir, "MAGCFG_USER.CFG")) for k in usr_cfg[nml_to_check]: if k.startswith("file_tuningmodel"): first_tuningmodel = k in ["file_tuningmodel", "file_tuningmodel_1"] if first_tuningmodel: if usr_cfg[nml_to_check][k] != "PYMAGICC": raise ValueError(cfg_error_msg) elif usr_cfg[nml_to_check][k] not in ["USER", ""]: raise ValueError(cfg_error_msg) elif k.startswith("file_emisscen_"): if usr_cfg[nml_to_check][k] not in ["NONE", ""]: raise ValueError(emisscen_error_msg)
[ "def", "check_config", "(", "self", ")", ":", "cfg_error_msg", "=", "(", "\"PYMAGICC is not the only tuning model that will be used by \"", "\"`MAGCFG_USER.CFG`: your run is likely to fail/do odd things\"", ")", "emisscen_error_msg", "=", "(", "\"You have more than one `FILE_EMISSCEN_X` flag set. Using more than \"", "\"one emissions scenario is hard to debug and unnecessary with \"", "\"Pymagicc's dataframe scenario input. Please combine all your \"", "\"scenarios into one dataframe with Pymagicc and pandas, then feed \"", "\"this single Dataframe into Pymagicc's run API.\"", ")", "nml_to_check", "=", "\"nml_allcfgs\"", "usr_cfg", "=", "read_cfg_file", "(", "join", "(", "self", ".", "run_dir", ",", "\"MAGCFG_USER.CFG\"", ")", ")", "for", "k", "in", "usr_cfg", "[", "nml_to_check", "]", ":", "if", "k", ".", "startswith", "(", "\"file_tuningmodel\"", ")", ":", "first_tuningmodel", "=", "k", "in", "[", "\"file_tuningmodel\"", ",", "\"file_tuningmodel_1\"", "]", "if", "first_tuningmodel", ":", "if", "usr_cfg", "[", "nml_to_check", "]", "[", "k", "]", "!=", "\"PYMAGICC\"", ":", "raise", "ValueError", "(", "cfg_error_msg", ")", "elif", "usr_cfg", "[", "nml_to_check", "]", "[", "k", "]", "not", "in", "[", "\"USER\"", ",", "\"\"", "]", ":", "raise", "ValueError", "(", "cfg_error_msg", ")", "elif", "k", ".", "startswith", "(", "\"file_emisscen_\"", ")", ":", "if", "usr_cfg", "[", "nml_to_check", "]", "[", "k", "]", "not", "in", "[", "\"NONE\"", ",", "\"\"", "]", ":", "raise", "ValueError", "(", "emisscen_error_msg", ")" ]
Check that our MAGICC ``.CFG`` files are set to safely work with PYMAGICC

For further detail about why this is required, please see :ref:`MAGICC flags`.

Raises
------
ValueError
    If we are not certain that the config written by PYMAGICC will overwrite
    all other config i.e. that there will be no unexpected behaviour. A
    ValueError will also be raised if the user tries to use more than one
    scenario file.
[ "Check", "that", "our", "MAGICC", ".", "CFG", "files", "are", "set", "to", "safely", "work", "with", "PYMAGICC" ]
train
https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/core.py#L293-L331
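Editor's usage sketch (not part of the dataset record): `check_config` is meant to be called on a live MAGICC instance whose run directory already holds `MAGCFG_USER.CFG`. The `MAGICC6` class and its context-manager behaviour are assumptions inferred from the repository linked above.

# hedged sketch, assuming pymagicc exposes a MAGICC6 wrapper around MAGICCBase
from pymagicc import MAGICC6

with MAGICC6() as magicc:  # assumed: creates a temporary copy with a populated run_dir
    magicc.check_config()  # raises ValueError if MAGCFG_USER.CFG is unsafe for Pymagicc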
openclimatedata/pymagicc
pymagicc/core.py
MAGICCBase.write
def write(self, mdata, name):
        """Write an input file to disk

        Parameters
        ----------
        mdata : :obj:`pymagicc.io.MAGICCData`
            A MAGICCData instance with the data to write

        name : str
            The name of the file to write. The file will be written to the MAGICC
            instance's run directory i.e. ``self.run_dir``
        """
        mdata.write(join(self.run_dir, name), self.version)
python
def write(self, mdata, name):
        """Write an input file to disk

        Parameters
        ----------
        mdata : :obj:`pymagicc.io.MAGICCData`
            A MAGICCData instance with the data to write

        name : str
            The name of the file to write. The file will be written to the MAGICC
            instance's run directory i.e. ``self.run_dir``
        """
        mdata.write(join(self.run_dir, name), self.version)
[ "def", "write", "(", "self", ",", "mdata", ",", "name", ")", ":", "mdata", ".", "write", "(", "join", "(", "self", ".", "run_dir", ",", "name", ")", ",", "self", ".", "version", ")" ]
Write an input file to disk

Parameters
----------
mdata : :obj:`pymagicc.io.MAGICCData`
    A MAGICCData instance with the data to write

name : str
    The name of the file to write. The file will be written to the MAGICC
    instance's run directory i.e. ``self.run_dir``
[ "Write", "an", "input", "file", "to", "disk" ]
train
https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/core.py#L333-L345
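Editor's usage sketch (assumption-laden, not from the record): the `rcp26` import below is a guess at pymagicc's scenarios module, used only to show how `write` is typically fed a MAGICCData object.

from pymagicc import MAGICC6
from pymagicc.scenarios import rcp26  # assumed import path for a bundled scenario

with MAGICC6() as magicc:
    magicc.write(rcp26, "SCENARIO.SCEN")  # the file lands in magicc.run_dir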
openclimatedata/pymagicc
pymagicc/core.py
MAGICCBase.read_parameters
def read_parameters(self):
        """
        Read a parameters.out file

        Returns
        -------
        dict
            A dictionary containing all the configuration used by MAGICC
        """
        param_fname = join(self.out_dir, "PARAMETERS.OUT")

        if not exists(param_fname):
            raise FileNotFoundError("No PARAMETERS.OUT found")

        with open(param_fname) as nml_file:
            parameters = dict(f90nml.read(nml_file))
            for group in ["nml_years", "nml_allcfgs", "nml_outputcfgs"]:
                parameters[group] = dict(parameters[group])
                for k, v in parameters[group].items():
                    parameters[group][k] = _clean_value(v)
                parameters[group.replace("nml_", "")] = parameters.pop(group)
            self.config = parameters

        return parameters
python
def read_parameters(self):
        """
        Read a parameters.out file

        Returns
        -------
        dict
            A dictionary containing all the configuration used by MAGICC
        """
        param_fname = join(self.out_dir, "PARAMETERS.OUT")

        if not exists(param_fname):
            raise FileNotFoundError("No PARAMETERS.OUT found")

        with open(param_fname) as nml_file:
            parameters = dict(f90nml.read(nml_file))
            for group in ["nml_years", "nml_allcfgs", "nml_outputcfgs"]:
                parameters[group] = dict(parameters[group])
                for k, v in parameters[group].items():
                    parameters[group][k] = _clean_value(v)
                parameters[group.replace("nml_", "")] = parameters.pop(group)
            self.config = parameters

        return parameters
[ "def", "read_parameters", "(", "self", ")", ":", "param_fname", "=", "join", "(", "self", ".", "out_dir", ",", "\"PARAMETERS.OUT\"", ")", "if", "not", "exists", "(", "param_fname", ")", ":", "raise", "FileNotFoundError", "(", "\"No PARAMETERS.OUT found\"", ")", "with", "open", "(", "param_fname", ")", "as", "nml_file", ":", "parameters", "=", "dict", "(", "f90nml", ".", "read", "(", "nml_file", ")", ")", "for", "group", "in", "[", "\"nml_years\"", ",", "\"nml_allcfgs\"", ",", "\"nml_outputcfgs\"", "]", ":", "parameters", "[", "group", "]", "=", "dict", "(", "parameters", "[", "group", "]", ")", "for", "k", ",", "v", "in", "parameters", "[", "group", "]", ".", "items", "(", ")", ":", "parameters", "[", "group", "]", "[", "k", "]", "=", "_clean_value", "(", "v", ")", "parameters", "[", "group", ".", "replace", "(", "\"nml_\"", ",", "\"\"", ")", "]", "=", "parameters", ".", "pop", "(", "group", ")", "self", ".", "config", "=", "parameters", "return", "parameters" ]
Read a parameters.out file

Returns
-------
dict
    A dictionary containing all the configuration used by MAGICC
[ "Read", "a", "parameters", ".", "out", "file" ]
train
https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/core.py#L347-L369
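Editor's usage sketch: `read_parameters` presupposes a completed run that wrote `PARAMETERS.OUT`; the output flag, the bare `run()` call and the key names below are assumptions, not guaranteed by this record.

from pymagicc import MAGICC6

with MAGICC6() as magicc:
    magicc.set_output_variables(parameters=True)  # ask MAGICC to write PARAMETERS.OUT
    magicc.run()                                  # assumed: run with the default setup
    cfg = magicc.read_parameters()
    print(cfg["allcfgs"]["core_climatesensitivity"])  # hypothetical key lookup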
openclimatedata/pymagicc
pymagicc/core.py
MAGICCBase.remove_temp_copy
def remove_temp_copy(self):
        """
        Removes a temporary copy of the MAGICC version shipped with Pymagicc.
        """
        if self.is_temp and self.root_dir is not None:
            shutil.rmtree(self.root_dir)
            self.root_dir = None
python
def remove_temp_copy(self):
        """
        Removes a temporary copy of the MAGICC version shipped with Pymagicc.
        """
        if self.is_temp and self.root_dir is not None:
            shutil.rmtree(self.root_dir)
            self.root_dir = None
[ "def", "remove_temp_copy", "(", "self", ")", ":", "if", "self", ".", "is_temp", "and", "self", ".", "root_dir", "is", "not", "None", ":", "shutil", ".", "rmtree", "(", "self", ".", "root_dir", ")", "self", ".", "root_dir", "=", "None" ]
Removes a temporary copy of the MAGICC version shipped with Pymagicc.
[ "Removes", "a", "temporary", "copy", "of", "the", "MAGICC", "version", "shipped", "with", "Pymagicc", "." ]
train
https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/core.py#L371-L377
openclimatedata/pymagicc
pymagicc/core.py
MAGICCBase.set_config
def set_config(
        self, filename="MAGTUNE_PYMAGICC.CFG", top_level_key="nml_allcfgs", **kwargs
    ):
        """
        Create a configuration file for MAGICC.

        Writes a fortran namelist in run_dir.

        Parameters
        ----------
        filename : str
            Name of configuration file to write

        top_level_key : str
            Name of namelist to be written in the configuration file

        kwargs
            Other parameters to pass to the configuration file. No validation on
            the parameters is performed.

        Returns
        -------
        dict
            The contents of the namelist which was written to file
        """
        kwargs = self._format_config(kwargs)

        fname = join(self.run_dir, filename)
        conf = {top_level_key: kwargs}
        f90nml.write(conf, fname, force=True)

        return conf
python
def set_config(
        self, filename="MAGTUNE_PYMAGICC.CFG", top_level_key="nml_allcfgs", **kwargs
    ):
        """
        Create a configuration file for MAGICC.

        Writes a fortran namelist in run_dir.

        Parameters
        ----------
        filename : str
            Name of configuration file to write

        top_level_key : str
            Name of namelist to be written in the configuration file

        kwargs
            Other parameters to pass to the configuration file. No validation on
            the parameters is performed.

        Returns
        -------
        dict
            The contents of the namelist which was written to file
        """
        kwargs = self._format_config(kwargs)

        fname = join(self.run_dir, filename)
        conf = {top_level_key: kwargs}
        f90nml.write(conf, fname, force=True)

        return conf
[ "def", "set_config", "(", "self", ",", "filename", "=", "\"MAGTUNE_PYMAGICC.CFG\"", ",", "top_level_key", "=", "\"nml_allcfgs\"", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "self", ".", "_format_config", "(", "kwargs", ")", "fname", "=", "join", "(", "self", ".", "run_dir", ",", "filename", ")", "conf", "=", "{", "top_level_key", ":", "kwargs", "}", "f90nml", ".", "write", "(", "conf", ",", "fname", ",", "force", "=", "True", ")", "return", "conf" ]
Create a configuration file for MAGICC.

Writes a fortran namelist in run_dir.

Parameters
----------
filename : str
    Name of configuration file to write

top_level_key : str
    Name of namelist to be written in the configuration file

kwargs
    Other parameters to pass to the configuration file. No validation on
    the parameters is performed.

Returns
-------
dict
    The contents of the namelist which was written to file
[ "Create", "a", "configuration", "file", "for", "MAGICC", "." ]
train
https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/core.py#L379-L411
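Editor's usage sketch: `set_config` replaces the whole tuning namelist file; the parameter name below is a plausible MAGICC flag used for illustration, not confirmed by this record.

from pymagicc import MAGICC6

with MAGICC6() as magicc:
    # writes an &nml_allcfgs namelist into MAGTUNE_PYMAGICC.CFG in magicc.run_dir
    conf = magicc.set_config(core_climatesensitivity=3.0)  # parameter name assumed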
openclimatedata/pymagicc
pymagicc/core.py
MAGICCBase.update_config
def update_config(
        self, filename="MAGTUNE_PYMAGICC.CFG", top_level_key="nml_allcfgs", **kwargs
    ):
        """Updates a configuration file for MAGICC

        Updates the contents of a fortran namelist in the run directory,
        creating a new namelist if none exists.

        Parameters
        ----------
        filename : str
            Name of configuration file to write

        top_level_key : str
            Name of namelist to be written in the configuration file

        kwargs
            Other parameters to pass to the configuration file. No validation on
            the parameters is performed.

        Returns
        -------
        dict
            The contents of the namelist which was written to file
        """
        kwargs = self._format_config(kwargs)
        fname = join(self.run_dir, filename)

        if exists(fname):
            conf = f90nml.read(fname)
        else:
            conf = {top_level_key: {}}

        conf[top_level_key].update(kwargs)
        f90nml.write(conf, fname, force=True)

        return conf
python
def update_config(
        self, filename="MAGTUNE_PYMAGICC.CFG", top_level_key="nml_allcfgs", **kwargs
    ):
        """Updates a configuration file for MAGICC

        Updates the contents of a fortran namelist in the run directory,
        creating a new namelist if none exists.

        Parameters
        ----------
        filename : str
            Name of configuration file to write

        top_level_key : str
            Name of namelist to be written in the configuration file

        kwargs
            Other parameters to pass to the configuration file. No validation on
            the parameters is performed.

        Returns
        -------
        dict
            The contents of the namelist which was written to file
        """
        kwargs = self._format_config(kwargs)
        fname = join(self.run_dir, filename)

        if exists(fname):
            conf = f90nml.read(fname)
        else:
            conf = {top_level_key: {}}

        conf[top_level_key].update(kwargs)
        f90nml.write(conf, fname, force=True)

        return conf
[ "def", "update_config", "(", "self", ",", "filename", "=", "\"MAGTUNE_PYMAGICC.CFG\"", ",", "top_level_key", "=", "\"nml_allcfgs\"", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "self", ".", "_format_config", "(", "kwargs", ")", "fname", "=", "join", "(", "self", ".", "run_dir", ",", "filename", ")", "if", "exists", "(", "fname", ")", ":", "conf", "=", "f90nml", ".", "read", "(", "fname", ")", "else", ":", "conf", "=", "{", "top_level_key", ":", "{", "}", "}", "conf", "[", "top_level_key", "]", ".", "update", "(", "kwargs", ")", "f90nml", ".", "write", "(", "conf", ",", "fname", ",", "force", "=", "True", ")", "return", "conf" ]
Updates a configuration file for MAGICC

Updates the contents of a fortran namelist in the run directory,
creating a new namelist if none exists.

Parameters
----------
filename : str
    Name of configuration file to write

top_level_key : str
    Name of namelist to be written in the configuration file

kwargs
    Other parameters to pass to the configuration file. No validation on
    the parameters is performed.

Returns
-------
dict
    The contents of the namelist which was written to file
[ "Updates", "a", "configuration", "file", "for", "MAGICC" ]
train
https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/core.py#L413-L450
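Editor's note with sketch: unlike `set_config`, `update_config` merges into an existing namelist instead of overwriting it; the parameter names below are illustrative assumptions.

from pymagicc import MAGICC6

with MAGICC6() as magicc:
    magicc.set_config(core_climatesensitivity=3.0)  # file now holds only this key
    magicc.update_config(rf_solar_scale=0)          # merged; both keys are now present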
openclimatedata/pymagicc
pymagicc/core.py
MAGICCBase.set_zero_config
def set_zero_config(self):
        """Set config such that radiative forcing and temperature output will be zero

        This method is intended as a convenience only, it does not handle everything
        in an obvious way. Adjusting the parameter settings still requires great
        care and may behave unexpectedly.
        """
        # zero_emissions is imported from scenarios module
        zero_emissions.write(join(self.run_dir, self._scen_file_name), self.version)

        time = zero_emissions.filter(variable="Emissions|CH4", region="World")[
            "time"
        ].values
        no_timesteps = len(time)

        # value doesn't actually matter as calculations are done from difference but
        # chose sensible value nonetheless
        ch4_conc_pi = 722
        ch4_conc = ch4_conc_pi * np.ones(no_timesteps)
        ch4_conc_df = pd.DataFrame(
            {
                "time": time,
                "scenario": "idealised",
                "model": "unspecified",
                "climate_model": "unspecified",
                "variable": "Atmospheric Concentrations|CH4",
                "unit": "ppb",
                "todo": "SET",
                "region": "World",
                "value": ch4_conc,
            }
        )
        ch4_conc_writer = MAGICCData(ch4_conc_df)
        ch4_conc_filename = "HIST_CONSTANT_CH4_CONC.IN"
        ch4_conc_writer.metadata = {
            "header": "Constant pre-industrial CH4 concentrations"
        }
        ch4_conc_writer.write(join(self.run_dir, ch4_conc_filename), self.version)

        fgas_conc_pi = 0
        fgas_conc = fgas_conc_pi * np.ones(no_timesteps)
        # MAGICC6 doesn't read this so not a problem, for MAGICC7 we might have to
        # write each file separately
        varname = "FGAS_CONC"
        fgas_conc_df = pd.DataFrame(
            {
                "time": time,
                "scenario": "idealised",
                "model": "unspecified",
                "climate_model": "unspecified",
                "variable": varname,
                "unit": "ppt",
                "todo": "SET",
                "region": "World",
                "value": fgas_conc,
            }
        )
        fgas_conc_writer = MAGICCData(fgas_conc_df)
        fgas_conc_filename = "HIST_ZERO_{}.IN".format(varname)
        fgas_conc_writer.metadata = {"header": "Zero concentrations"}
        fgas_conc_writer.write(join(self.run_dir, fgas_conc_filename), self.version)

        emis_config = self._fix_any_backwards_emissions_scen_key_in_config(
            {"file_emissionscenario": self._scen_file_name}
        )
        self.set_config(
            **emis_config,
            rf_initialization_method="ZEROSTARTSHIFT",
            rf_total_constantafteryr=10000,
            file_co2i_emis="",
            file_co2b_emis="",
            co2_switchfromconc2emis_year=1750,
            file_ch4i_emis="",
            file_ch4b_emis="",
            file_ch4n_emis="",
            file_ch4_conc=ch4_conc_filename,
            ch4_switchfromconc2emis_year=10000,
            file_n2oi_emis="",
            file_n2ob_emis="",
            file_n2on_emis="",
            file_n2o_conc="",
            n2o_switchfromconc2emis_year=1750,
            file_noxi_emis="",
            file_noxb_emis="",
            file_noxi_ot="",
            file_noxb_ot="",
            file_noxt_rf="",
            file_soxnb_ot="",
            file_soxi_ot="",
            file_soxt_rf="",
            file_soxi_emis="",
            file_soxb_emis="",
            file_soxn_emis="",
            file_oci_emis="",
            file_ocb_emis="",
            file_oci_ot="",
            file_ocb_ot="",
            file_oci_rf="",
            file_ocb_rf="",
            file_bci_emis="",
            file_bcb_emis="",
            file_bci_ot="",
            file_bcb_ot="",
            file_bci_rf="",
            file_bcb_rf="",
            bcoc_switchfromrf2emis_year=1750,
            file_nh3i_emis="",
            file_nh3b_emis="",
            file_nmvoci_emis="",
            file_nmvocb_emis="",
            file_coi_emis="",
            file_cob_emis="",
            file_mineraldust_rf="",
            file_landuse_rf="",
            file_bcsnow_rf="",
            # rf_fgassum_scale=0,  # this appears to do nothing, hence the next two lines
            file_fgas_conc=[fgas_conc_filename] * 12,
            fgas_switchfromconc2emis_year=10000,
            rf_mhalosum_scale=0,
            mhalo_switch_conc2emis_yr=1750,
            stratoz_o3scale=0,
            rf_volcanic_scale=0,
            rf_solar_scale=0,
        )
python
def set_zero_config(self):
        """Set config such that radiative forcing and temperature output will be zero

        This method is intended as a convenience only, it does not handle everything
        in an obvious way. Adjusting the parameter settings still requires great
        care and may behave unexpectedly.
        """
        # zero_emissions is imported from scenarios module
        zero_emissions.write(join(self.run_dir, self._scen_file_name), self.version)

        time = zero_emissions.filter(variable="Emissions|CH4", region="World")[
            "time"
        ].values
        no_timesteps = len(time)

        # value doesn't actually matter as calculations are done from difference but
        # chose sensible value nonetheless
        ch4_conc_pi = 722
        ch4_conc = ch4_conc_pi * np.ones(no_timesteps)
        ch4_conc_df = pd.DataFrame(
            {
                "time": time,
                "scenario": "idealised",
                "model": "unspecified",
                "climate_model": "unspecified",
                "variable": "Atmospheric Concentrations|CH4",
                "unit": "ppb",
                "todo": "SET",
                "region": "World",
                "value": ch4_conc,
            }
        )
        ch4_conc_writer = MAGICCData(ch4_conc_df)
        ch4_conc_filename = "HIST_CONSTANT_CH4_CONC.IN"
        ch4_conc_writer.metadata = {
            "header": "Constant pre-industrial CH4 concentrations"
        }
        ch4_conc_writer.write(join(self.run_dir, ch4_conc_filename), self.version)

        fgas_conc_pi = 0
        fgas_conc = fgas_conc_pi * np.ones(no_timesteps)
        # MAGICC6 doesn't read this so not a problem, for MAGICC7 we might have to
        # write each file separately
        varname = "FGAS_CONC"
        fgas_conc_df = pd.DataFrame(
            {
                "time": time,
                "scenario": "idealised",
                "model": "unspecified",
                "climate_model": "unspecified",
                "variable": varname,
                "unit": "ppt",
                "todo": "SET",
                "region": "World",
                "value": fgas_conc,
            }
        )
        fgas_conc_writer = MAGICCData(fgas_conc_df)
        fgas_conc_filename = "HIST_ZERO_{}.IN".format(varname)
        fgas_conc_writer.metadata = {"header": "Zero concentrations"}
        fgas_conc_writer.write(join(self.run_dir, fgas_conc_filename), self.version)

        emis_config = self._fix_any_backwards_emissions_scen_key_in_config(
            {"file_emissionscenario": self._scen_file_name}
        )
        self.set_config(
            **emis_config,
            rf_initialization_method="ZEROSTARTSHIFT",
            rf_total_constantafteryr=10000,
            file_co2i_emis="",
            file_co2b_emis="",
            co2_switchfromconc2emis_year=1750,
            file_ch4i_emis="",
            file_ch4b_emis="",
            file_ch4n_emis="",
            file_ch4_conc=ch4_conc_filename,
            ch4_switchfromconc2emis_year=10000,
            file_n2oi_emis="",
            file_n2ob_emis="",
            file_n2on_emis="",
            file_n2o_conc="",
            n2o_switchfromconc2emis_year=1750,
            file_noxi_emis="",
            file_noxb_emis="",
            file_noxi_ot="",
            file_noxb_ot="",
            file_noxt_rf="",
            file_soxnb_ot="",
            file_soxi_ot="",
            file_soxt_rf="",
            file_soxi_emis="",
            file_soxb_emis="",
            file_soxn_emis="",
            file_oci_emis="",
            file_ocb_emis="",
            file_oci_ot="",
            file_ocb_ot="",
            file_oci_rf="",
            file_ocb_rf="",
            file_bci_emis="",
            file_bcb_emis="",
            file_bci_ot="",
            file_bcb_ot="",
            file_bci_rf="",
            file_bcb_rf="",
            bcoc_switchfromrf2emis_year=1750,
            file_nh3i_emis="",
            file_nh3b_emis="",
            file_nmvoci_emis="",
            file_nmvocb_emis="",
            file_coi_emis="",
            file_cob_emis="",
            file_mineraldust_rf="",
            file_landuse_rf="",
            file_bcsnow_rf="",
            # rf_fgassum_scale=0,  # this appears to do nothing, hence the next two lines
            file_fgas_conc=[fgas_conc_filename] * 12,
            fgas_switchfromconc2emis_year=10000,
            rf_mhalosum_scale=0,
            mhalo_switch_conc2emis_yr=1750,
            stratoz_o3scale=0,
            rf_volcanic_scale=0,
            rf_solar_scale=0,
        )
[ "def", "set_zero_config", "(", "self", ")", ":", "# zero_emissions is imported from scenarios module", "zero_emissions", ".", "write", "(", "join", "(", "self", ".", "run_dir", ",", "self", ".", "_scen_file_name", ")", ",", "self", ".", "version", ")", "time", "=", "zero_emissions", ".", "filter", "(", "variable", "=", "\"Emissions|CH4\"", ",", "region", "=", "\"World\"", ")", "[", "\"time\"", "]", ".", "values", "no_timesteps", "=", "len", "(", "time", ")", "# value doesn't actually matter as calculations are done from difference but", "# chose sensible value nonetheless", "ch4_conc_pi", "=", "722", "ch4_conc", "=", "ch4_conc_pi", "*", "np", ".", "ones", "(", "no_timesteps", ")", "ch4_conc_df", "=", "pd", ".", "DataFrame", "(", "{", "\"time\"", ":", "time", ",", "\"scenario\"", ":", "\"idealised\"", ",", "\"model\"", ":", "\"unspecified\"", ",", "\"climate_model\"", ":", "\"unspecified\"", ",", "\"variable\"", ":", "\"Atmospheric Concentrations|CH4\"", ",", "\"unit\"", ":", "\"ppb\"", ",", "\"todo\"", ":", "\"SET\"", ",", "\"region\"", ":", "\"World\"", ",", "\"value\"", ":", "ch4_conc", ",", "}", ")", "ch4_conc_writer", "=", "MAGICCData", "(", "ch4_conc_df", ")", "ch4_conc_filename", "=", "\"HIST_CONSTANT_CH4_CONC.IN\"", "ch4_conc_writer", ".", "metadata", "=", "{", "\"header\"", ":", "\"Constant pre-industrial CH4 concentrations\"", "}", "ch4_conc_writer", ".", "write", "(", "join", "(", "self", ".", "run_dir", ",", "ch4_conc_filename", ")", ",", "self", ".", "version", ")", "fgas_conc_pi", "=", "0", "fgas_conc", "=", "fgas_conc_pi", "*", "np", ".", "ones", "(", "no_timesteps", ")", "# MAGICC6 doesn't read this so not a problem, for MAGICC7 we might have to", "# write each file separately", "varname", "=", "\"FGAS_CONC\"", "fgas_conc_df", "=", "pd", ".", "DataFrame", "(", "{", "\"time\"", ":", "time", ",", "\"scenario\"", ":", "\"idealised\"", ",", "\"model\"", ":", "\"unspecified\"", ",", "\"climate_model\"", ":", "\"unspecified\"", ",", "\"variable\"", ":", "varname", ",", "\"unit\"", ":", "\"ppt\"", ",", "\"todo\"", ":", "\"SET\"", ",", "\"region\"", ":", "\"World\"", ",", "\"value\"", ":", "fgas_conc", ",", "}", ")", "fgas_conc_writer", "=", "MAGICCData", "(", "fgas_conc_df", ")", "fgas_conc_filename", "=", "\"HIST_ZERO_{}.IN\"", ".", "format", "(", "varname", ")", "fgas_conc_writer", ".", "metadata", "=", "{", "\"header\"", ":", "\"Zero concentrations\"", "}", "fgas_conc_writer", ".", "write", "(", "join", "(", "self", ".", "run_dir", ",", "fgas_conc_filename", ")", ",", "self", ".", "version", ")", "emis_config", "=", "self", ".", "_fix_any_backwards_emissions_scen_key_in_config", "(", "{", "\"file_emissionscenario\"", ":", "self", ".", "_scen_file_name", "}", ")", "self", ".", "set_config", "(", "*", "*", "emis_config", ",", "rf_initialization_method", "=", "\"ZEROSTARTSHIFT\"", ",", "rf_total_constantafteryr", "=", "10000", ",", "file_co2i_emis", "=", "\"\"", ",", "file_co2b_emis", "=", "\"\"", ",", "co2_switchfromconc2emis_year", "=", "1750", ",", "file_ch4i_emis", "=", "\"\"", ",", "file_ch4b_emis", "=", "\"\"", ",", "file_ch4n_emis", "=", "\"\"", ",", "file_ch4_conc", "=", "ch4_conc_filename", ",", "ch4_switchfromconc2emis_year", "=", "10000", ",", "file_n2oi_emis", "=", "\"\"", ",", "file_n2ob_emis", "=", "\"\"", ",", "file_n2on_emis", "=", "\"\"", ",", "file_n2o_conc", "=", "\"\"", ",", "n2o_switchfromconc2emis_year", "=", "1750", ",", "file_noxi_emis", "=", "\"\"", ",", "file_noxb_emis", "=", "\"\"", ",", "file_noxi_ot", "=", "\"\"", ",", "file_noxb_ot", "=", "\"\"", 
",", "file_noxt_rf", "=", "\"\"", ",", "file_soxnb_ot", "=", "\"\"", ",", "file_soxi_ot", "=", "\"\"", ",", "file_soxt_rf", "=", "\"\"", ",", "file_soxi_emis", "=", "\"\"", ",", "file_soxb_emis", "=", "\"\"", ",", "file_soxn_emis", "=", "\"\"", ",", "file_oci_emis", "=", "\"\"", ",", "file_ocb_emis", "=", "\"\"", ",", "file_oci_ot", "=", "\"\"", ",", "file_ocb_ot", "=", "\"\"", ",", "file_oci_rf", "=", "\"\"", ",", "file_ocb_rf", "=", "\"\"", ",", "file_bci_emis", "=", "\"\"", ",", "file_bcb_emis", "=", "\"\"", ",", "file_bci_ot", "=", "\"\"", ",", "file_bcb_ot", "=", "\"\"", ",", "file_bci_rf", "=", "\"\"", ",", "file_bcb_rf", "=", "\"\"", ",", "bcoc_switchfromrf2emis_year", "=", "1750", ",", "file_nh3i_emis", "=", "\"\"", ",", "file_nh3b_emis", "=", "\"\"", ",", "file_nmvoci_emis", "=", "\"\"", ",", "file_nmvocb_emis", "=", "\"\"", ",", "file_coi_emis", "=", "\"\"", ",", "file_cob_emis", "=", "\"\"", ",", "file_mineraldust_rf", "=", "\"\"", ",", "file_landuse_rf", "=", "\"\"", ",", "file_bcsnow_rf", "=", "\"\"", ",", "# rf_fgassum_scale=0, # this appears to do nothing, hence the next two lines", "file_fgas_conc", "=", "[", "fgas_conc_filename", "]", "*", "12", ",", "fgas_switchfromconc2emis_year", "=", "10000", ",", "rf_mhalosum_scale", "=", "0", ",", "mhalo_switch_conc2emis_yr", "=", "1750", ",", "stratoz_o3scale", "=", "0", ",", "rf_volcanic_scale", "=", "0", ",", "rf_solar_scale", "=", "0", ",", ")" ]
Set config such that radiative forcing and temperature output will be zero

This method is intended as a convenience only, it does not handle everything
in an obvious way. Adjusting the parameter settings still requires great care
and may behave unexpectedly.
[ "Set", "config", "such", "that", "radiative", "forcing", "and", "temperature", "output", "will", "be", "zero" ]
train
https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/core.py#L452-L574
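Editor's usage sketch: after `set_zero_config`, a subsequent run is expected to produce near-zero forcing and temperature; the bare `run()` call is an assumption.

from pymagicc import MAGICC6

with MAGICC6() as magicc:
    magicc.set_zero_config()
    results = magicc.run()  # assumed API; output should show ~0 radiative forcing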
openclimatedata/pymagicc
pymagicc/core.py
MAGICCBase.set_years
def set_years(self, startyear=1765, endyear=2100):
        """
        Set the start and end dates of the simulations.

        Parameters
        ----------
        startyear : int
            Start year of the simulation

        endyear : int
            End year of the simulation

        Returns
        -------
        dict
            The contents of the namelist
        """
        # TODO: test altering stepsperyear, I think 1, 2 and 24 should all work
        return self.set_config(
            "MAGCFG_NMLYEARS.CFG",
            "nml_years",
            endyear=endyear,
            startyear=startyear,
            stepsperyear=12,
        )
python
def set_years(self, startyear=1765, endyear=2100):
        """
        Set the start and end dates of the simulations.

        Parameters
        ----------
        startyear : int
            Start year of the simulation

        endyear : int
            End year of the simulation

        Returns
        -------
        dict
            The contents of the namelist
        """
        # TODO: test altering stepsperyear, I think 1, 2 and 24 should all work
        return self.set_config(
            "MAGCFG_NMLYEARS.CFG",
            "nml_years",
            endyear=endyear,
            startyear=startyear,
            stepsperyear=12,
        )
[ "def", "set_years", "(", "self", ",", "startyear", "=", "1765", ",", "endyear", "=", "2100", ")", ":", "# TODO: test altering stepsperyear, I think 1, 2 and 24 should all work", "return", "self", ".", "set_config", "(", "\"MAGCFG_NMLYEARS.CFG\"", ",", "\"nml_years\"", ",", "endyear", "=", "endyear", ",", "startyear", "=", "startyear", ",", "stepsperyear", "=", "12", ",", ")" ]
Set the start and end dates of the simulations.

Parameters
----------
startyear : int
    Start year of the simulation

endyear : int
    End year of the simulation

Returns
-------
dict
    The contents of the namelist
[ "Set", "the", "start", "and", "end", "dates", "of", "the", "simulations", "." ]
train
https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/core.py#L606-L630
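Editor's usage sketch for `set_years` (the MAGICC6 context-manager usage is an assumption, as above):

from pymagicc import MAGICC6

with MAGICC6() as magicc:
    magicc.set_years(startyear=1850, endyear=2300)  # writes MAGCFG_NMLYEARS.CFG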
openclimatedata/pymagicc
pymagicc/core.py
MAGICCBase.set_output_variables
def set_output_variables(self, write_ascii=True, write_binary=False, **kwargs):
        """Set the output configuration, minimising output as much as possible

        There are a number of configuration parameters which control which
        variables are written to file and in which format. Limiting the variables
        that are written to file can greatly speed up the running of MAGICC. By
        default, calling this function without specifying any variables will
        disable all output by setting all of MAGICC's ``out_xx`` flags to ``0``.

        This convenience function should not be confused with ``set_config`` or
        ``update_config`` which allow the user to set/update the configuration
        flags directly, without the more convenient syntax and default behaviour
        provided by this function.

        Parameters
        ----------
        write_ascii : bool
            If true, MAGICC is configured to write output files as human readable
            ascii files.

        write_binary : bool
            If true, MAGICC is configured to write binary output files. These
            files are much faster to process and write, but are not human
            readable.

        **kwargs:
            List of variables to write out. A list of possible options is as
            follows. This may not be a complete list.

            'emissions', 'gwpemissions', 'sum_gwpemissions', 'concentrations',
            'carboncycle', 'forcing', 'surfaceforcing', 'permafrost',
            'temperature', 'sealevel', 'parameters', 'misc', 'lifetimes',
            'timeseriesmix', 'rcpdata', 'summaryidx', 'inverseemis',
            'tempoceanlayers', 'oceanarea', 'heatuptake', 'warnings',
            'precipinput', 'aogcmtuning', 'ccycletuning', 'observationaltuning',
            'keydata_1', 'keydata_2'
        """
        assert (
            write_ascii or write_binary
        ), "write_binary and/or write_ascii must be configured"

        if write_binary and write_ascii:
            ascii_binary = "BOTH"
        elif write_ascii:
            ascii_binary = "ASCII"
        else:
            ascii_binary = "BINARY"

        # defaults
        outconfig = {
            "out_emissions": 0,
            "out_gwpemissions": 0,
            "out_sum_gwpemissions": 0,
            "out_concentrations": 0,
            "out_carboncycle": 0,
            "out_forcing": 0,
            "out_surfaceforcing": 0,
            "out_permafrost": 0,
            "out_temperature": 0,
            "out_sealevel": 0,
            "out_parameters": 0,
            "out_misc": 0,
            "out_timeseriesmix": 0,
            "out_rcpdata": 0,
            "out_summaryidx": 0,
            "out_inverseemis": 0,
            "out_tempoceanlayers": 0,
            "out_heatuptake": 0,
            "out_ascii_binary": ascii_binary,
            "out_warnings": 0,
            "out_precipinput": 0,
            "out_aogcmtuning": 0,
            "out_ccycletuning": 0,
            "out_observationaltuning": 0,
            "out_keydata_1": 0,
            "out_keydata_2": 0,
        }
        if self.version == 7:
            outconfig["out_oceanarea"] = 0
            outconfig["out_lifetimes"] = 0

        for kw in kwargs:
            val = 1 if kwargs[kw] else 0  # convert values to 0/1 instead of booleans
            outconfig["out_" + kw.lower()] = val

        self.update_config(**outconfig)
python
def set_output_variables(self, write_ascii=True, write_binary=False, **kwargs):
        """Set the output configuration, minimising output as much as possible

        There are a number of configuration parameters which control which
        variables are written to file and in which format. Limiting the variables
        that are written to file can greatly speed up the running of MAGICC. By
        default, calling this function without specifying any variables will
        disable all output by setting all of MAGICC's ``out_xx`` flags to ``0``.

        This convenience function should not be confused with ``set_config`` or
        ``update_config`` which allow the user to set/update the configuration
        flags directly, without the more convenient syntax and default behaviour
        provided by this function.

        Parameters
        ----------
        write_ascii : bool
            If true, MAGICC is configured to write output files as human readable
            ascii files.

        write_binary : bool
            If true, MAGICC is configured to write binary output files. These
            files are much faster to process and write, but are not human
            readable.

        **kwargs:
            List of variables to write out. A list of possible options is as
            follows. This may not be a complete list.

            'emissions', 'gwpemissions', 'sum_gwpemissions', 'concentrations',
            'carboncycle', 'forcing', 'surfaceforcing', 'permafrost',
            'temperature', 'sealevel', 'parameters', 'misc', 'lifetimes',
            'timeseriesmix', 'rcpdata', 'summaryidx', 'inverseemis',
            'tempoceanlayers', 'oceanarea', 'heatuptake', 'warnings',
            'precipinput', 'aogcmtuning', 'ccycletuning', 'observationaltuning',
            'keydata_1', 'keydata_2'
        """
        assert (
            write_ascii or write_binary
        ), "write_binary and/or write_ascii must be configured"

        if write_binary and write_ascii:
            ascii_binary = "BOTH"
        elif write_ascii:
            ascii_binary = "ASCII"
        else:
            ascii_binary = "BINARY"

        # defaults
        outconfig = {
            "out_emissions": 0,
            "out_gwpemissions": 0,
            "out_sum_gwpemissions": 0,
            "out_concentrations": 0,
            "out_carboncycle": 0,
            "out_forcing": 0,
            "out_surfaceforcing": 0,
            "out_permafrost": 0,
            "out_temperature": 0,
            "out_sealevel": 0,
            "out_parameters": 0,
            "out_misc": 0,
            "out_timeseriesmix": 0,
            "out_rcpdata": 0,
            "out_summaryidx": 0,
            "out_inverseemis": 0,
            "out_tempoceanlayers": 0,
            "out_heatuptake": 0,
            "out_ascii_binary": ascii_binary,
            "out_warnings": 0,
            "out_precipinput": 0,
            "out_aogcmtuning": 0,
            "out_ccycletuning": 0,
            "out_observationaltuning": 0,
            "out_keydata_1": 0,
            "out_keydata_2": 0,
        }
        if self.version == 7:
            outconfig["out_oceanarea"] = 0
            outconfig["out_lifetimes"] = 0

        for kw in kwargs:
            val = 1 if kwargs[kw] else 0  # convert values to 0/1 instead of booleans
            outconfig["out_" + kw.lower()] = val

        self.update_config(**outconfig)
[ "def", "set_output_variables", "(", "self", ",", "write_ascii", "=", "True", ",", "write_binary", "=", "False", ",", "*", "*", "kwargs", ")", ":", "assert", "(", "write_ascii", "or", "write_binary", ")", ",", "\"write_binary and/or write_ascii must be configured\"", "if", "write_binary", "and", "write_ascii", ":", "ascii_binary", "=", "\"BOTH\"", "elif", "write_ascii", ":", "ascii_binary", "=", "\"ASCII\"", "else", ":", "ascii_binary", "=", "\"BINARY\"", "# defaults", "outconfig", "=", "{", "\"out_emissions\"", ":", "0", ",", "\"out_gwpemissions\"", ":", "0", ",", "\"out_sum_gwpemissions\"", ":", "0", ",", "\"out_concentrations\"", ":", "0", ",", "\"out_carboncycle\"", ":", "0", ",", "\"out_forcing\"", ":", "0", ",", "\"out_surfaceforcing\"", ":", "0", ",", "\"out_permafrost\"", ":", "0", ",", "\"out_temperature\"", ":", "0", ",", "\"out_sealevel\"", ":", "0", ",", "\"out_parameters\"", ":", "0", ",", "\"out_misc\"", ":", "0", ",", "\"out_timeseriesmix\"", ":", "0", ",", "\"out_rcpdata\"", ":", "0", ",", "\"out_summaryidx\"", ":", "0", ",", "\"out_inverseemis\"", ":", "0", ",", "\"out_tempoceanlayers\"", ":", "0", ",", "\"out_heatuptake\"", ":", "0", ",", "\"out_ascii_binary\"", ":", "ascii_binary", ",", "\"out_warnings\"", ":", "0", ",", "\"out_precipinput\"", ":", "0", ",", "\"out_aogcmtuning\"", ":", "0", ",", "\"out_ccycletuning\"", ":", "0", ",", "\"out_observationaltuning\"", ":", "0", ",", "\"out_keydata_1\"", ":", "0", ",", "\"out_keydata_2\"", ":", "0", ",", "}", "if", "self", ".", "version", "==", "7", ":", "outconfig", "[", "\"out_oceanarea\"", "]", "=", "0", "outconfig", "[", "\"out_lifetimes\"", "]", "=", "0", "for", "kw", "in", "kwargs", ":", "val", "=", "1", "if", "kwargs", "[", "kw", "]", "else", "0", "# convert values to 0/1 instead of booleans", "outconfig", "[", "\"out_\"", "+", "kw", ".", "lower", "(", ")", "]", "=", "val", "self", ".", "update_config", "(", "*", "*", "outconfig", ")" ]
Set the output configuration, minimising output as much as possible

There are a number of configuration parameters which control which variables
are written to file and in which format. Limiting the variables that are
written to file can greatly speed up the running of MAGICC. By default,
calling this function without specifying any variables will disable all
output by setting all of MAGICC's ``out_xx`` flags to ``0``.

This convenience function should not be confused with ``set_config`` or
``update_config`` which allow the user to set/update the configuration flags
directly, without the more convenient syntax and default behaviour provided
by this function.

Parameters
----------
write_ascii : bool
    If true, MAGICC is configured to write output files as human readable
    ascii files.

write_binary : bool
    If true, MAGICC is configured to write binary output files. These files
    are much faster to process and write, but are not human readable.

**kwargs:
    List of variables to write out. A list of possible options is as
    follows. This may not be a complete list.

    'emissions', 'gwpemissions', 'sum_gwpemissions', 'concentrations',
    'carboncycle', 'forcing', 'surfaceforcing', 'permafrost', 'temperature',
    'sealevel', 'parameters', 'misc', 'lifetimes', 'timeseriesmix',
    'rcpdata', 'summaryidx', 'inverseemis', 'tempoceanlayers', 'oceanarea',
    'heatuptake', 'warnings', 'precipinput', 'aogcmtuning', 'ccycletuning',
    'observationaltuning', 'keydata_1', 'keydata_2'
[ "Set", "the", "output", "configuration", "minimising", "output", "as", "much", "as", "possible" ]
train
https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/core.py#L632-L735
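Editor's usage sketch: switch every output flag off except the ones named, as the docstring describes.

from pymagicc import MAGICC6

with MAGICC6() as magicc:
    # only temperature and forcing output, written as human-readable ascii
    magicc.set_output_variables(temperature=True, forcing=True)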
openclimatedata/pymagicc
pymagicc/core.py
MAGICCBase.diagnose_tcr_ecs
def diagnose_tcr_ecs(self, **kwargs):
        """Diagnose TCR and ECS

        The transient climate response (TCR) is the global-mean temperature
        response at the time at which atmospheric |CO2| concentrations double in a
        scenario where atmospheric |CO2| concentrations are increased at 1% per
        year from pre-industrial levels.

        The equilibrium climate sensitivity (ECS) is the equilibrium global-mean
        temperature response to an instantaneous doubling of atmospheric |CO2|
        concentrations.

        As MAGICC has no hysteresis in its equilibrium response to radiative
        forcing, we can diagnose TCR and ECS with one experiment. However, please
        note that sometimes the run length won't be long enough to allow MAGICC's
        oceans to fully equilibrate and hence the ECS value might not be what you
        expect (it should match the value of ``core_climatesensitivity``).

        Parameters
        ----------
        **kwargs
            parameter values to use in the diagnosis e.g.
            ``core_climatesensitivity=4``

        Returns
        -------
        dict
            Dictionary with keys: "ecs" - the diagnosed ECS; "tcr" - the diagnosed
            TCR; "timeseries" - the relevant model input and output timeseries
            used in the experiment i.e. atmospheric |CO2| concentrations, total
            radiative forcing and global-mean surface temperature
        """
        if self.version == 7:
            raise NotImplementedError("MAGICC7 cannot yet diagnose ECS and TCR")

        self._diagnose_tcr_ecs_config_setup(**kwargs)
        timeseries = self.run(
            scenario=None,
            only=[
                "Atmospheric Concentrations|CO2",
                "Radiative Forcing",
                "Surface Temperature",
            ],
        )
        tcr, ecs = self._get_tcr_ecs_from_diagnosis_results(timeseries)
        return {"tcr": tcr, "ecs": ecs, "timeseries": timeseries}
python
def diagnose_tcr_ecs(self, **kwargs):
        """Diagnose TCR and ECS

        The transient climate response (TCR) is the global-mean temperature
        response at the time at which atmospheric |CO2| concentrations double in a
        scenario where atmospheric |CO2| concentrations are increased at 1% per
        year from pre-industrial levels.

        The equilibrium climate sensitivity (ECS) is the equilibrium global-mean
        temperature response to an instantaneous doubling of atmospheric |CO2|
        concentrations.

        As MAGICC has no hysteresis in its equilibrium response to radiative
        forcing, we can diagnose TCR and ECS with one experiment. However, please
        note that sometimes the run length won't be long enough to allow MAGICC's
        oceans to fully equilibrate and hence the ECS value might not be what you
        expect (it should match the value of ``core_climatesensitivity``).

        Parameters
        ----------
        **kwargs
            parameter values to use in the diagnosis e.g.
            ``core_climatesensitivity=4``

        Returns
        -------
        dict
            Dictionary with keys: "ecs" - the diagnosed ECS; "tcr" - the diagnosed
            TCR; "timeseries" - the relevant model input and output timeseries
            used in the experiment i.e. atmospheric |CO2| concentrations, total
            radiative forcing and global-mean surface temperature
        """
        if self.version == 7:
            raise NotImplementedError("MAGICC7 cannot yet diagnose ECS and TCR")

        self._diagnose_tcr_ecs_config_setup(**kwargs)
        timeseries = self.run(
            scenario=None,
            only=[
                "Atmospheric Concentrations|CO2",
                "Radiative Forcing",
                "Surface Temperature",
            ],
        )
        tcr, ecs = self._get_tcr_ecs_from_diagnosis_results(timeseries)
        return {"tcr": tcr, "ecs": ecs, "timeseries": timeseries}
[ "def", "diagnose_tcr_ecs", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "version", "==", "7", ":", "raise", "NotImplementedError", "(", "\"MAGICC7 cannot yet diagnose ECS and TCR\"", ")", "self", ".", "_diagnose_tcr_ecs_config_setup", "(", "*", "*", "kwargs", ")", "timeseries", "=", "self", ".", "run", "(", "scenario", "=", "None", ",", "only", "=", "[", "\"Atmospheric Concentrations|CO2\"", ",", "\"Radiative Forcing\"", ",", "\"Surface Temperature\"", ",", "]", ",", ")", "tcr", ",", "ecs", "=", "self", ".", "_get_tcr_ecs_from_diagnosis_results", "(", "timeseries", ")", "return", "{", "\"tcr\"", ":", "tcr", ",", "\"ecs\"", ":", "ecs", ",", "\"timeseries\"", ":", "timeseries", "}" ]
Diagnose TCR and ECS

The transient climate response (TCR) is the global-mean temperature response
at the time at which atmospheric |CO2| concentrations double in a scenario
where atmospheric |CO2| concentrations are increased at 1% per year from
pre-industrial levels.

The equilibrium climate sensitivity (ECS) is the equilibrium global-mean
temperature response to an instantaneous doubling of atmospheric |CO2|
concentrations.

As MAGICC has no hysteresis in its equilibrium response to radiative forcing,
we can diagnose TCR and ECS with one experiment. However, please note that
sometimes the run length won't be long enough to allow MAGICC's oceans to
fully equilibrate and hence the ECS value might not be what you expect (it
should match the value of ``core_climatesensitivity``).

Parameters
----------
**kwargs
    parameter values to use in the diagnosis e.g.
    ``core_climatesensitivity=4``

Returns
-------
dict
    Dictionary with keys: "ecs" - the diagnosed ECS; "tcr" - the diagnosed
    TCR; "timeseries" - the relevant model input and output timeseries used
    in the experiment i.e. atmospheric |CO2| concentrations, total radiative
    forcing and global-mean surface temperature
[ "Diagnose", "TCR", "and", "ECS" ]
train
https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/core.py#L740-L783
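Editor's usage sketch for the TCR/ECS diagnosis (parameter name assumed, as in the docstring example):

from pymagicc import MAGICC6

with MAGICC6() as magicc:
    out = magicc.diagnose_tcr_ecs(core_climatesensitivity=3.0)
    print(out["tcr"], out["ecs"])  # out["timeseries"] holds the driving timeseries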
openclimatedata/pymagicc
pymagicc/core.py
MAGICCBase.set_emission_scenario_setup
def set_emission_scenario_setup(self, scenario, config_dict):
        """Set the emissions flags correctly.

        Parameters
        ----------
        scenario : :obj:`pymagicc.io.MAGICCData`
            Scenario to run.

        config_dict : dict
            Dictionary with current input configurations which is to be validated
            and updated where necessary.

        Returns
        -------
        dict
            Updated configuration
        """
        self.write(scenario, self._scen_file_name)
        # can be lazy in this line as fix backwards key handles errors for us
        config_dict["file_emissionscenario"] = self._scen_file_name
        config_dict = self._fix_any_backwards_emissions_scen_key_in_config(config_dict)

        return config_dict
python
def set_emission_scenario_setup(self, scenario, config_dict):
        """Set the emissions flags correctly.

        Parameters
        ----------
        scenario : :obj:`pymagicc.io.MAGICCData`
            Scenario to run.

        config_dict : dict
            Dictionary with current input configurations which is to be validated
            and updated where necessary.

        Returns
        -------
        dict
            Updated configuration
        """
        self.write(scenario, self._scen_file_name)
        # can be lazy in this line as fix backwards key handles errors for us
        config_dict["file_emissionscenario"] = self._scen_file_name
        config_dict = self._fix_any_backwards_emissions_scen_key_in_config(config_dict)

        return config_dict
[ "def", "set_emission_scenario_setup", "(", "self", ",", "scenario", ",", "config_dict", ")", ":", "self", ".", "write", "(", "scenario", ",", "self", ".", "_scen_file_name", ")", "# can be lazy in this line as fix backwards key handles errors for us", "config_dict", "[", "\"file_emissionscenario\"", "]", "=", "self", ".", "_scen_file_name", "config_dict", "=", "self", ".", "_fix_any_backwards_emissions_scen_key_in_config", "(", "config_dict", ")", "return", "config_dict" ]
Set the emissions flags correctly.

Parameters
----------
scenario : :obj:`pymagicc.io.MAGICCData`
    Scenario to run.

config_dict : dict
    Dictionary with current input configurations which is to be validated and
    updated where necessary.

Returns
-------
dict
    Updated configuration
[ "Set", "the", "emissions", "flags", "correctly", "." ]
train
https://github.com/openclimatedata/pymagicc/blob/d896014832cf458d1e95e5878fd6d5961f3e2e05/pymagicc/core.py#L903-L925
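Editor's usage sketch: the function both writes the SCEN file and fixes up the emissions-scenario flag in the passed config dict; `rcp26` is an assumed bundled scenario, not something this record confirms.

from pymagicc import MAGICC6
from pymagicc.scenarios import rcp26  # assumed import path

with MAGICC6() as magicc:
    cfg = magicc.set_emission_scenario_setup(rcp26, {})
    # cfg now maps the version-appropriate emissions-scenario flag to the SCEN file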
productml/blurr
blurr/core/type.py
Type.contains
def contains(value: Union[str, 'Type']) -> bool:
        """ Checks if a type is defined """
        if isinstance(value, str):
            return any(value.lower() == i.value for i in Type)

        return any(value == i for i in Type)
python
def contains(value: Union[str, 'Type']) -> bool:
        """ Checks if a type is defined """
        if isinstance(value, str):
            return any(value.lower() == i.value for i in Type)

        return any(value == i for i in Type)
[ "def", "contains", "(", "value", ":", "Union", "[", "str", ",", "'Type'", "]", ")", "->", "bool", ":", "if", "isinstance", "(", "value", ",", "str", ")", ":", "return", "any", "(", "value", ".", "lower", "(", ")", "==", "i", ".", "value", "for", "i", "in", "Type", ")", "return", "any", "(", "value", "==", "i", "for", "i", "in", "Type", ")" ]
Checks if a type is defined
[ "Checks", "if", "a", "type", "is", "defined" ]
train
https://github.com/productml/blurr/blob/1b688b2c4a9bbbb2139c58bf0682ddc05a6c24fa/blurr/core/type.py#L48-L53
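Editor's sketch of the same membership pattern with a self-contained stand-in enum (blurr's actual Type members are not listed in this record):

from enum import Enum
from typing import Union

class Color(Enum):  # hypothetical stand-in for blurr's Type
    RED = 'red'

def contains(value: Union[str, Color]) -> bool:
    # strings are matched case-insensitively against member values
    if isinstance(value, str):
        return any(value.lower() == i.value for i in Color)
    return any(value == i for i in Color)

assert contains('RED') and contains(Color.RED) and not contains('blue')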
hydpy-dev/hydpy
hydpy/exe/replacetools.py
xml_replace
def xml_replace(filename, **replacements):
    """Read the content of an XML template file (XMLT), apply the given
    `replacements` to its substitution markers, and write the result into an
    XML file with the same name but ending with `xml` instead of `xmlt`.

    First, we write an XMLT file, containing a regular HTML comment, a readily
    defined element `e1`, and some other elements with substitution markers.
    Substitution markers are HTML comments starting and ending with the `|`
    character:

    >>> from hydpy import xml_replace, TestIO
    >>> with TestIO():
    ...     with open('test1.xmlt', 'w') as templatefile:
    ...         _ = templatefile.write(
    ...             '<!--a normal comment-->\\n'
    ...             '<e1>element 1</e1>\\n'
    ...             '<e2><!--|e2|--></e2>\\n'
    ...             '<e3><!--|e3_|--></e3>\\n'
    ...             '<e4><!--|e4=element 4|--></e4>\\n'
    ...             '<e2><!--|e2|--></e2>')

    Function |xml_replace| can both be called within a Python session and from
    a command line.  We start with the first type of application.  Each
    substitution marker must be met by a keyword argument unless it holds a
    default value (`e4`).  All arguments are converted to a |str| object
    (`e3`).  Template files can use the same substitution marker multiple
    times (`e2`):

    >>> with TestIO():
    ...     xml_replace('test1', e2='E2', e3_=3, e4='ELEMENT 4')
    template file: test1.xmlt
    target file: test1.xml
    replacements:
     e2 --> E2 (given argument)
     e3_ --> 3 (given argument)
     e4 --> ELEMENT 4 (given argument)
     e2 --> E2 (given argument)

    >>> with TestIO():
    ...     with open('test1.xml') as targetfile:
    ...         print(targetfile.read())
    <!--a normal comment-->
    <e1>element 1</e1>
    <e2>E2</e2>
    <e3>3</e3>
    <e4>ELEMENT 4</e4>
    <e2>E2</e2>

    Without custom values, |xml_replace| applies predefined default values, if
    available (`e4`):

    >>> with TestIO():
    ...     xml_replace('test1', e2='E2', e3_=3)  # doctest: +ELLIPSIS
    template file: test1.xmlt
    target file: test1.xml
    replacements:
     e2 --> E2 (given argument)
     e3_ --> 3 (given argument)
     e4 --> element 4 (default argument)
     e2 --> E2 (given argument)

    >>> with TestIO():
    ...     with open('test1.xml') as targetfile:
    ...         print(targetfile.read())
    <!--a normal comment-->
    <e1>element 1</e1>
    <e2>E2</e2>
    <e3>3</e3>
    <e4>element 4</e4>
    <e2>E2</e2>

    Missing and useless keyword arguments result in errors:

    >>> with TestIO():
    ...     xml_replace('test1', e2='E2')
    Traceback (most recent call last):
    ...
    RuntimeError: While trying to replace the markers `e2, e3_, and e4` \
of the XML template file `test1.xmlt` with the available keywords `e2`, \
the following error occurred: Marker `e3_` cannot be replaced.

    >>> with TestIO():
    ...     xml_replace('test1', e2='e2', e3_='E3', e4='e4', e5='e5')
    Traceback (most recent call last):
    ...
    RuntimeError: While trying to replace the markers `e2, e3_, and e4` \
of the XML template file `test1.xmlt` with the available keywords `e2, e3_, \
e4, and e5`, the following error occurred: Keyword(s) `e5` cannot be used.

    Using different default values for the same substitution marker is not
    allowed:

    >>> from hydpy import pub, TestIO, xml_replace
    >>> with TestIO():
    ...     with open('test2.xmlt', 'w') as templatefile:
    ...         _ = templatefile.write(
    ...             '<e4><!--|e4=element 4|--></e4>\\n'
    ...             '<e4><!--|e4=ELEMENT 4|--></e4>')

    >>> with TestIO():
    ...     xml_replace('test2', e4=4)
    template file: test2.xmlt
    target file: test2.xml
    replacements:
     e4 --> 4 (given argument)
     e4 --> 4 (given argument)

    >>> with TestIO():
    ...     with open('test2.xml') as targetfile:
    ...         print(targetfile.read())
    <e4>4</e4>
    <e4>4</e4>

    >>> with TestIO():
    ...     xml_replace('test2')
    Traceback (most recent call last):
    ...
    RuntimeError: Template file `test2.xmlt` defines different default values \
for marker `e4`.

    As mentioned above, function |xml_replace| is registered as a "script
    function" and can thus be used via command line:

    >>> pub.scriptfunctions['xml_replace'].__name__
    'xml_replace'
    >>> pub.scriptfunctions['xml_replace'].__module__
    'hydpy.exe.replacetools'

    Use script |hyd| to execute function |xml_replace|:

    >>> from hydpy import run_subprocess
    >>> with TestIO():
    ...     run_subprocess(
    ...         'hyd.py xml_replace test1 e2="Element 2" e3_=3')
    template file: test1.xmlt
    target file: test1.xml
    replacements:
     e2 --> Element 2 (given argument)
     e3_ --> 3 (given argument)
     e4 --> element 4 (default argument)
     e2 --> Element 2 (given argument)

    >>> with TestIO():
    ...     with open('test1.xml') as targetfile:
    ...         print(targetfile.read())
    <!--a normal comment-->
    <e1>element 1</e1>
    <e2>Element 2</e2>
    <e3>3</e3>
    <e4>element 4</e4>
    <e2>Element 2</e2>
    """
    keywords = set(replacements.keys())
    templatename = f'{filename}.xmlt'
    targetname = f'{filename}.xml'
    print(f'template file: {templatename}')
    print(f'target file: {targetname}')
    print('replacements:')
    with open(templatename) as templatefile:
        templatebody = templatefile.read()
    parts = templatebody.replace('<!--|', '|-->').split('|-->')
    defaults = {}
    for idx, part in enumerate(parts):
        if idx % 2:
            subparts = part.partition('=')
            if subparts[2]:
                parts[idx] = subparts[0]
                if subparts[0] not in replacements:
                    if ((subparts[0] in defaults) and
                            (defaults[subparts[0]] != str(subparts[2]))):
                        raise RuntimeError(
                            f'Template file `{templatename}` defines '
                            f'different default values for marker '
                            f'`{subparts[0]}`.')
                    defaults[subparts[0]] = str(subparts[2])
    markers = parts[1::2]
    try:
        unused_keywords = keywords.copy()
        for idx, part in enumerate(parts):
            if idx % 2:
                argument_info = 'given argument'
                newpart = replacements.get(part)
                if newpart is None:
                    argument_info = 'default argument'
                    newpart = defaults.get(part)
                if newpart is None:
                    raise RuntimeError(
                        f'Marker `{part}` cannot be replaced.')
                print(f' {part} --> {newpart} ({argument_info})')
                parts[idx] = str(newpart)
                unused_keywords.discard(part)
        targetbody = ''.join(parts)
        if unused_keywords:
            raise RuntimeError(
                f'Keyword(s) `{objecttools.enumeration(unused_keywords)}` '
                f'cannot be used.')
        with open(targetname, 'w') as targetfile:
            targetfile.write(targetbody)
    except BaseException:
        objecttools.augment_excmessage(
            f'While trying to replace the markers '
            f'`{objecttools.enumeration(sorted(set(markers)))}` of the '
            f'XML template file `{templatename}` with the available '
            f'keywords `{objecttools.enumeration(sorted(keywords))}`')
python
def xml_replace(filename, **replacements):
    """Read the content of an XML template file (XMLT), apply the given
    `replacements` to its substitution markers, and write the result into an
    XML file with the same name but ending with `xml` instead of `xmlt`.

    First, we write an XMLT file, containing a regular HTML comment, a readily
    defined element `e1`, and some other elements with substitution markers.
    Substitution markers are HTML comments starting and ending with the `|`
    character:

    >>> from hydpy import xml_replace, TestIO
    >>> with TestIO():
    ...     with open('test1.xmlt', 'w') as templatefile:
    ...         _ = templatefile.write(
    ...             '<!--a normal comment-->\\n'
    ...             '<e1>element 1</e1>\\n'
    ...             '<e2><!--|e2|--></e2>\\n'
    ...             '<e3><!--|e3_|--></e3>\\n'
    ...             '<e4><!--|e4=element 4|--></e4>\\n'
    ...             '<e2><!--|e2|--></e2>')

    Function |xml_replace| can both be called within a Python session and from
    a command line.  We start with the first type of application.  Each
    substitution marker must be met by a keyword argument unless it holds a
    default value (`e4`).  All arguments are converted to a |str| object
    (`e3`).  Template files can use the same substitution marker multiple
    times (`e2`):

    >>> with TestIO():
    ...     xml_replace('test1', e2='E2', e3_=3, e4='ELEMENT 4')
    template file: test1.xmlt
    target file: test1.xml
    replacements:
     e2 --> E2 (given argument)
     e3_ --> 3 (given argument)
     e4 --> ELEMENT 4 (given argument)
     e2 --> E2 (given argument)

    >>> with TestIO():
    ...     with open('test1.xml') as targetfile:
    ...         print(targetfile.read())
    <!--a normal comment-->
    <e1>element 1</e1>
    <e2>E2</e2>
    <e3>3</e3>
    <e4>ELEMENT 4</e4>
    <e2>E2</e2>

    Without custom values, |xml_replace| applies predefined default values, if
    available (`e4`):

    >>> with TestIO():
    ...     xml_replace('test1', e2='E2', e3_=3)  # doctest: +ELLIPSIS
    template file: test1.xmlt
    target file: test1.xml
    replacements:
     e2 --> E2 (given argument)
     e3_ --> 3 (given argument)
     e4 --> element 4 (default argument)
     e2 --> E2 (given argument)

    >>> with TestIO():
    ...     with open('test1.xml') as targetfile:
    ...         print(targetfile.read())
    <!--a normal comment-->
    <e1>element 1</e1>
    <e2>E2</e2>
    <e3>3</e3>
    <e4>element 4</e4>
    <e2>E2</e2>

    Missing and useless keyword arguments result in errors:

    >>> with TestIO():
    ...     xml_replace('test1', e2='E2')
    Traceback (most recent call last):
    ...
    RuntimeError: While trying to replace the markers `e2, e3_, and e4` \
of the XML template file `test1.xmlt` with the available keywords `e2`, \
the following error occurred: Marker `e3_` cannot be replaced.

    >>> with TestIO():
    ...     xml_replace('test1', e2='e2', e3_='E3', e4='e4', e5='e5')
    Traceback (most recent call last):
    ...
    RuntimeError: While trying to replace the markers `e2, e3_, and e4` \
of the XML template file `test1.xmlt` with the available keywords `e2, e3_, \
e4, and e5`, the following error occurred: Keyword(s) `e5` cannot be used.

    Using different default values for the same substitution marker is not
    allowed:

    >>> from hydpy import pub, TestIO, xml_replace
    >>> with TestIO():
    ...     with open('test2.xmlt', 'w') as templatefile:
    ...         _ = templatefile.write(
    ...             '<e4><!--|e4=element 4|--></e4>\\n'
    ...             '<e4><!--|e4=ELEMENT 4|--></e4>')

    >>> with TestIO():
    ...     xml_replace('test2', e4=4)
    template file: test2.xmlt
    target file: test2.xml
    replacements:
     e4 --> 4 (given argument)
     e4 --> 4 (given argument)

    >>> with TestIO():
    ...     with open('test2.xml') as targetfile:
    ...         print(targetfile.read())
    <e4>4</e4>
    <e4>4</e4>

    >>> with TestIO():
    ...     xml_replace('test2')
    Traceback (most recent call last):
    ...
    RuntimeError: Template file `test2.xmlt` defines different default values \
for marker `e4`.

    As mentioned above, function |xml_replace| is registered as a "script
    function" and can thus be used via command line:

    >>> pub.scriptfunctions['xml_replace'].__name__
    'xml_replace'
    >>> pub.scriptfunctions['xml_replace'].__module__
    'hydpy.exe.replacetools'

    Use script |hyd| to execute function |xml_replace|:

    >>> from hydpy import run_subprocess
    >>> with TestIO():
    ...     run_subprocess(
    ...         'hyd.py xml_replace test1 e2="Element 2" e3_=3')
    template file: test1.xmlt
    target file: test1.xml
    replacements:
     e2 --> Element 2 (given argument)
     e3_ --> 3 (given argument)
     e4 --> element 4 (default argument)
     e2 --> Element 2 (given argument)

    >>> with TestIO():
    ...     with open('test1.xml') as targetfile:
    ...         print(targetfile.read())
    <!--a normal comment-->
    <e1>element 1</e1>
    <e2>Element 2</e2>
    <e3>3</e3>
    <e4>element 4</e4>
    <e2>Element 2</e2>
    """
    keywords = set(replacements.keys())
    templatename = f'{filename}.xmlt'
    targetname = f'{filename}.xml'
    print(f'template file: {templatename}')
    print(f'target file: {targetname}')
    print('replacements:')
    with open(templatename) as templatefile:
        templatebody = templatefile.read()
    parts = templatebody.replace('<!--|', '|-->').split('|-->')
    defaults = {}
    for idx, part in enumerate(parts):
        if idx % 2:
            subparts = part.partition('=')
            if subparts[2]:
                parts[idx] = subparts[0]
                if subparts[0] not in replacements:
                    if ((subparts[0] in defaults) and
                            (defaults[subparts[0]] != str(subparts[2]))):
                        raise RuntimeError(
                            f'Template file `{templatename}` defines '
                            f'different default values for marker '
                            f'`{subparts[0]}`.')
                    defaults[subparts[0]] = str(subparts[2])
    markers = parts[1::2]
    try:
        unused_keywords = keywords.copy()
        for idx, part in enumerate(parts):
            if idx % 2:
                argument_info = 'given argument'
                newpart = replacements.get(part)
                if newpart is None:
                    argument_info = 'default argument'
                    newpart = defaults.get(part)
                if newpart is None:
                    raise RuntimeError(
                        f'Marker `{part}` cannot be replaced.')
                print(f' {part} --> {newpart} ({argument_info})')
                parts[idx] = str(newpart)
                unused_keywords.discard(part)
        targetbody = ''.join(parts)
        if unused_keywords:
            raise RuntimeError(
                f'Keyword(s) `{objecttools.enumeration(unused_keywords)}` '
                f'cannot be used.')
        with open(targetname, 'w') as targetfile:
            targetfile.write(targetbody)
    except BaseException:
        objecttools.augment_excmessage(
            f'While trying to replace the markers '
            f'`{objecttools.enumeration(sorted(set(markers)))}` of the '
            f'XML template file `{templatename}` with the available '
            f'keywords `{objecttools.enumeration(sorted(keywords))}`')
[ "def", "xml_replace", "(", "filename", ",", "*", "*", "replacements", ")", ":", "keywords", "=", "set", "(", "replacements", ".", "keys", "(", ")", ")", "templatename", "=", "f'{filename}.xmlt'", "targetname", "=", "f'{filename}.xml'", "print", "(", "f'template file: {templatename}'", ")", "print", "(", "f'target file: {targetname}'", ")", "print", "(", "'replacements:'", ")", "with", "open", "(", "templatename", ")", "as", "templatefile", ":", "templatebody", "=", "templatefile", ".", "read", "(", ")", "parts", "=", "templatebody", ".", "replace", "(", "'<!--|'", ",", "'|-->'", ")", ".", "split", "(", "'|-->'", ")", "defaults", "=", "{", "}", "for", "idx", ",", "part", "in", "enumerate", "(", "parts", ")", ":", "if", "idx", "%", "2", ":", "subparts", "=", "part", ".", "partition", "(", "'='", ")", "if", "subparts", "[", "2", "]", ":", "parts", "[", "idx", "]", "=", "subparts", "[", "0", "]", "if", "subparts", "[", "0", "]", "not", "in", "replacements", ":", "if", "(", "(", "subparts", "[", "0", "]", "in", "defaults", ")", "and", "(", "defaults", "[", "subparts", "[", "0", "]", "]", "!=", "str", "(", "subparts", "[", "2", "]", ")", ")", ")", ":", "raise", "RuntimeError", "(", "f'Template file `{templatename}` defines '", "f'different default values for marker '", "f'`{subparts[0]}`.'", ")", "defaults", "[", "subparts", "[", "0", "]", "]", "=", "str", "(", "subparts", "[", "2", "]", ")", "markers", "=", "parts", "[", "1", ":", ":", "2", "]", "try", ":", "unused_keywords", "=", "keywords", ".", "copy", "(", ")", "for", "idx", ",", "part", "in", "enumerate", "(", "parts", ")", ":", "if", "idx", "%", "2", ":", "argument_info", "=", "'given argument'", "newpart", "=", "replacements", ".", "get", "(", "part", ")", "if", "newpart", "is", "None", ":", "argument_info", "=", "'default argument'", "newpart", "=", "defaults", ".", "get", "(", "part", ")", "if", "newpart", "is", "None", ":", "raise", "RuntimeError", "(", "f'Marker `{part}` cannot be replaced.'", ")", "print", "(", "f' {part} --> {newpart} ({argument_info})'", ")", "parts", "[", "idx", "]", "=", "str", "(", "newpart", ")", "unused_keywords", ".", "discard", "(", "part", ")", "targetbody", "=", "''", ".", "join", "(", "parts", ")", "if", "unused_keywords", ":", "raise", "RuntimeError", "(", "f'Keyword(s) `{objecttools.enumeration(unused_keywords)}` '", "f'cannot be used.'", ")", "with", "open", "(", "targetname", ",", "'w'", ")", "as", "targetfile", ":", "targetfile", ".", "write", "(", "targetbody", ")", "except", "BaseException", ":", "objecttools", ".", "augment_excmessage", "(", "f'While trying to replace the markers '", "f'`{objecttools.enumeration(sorted(set(markers)))}` of the '", "f'XML template file `{templatename}` with the available '", "f'keywords `{objecttools.enumeration(sorted(keywords))}`'", ")" ]
Read the content of an XML template file (XMLT), apply the given `replacements` to its substitution markers, and write the result into an XML file with the same name but ending with `xml` instead of `xmlt`. First, we write an XMLT file, containing a regular HTML comment, a readily defined element `e1`, and some other elements with substitutions markers. Substitution markers are HTML comments starting and ending with the `|` character: >>> from hydpy import xml_replace, TestIO >>> with TestIO(): ... with open('test1.xmlt', 'w') as templatefile: ... _ = templatefile.write( ... '<!--a normal comment-->\\n' ... '<e1>element 1</e1>\\n' ... '<e2><!--|e2|--></e2>\\n' ... '<e3><!--|e3_|--></e3>\\n' ... '<e4><!--|e4=element 4|--></e4>\\n' ... '<e2><!--|e2|--></e2>') Function |xml_replace| can both be called within a Python session and from a command line. We start with the first type of application. Each substitution marker must be met by a keyword argument unless it holds a default value (`e4`). All arguments are converted to a |str| object (`e3`). Template files can use the same substitution marker multiple times (`e2`): >>> with TestIO(): ... xml_replace('test1', e2='E2', e3_=3, e4='ELEMENT 4') template file: test1.xmlt target file: test1.xml replacements: e2 --> E2 (given argument) e3_ --> 3 (given argument) e4 --> ELEMENT 4 (given argument) e2 --> E2 (given argument) >>> with TestIO(): ... with open('test1.xml') as targetfile: ... print(targetfile.read()) <!--a normal comment--> <e1>element 1</e1> <e2>E2</e2> <e3>3</e3> <e4>ELEMENT 4</e4> <e2>E2</e2> Without custom values, |xml_replace| applies predefined default values, if available (`e4`): >>> with TestIO(): ... xml_replace('test1', e2='E2', e3_=3) # doctest: +ELLIPSIS template file: test1.xmlt target file: test1.xml replacements: e2 --> E2 (given argument) e3_ --> 3 (given argument) e4 --> element 4 (default argument) e2 --> E2 (given argument) >>> with TestIO(): ... with open('test1.xml') as targetfile: ... print(targetfile.read()) <!--a normal comment--> <e1>element 1</e1> <e2>E2</e2> <e3>3</e3> <e4>element 4</e4> <e2>E2</e2> Missing and useless keyword arguments result in errors: >>> with TestIO(): ... xml_replace('test1', e2='E2') Traceback (most recent call last): ... RuntimeError: While trying to replace the markers `e2, e3_, and e4` \ of the XML template file `test1.xmlt` with the available keywords `e2`, \ the following error occurred: Marker `e3_` cannot be replaced. >>> with TestIO(): ... xml_replace('test1', e2='e2', e3_='E3', e4='e4', e5='e5') Traceback (most recent call last): ... RuntimeError: While trying to replace the markers `e2, e3_, and e4` \ of the XML template file `test1.xmlt` with the available keywords `e2, e3_, \ e4, and e5`, the following error occurred: Keyword(s) `e5` cannot be used. Using different default values for the same substitution marker is not allowed: >>> from hydpy import pub, TestIO, xml_replace >>> with TestIO(): ... with open('test2.xmlt', 'w') as templatefile: ... _ = templatefile.write( ... '<e4><!--|e4=element 4|--></e4>\\n' ... '<e4><!--|e4=ELEMENT 4|--></e4>') >>> with TestIO(): ... xml_replace('test2', e4=4) template file: test2.xmlt target file: test2.xml replacements: e4 --> 4 (given argument) e4 --> 4 (given argument) >>> with TestIO(): ... with open('test2.xml') as targetfile: ... print(targetfile.read()) <e4>4</e4> <e4>4</e4> >>> with TestIO(): ... xml_replace('test2') Traceback (most recent call last): ... 
RuntimeError: Template file `test2.xmlt` defines different default values \ for marker `e4`. As mentioned above, function |xml_replace| is registered as a "script function" and can thus be used via command line: >>> pub.scriptfunctions['xml_replace'].__name__ 'xml_replace' >>> pub.scriptfunctions['xml_replace'].__module__ 'hydpy.exe.replacetools' Use script |hyd| to execute function |xml_replace|: >>> from hydpy import run_subprocess >>> with TestIO(): ... run_subprocess( ... 'hyd.py xml_replace test1 e2="Element 2" e3_=3') template file: test1.xmlt target file: test1.xml replacements: e2 --> Element 2 (given argument) e3_ --> 3 (given argument) e4 --> element 4 (default argument) e2 --> Element 2 (given argument) >>> with TestIO(): ... with open('test1.xml') as targetfile: ... print(targetfile.read()) <!--a normal comment--> <e1>element 1</e1> <e2>Element 2</e2> <e3>3</e3> <e4>element 4</e4> <e2>Element 2</e2>
[ "Read", "the", "content", "of", "an", "XML", "template", "file", "(", "XMLT", ")", "apply", "the", "given", "replacements", "to", "its", "substitution", "markers", "and", "write", "the", "result", "into", "an", "XML", "file", "with", "the", "same", "name", "but", "ending", "with", "xml", "instead", "of", "xmlt", "." ]
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/exe/replacetools.py#L9-L211
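A minimal, self-contained sketch of the marker-splitting trick used in `xml_replace` above: rewriting the opening delimiter `<!--|` to `|-->` lets a single `split('|-->')` call produce a list in which every odd index is a marker name (optionally carrying an `=default` suffix) and every even index is literal template text. The template string below is illustrative only, not taken from the repository:

template = '<e2><!--|e2|--></e2>\n<e4><!--|e4=element 4|--></e4>'
parts = template.replace('<!--|', '|-->').split('|-->')
# Even indices hold literal text, odd indices hold the markers.
print(parts[0::2])  # ['<e2>', '</e2>\n<e4>', '</e4>']
print(parts[1::2])  # ['e2', 'e4=element 4']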
hydpy-dev/hydpy
hydpy/core/indextools.py
IndexerProperty._calcidxs
def _calcidxs(func): """Return the required indexes based on the given lambda function and the |Timegrids| object handled by module |pub|. Raise a |RuntimeError| if the latter is not available. """ timegrids = hydpy.pub.get('timegrids') if timegrids is None: raise RuntimeError( 'An Indexer object has been asked for an %s array. Such an ' 'array has neither been determined yet nor can it be ' 'determined automatically at the moment. Either define an ' '%s array manually and pass it to the Indexer object, or make ' 'a proper Timegrids object available within the pub module. ' 'In usual HydPy applications, the latter is done ' 'automatically.' % (func.__name__, func.__name__)) idxs = numpy.empty(len(timegrids.init), dtype=int) for jdx, date in enumerate(hydpy.pub.timegrids.init): idxs[jdx] = func(date) return idxs
python
def _calcidxs(func): """Return the required indexes based on the given lambda function and the |Timegrids| object handled by module |pub|. Raise a |RuntimeError| if the latter is not available. """ timegrids = hydpy.pub.get('timegrids') if timegrids is None: raise RuntimeError( 'An Indexer object has been asked for an %s array. Such an ' 'array has neither been determined yet nor can it be ' 'determined automatically at the moment. Either define an ' '%s array manually and pass it to the Indexer object, or make ' 'a proper Timegrids object available within the pub module. ' 'In usual HydPy applications, the latter is done ' 'automatically.' % (func.__name__, func.__name__)) idxs = numpy.empty(len(timegrids.init), dtype=int) for jdx, date in enumerate(hydpy.pub.timegrids.init): idxs[jdx] = func(date) return idxs
[ "def", "_calcidxs", "(", "func", ")", ":", "timegrids", "=", "hydpy", ".", "pub", ".", "get", "(", "'timegrids'", ")", "if", "timegrids", "is", "None", ":", "raise", "RuntimeError", "(", "'An Indexer object has been asked for an %s array. Such an '", "'array has neither been determined yet nor can it be '", "'determined automatically at the moment. Either define an '", "'%s array manually and pass it to the Indexer object, or make '", "'a proper Timegrids object available within the pub module. '", "'In usual HydPy applications, the latter is done '", "'automatically.'", "%", "(", "func", ".", "__name__", ",", "func", ".", "__name__", ")", ")", "idxs", "=", "numpy", ".", "empty", "(", "len", "(", "timegrids", ".", "init", ")", ",", "dtype", "=", "int", ")", "for", "jdx", ",", "date", "in", "enumerate", "(", "hydpy", ".", "pub", ".", "timegrids", ".", "init", ")", ":", "idxs", "[", "jdx", "]", "=", "func", "(", "date", ")", "return", "idxs" ]
Return the required indexes based on the given lambda function and the |Timegrids| object handled by module |pub|. Raise a |RuntimeError| if the latter is not available.
[ "Return", "the", "required", "indexes", "based", "on", "the", "given", "lambda", "function", "and", "the", "|Timegrids|", "object", "handled", "by", "module", "|pub|", ".", "Raise", "a", "|RuntimeError|", "if", "the", "latter", "is", "not", "available", "." ]
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/indextools.py#L132-L151
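The pattern in `_calcidxs` above -- evaluate a per-date function for every date of the initialisation period and collect the results in an integer array -- can be sketched without any HydPy machinery. All names below (`calcidxs`, the hand-built date list) are illustrative stand-ins, not HydPy API:

import datetime

import numpy

# Five daily dates standing in for a Timegrids initialisation period.
dates = [datetime.date(2004, 2, 27) + datetime.timedelta(days=jdx)
         for jdx in range(5)]

def calcidxs(func, dates):
    idxs = numpy.empty(len(dates), dtype=int)
    for jdx, date in enumerate(dates):
        idxs[jdx] = func(date)
    return idxs

print(calcidxs(lambda date: date.month - 1, dates))  # [1 1 1 2 2]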
hydpy-dev/hydpy
hydpy/core/indextools.py
Indexer.dayofyear
def dayofyear(self): """Day of the year index (the first of January = 0...). For reasons of consistency between leap years and non-leap years, assuming a daily time step, index 59 is always associated with the 29th of February. Hence, it is missing in non-leap years: >>> from hydpy import pub >>> from hydpy.core.indextools import Indexer >>> pub.timegrids = '27.02.2004', '3.03.2004', '1d' >>> Indexer().dayofyear array([57, 58, 59, 60, 61]) >>> pub.timegrids = '27.02.2005', '3.03.2005', '1d' >>> Indexer().dayofyear array([57, 58, 60, 61]) """ def _dayofyear(date): return (date.dayofyear-1 + ((date.month > 2) and (not date.leapyear))) return _dayofyear
python
def dayofyear(self): """Day of the year index (the first of January = 0...). For reasons of consistency between leap years and non-leap years, assuming a daily time step, index 59 is always associated with the 29th of February. Hence, it is missing in non-leap years: >>> from hydpy import pub >>> from hydpy.core.indextools import Indexer >>> pub.timegrids = '27.02.2004', '3.03.2004', '1d' >>> Indexer().dayofyear array([57, 58, 59, 60, 61]) >>> pub.timegrids = '27.02.2005', '3.03.2005', '1d' >>> Indexer().dayofyear array([57, 58, 60, 61]) """ def _dayofyear(date): return (date.dayofyear-1 + ((date.month > 2) and (not date.leapyear))) return _dayofyear
[ "def", "dayofyear", "(", "self", ")", ":", "def", "_dayofyear", "(", "date", ")", ":", "return", "(", "date", ".", "dayofyear", "-", "1", "+", "(", "(", "date", ".", "month", ">", "2", ")", "and", "(", "not", "date", ".", "leapyear", ")", ")", ")", "return", "_dayofyear" ]
Day of the year index (the first of January = 0...). For reasons of consistency between leap years and non-leap years, assuming a daily time step, index 59 is always associated with the 29th of February. Hence, it is missing in non-leap years: >>> from hydpy import pub >>> from hydpy.core.indextools import Indexer >>> pub.timegrids = '27.02.2004', '3.03.2004', '1d' >>> Indexer().dayofyear array([57, 58, 59, 60, 61]) >>> pub.timegrids = '27.02.2005', '3.03.2005', '1d' >>> Indexer().dayofyear array([57, 58, 60, 61])
[ "Day", "of", "the", "year", "index", "(", "the", "first", "of", "January", "=", "0", "...", ")", "." ]
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/indextools.py#L188-L207
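The leap-year correction inside `dayofyear` can be reproduced with the standard library alone. A hedged sketch (the function name and the chosen dates are illustrative) showing why index 59 is reserved for the 29th of February and therefore skipped in 2005:

import calendar
import datetime

def dayofyear_index(date):
    # Shift all dates after the 28th of February of non-leap years by
    # one, so that index 59 always stands for the 29th of February.
    correction = (date.month > 2) and (not calendar.isleap(date.year))
    return date.timetuple().tm_yday - 1 + correction

print([dayofyear_index(datetime.date(2005, 2, day)) for day in (27, 28)])  # [57, 58]
print([dayofyear_index(datetime.date(2005, 3, day)) for day in (1, 2)])    # [60, 61]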
hydpy-dev/hydpy
hydpy/core/indextools.py
Indexer.timeofyear
def timeofyear(self):
        """Time of the year index (first simulation step of each year = 0...).

        The property |Indexer.timeofyear| is best explained by comparing
        it with property |Indexer.dayofyear|:

        Let us reconsider one of the examples of the documentation on
        property |Indexer.dayofyear|:

        >>> from hydpy import pub
        >>> from hydpy import Timegrids, Timegrid
        >>> from hydpy.core.indextools import Indexer
        >>> pub.timegrids = '27.02.2005', '3.03.2005', '1d'

        Due to the simulation stepsize being one day, the index arrays
        calculated by both properties are identical:

        >>> Indexer().dayofyear
        array([57, 58, 60, 61])
        >>> Indexer().timeofyear
        array([57, 58, 60, 61])

        In the next example the step size is halved:

        >>> pub.timegrids = '27.02.2005', '3.03.2005', '12h'

        Now there are generally two subsequent simulation steps associated
        with the same day:

        >>> Indexer().dayofyear
        array([57, 57, 58, 58, 60, 60, 61, 61])

        However, the `timeofyear` array gives the index of the respective
        simulation steps of the actual year:

        >>> Indexer().timeofyear
        array([114, 115, 116, 117, 120, 121, 122, 123])

        Note the gap in the returned index array due to 2005 not being
        a leap year.
        """
        refgrid = timetools.Timegrid(
            timetools.Date('2000.01.01'),
            timetools.Date('2001.01.01'),
            hydpy.pub.timegrids.stepsize)

        def _timeofyear(date):
            date = copy.deepcopy(date)
            date.year = 2000
            return refgrid[date]

        return _timeofyear
python
def timeofyear(self):
        """Time of the year index (first simulation step of each year = 0...).

        The property |Indexer.timeofyear| is best explained by comparing
        it with property |Indexer.dayofyear|:

        Let us reconsider one of the examples of the documentation on
        property |Indexer.dayofyear|:

        >>> from hydpy import pub
        >>> from hydpy import Timegrids, Timegrid
        >>> from hydpy.core.indextools import Indexer
        >>> pub.timegrids = '27.02.2005', '3.03.2005', '1d'

        Due to the simulation stepsize being one day, the index arrays
        calculated by both properties are identical:

        >>> Indexer().dayofyear
        array([57, 58, 60, 61])
        >>> Indexer().timeofyear
        array([57, 58, 60, 61])

        In the next example the step size is halved:

        >>> pub.timegrids = '27.02.2005', '3.03.2005', '12h'

        Now there are generally two subsequent simulation steps associated
        with the same day:

        >>> Indexer().dayofyear
        array([57, 57, 58, 58, 60, 60, 61, 61])

        However, the `timeofyear` array gives the index of the respective
        simulation steps of the actual year:

        >>> Indexer().timeofyear
        array([114, 115, 116, 117, 120, 121, 122, 123])

        Note the gap in the returned index array due to 2005 not being
        a leap year.
        """
        refgrid = timetools.Timegrid(
            timetools.Date('2000.01.01'),
            timetools.Date('2001.01.01'),
            hydpy.pub.timegrids.stepsize)

        def _timeofyear(date):
            date = copy.deepcopy(date)
            date.year = 2000
            return refgrid[date]

        return _timeofyear
[ "def", "timeofyear", "(", "self", ")", ":", "refgrid", "=", "timetools", ".", "Timegrid", "(", "timetools", ".", "Date", "(", "'2000.01.01'", ")", ",", "timetools", ".", "Date", "(", "'2001.01.01'", ")", ",", "hydpy", ".", "pub", ".", "timegrids", ".", "stepsize", ")", "def", "_timeofyear", "(", "date", ")", ":", "date", "=", "copy", ".", "deepcopy", "(", "date", ")", "date", ".", "year", "=", "2000", "return", "refgrid", "[", "date", "]", "return", "_timeofyear" ]
Time of the year index (first simulation step of each year = 0...).

The property |Indexer.timeofyear| is best explained by comparing it
with property |Indexer.dayofyear|:

Let us reconsider one of the examples of the documentation on
property |Indexer.dayofyear|:

>>> from hydpy import pub
>>> from hydpy import Timegrids, Timegrid
>>> from hydpy.core.indextools import Indexer
>>> pub.timegrids = '27.02.2005', '3.03.2005', '1d'

Due to the simulation stepsize being one day, the index arrays
calculated by both properties are identical:

>>> Indexer().dayofyear
array([57, 58, 60, 61])
>>> Indexer().timeofyear
array([57, 58, 60, 61])

In the next example the step size is halved:

>>> pub.timegrids = '27.02.2005', '3.03.2005', '12h'

Now there are generally two subsequent simulation steps associated
with the same day:

>>> Indexer().dayofyear
array([57, 57, 58, 58, 60, 60, 61, 61])

However, the `timeofyear` array gives the index of the respective
simulation steps of the actual year:

>>> Indexer().timeofyear
array([114, 115, 116, 117, 120, 121, 122, 123])

Note the gap in the returned index array due to 2005 not being a leap
year.
[ "Time", "of", "the", "year", "index", "(", "first", "simulation", "step", "of", "each", "year", "=", "0", "...", ")", "." ]
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/indextools.py#L210-L261
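The essential idea of `timeofyear` -- project every date onto a reference leap year and take its step index within that year's grid -- can be sketched as follows. The 12-hour step matches the doctest above; `REF`, `STEP`, and `timeofyear_index` are illustrative names, not HydPy API:

import datetime

REF = datetime.datetime(2000, 1, 1)   # reference leap year
STEP = datetime.timedelta(hours=12)

def timeofyear_index(date):
    # Map the date into the reference year, then count elapsed steps.
    projected = date.replace(year=2000)
    return int((projected - REF) / STEP)

print(timeofyear_index(datetime.datetime(2005, 2, 27)))  # 114
print(timeofyear_index(datetime.datetime(2005, 3, 1)))   # 120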
hydpy-dev/hydpy
hydpy/core/propertytools.py
BaseProperty.set_doc
def set_doc(self, doc: str): """Assign the given docstring to the property instance and, if possible, to the `__test__` dictionary of the module of its owner class.""" self.__doc__ = doc if hasattr(self, 'module'): ref = f'{self.objtype.__name__}.{self.name}' self.module.__dict__['__test__'][ref] = doc
python
def set_doc(self, doc: str): """Assign the given docstring to the property instance and, if possible, to the `__test__` dictionary of the module of its owner class.""" self.__doc__ = doc if hasattr(self, 'module'): ref = f'{self.objtype.__name__}.{self.name}' self.module.__dict__['__test__'][ref] = doc
[ "def", "set_doc", "(", "self", ",", "doc", ":", "str", ")", ":", "self", ".", "__doc__", "=", "doc", "if", "hasattr", "(", "self", ",", "'module'", ")", ":", "ref", "=", "f'{self.objtype.__name__}.{self.name}'", "self", ".", "module", ".", "__dict__", "[", "'__test__'", "]", "[", "ref", "]", "=", "doc" ]
Assign the given docstring to the property instance and, if possible, to the `__test__` dictionary of the module of its owner class.
[ "Assign", "the", "given", "docstring", "to", "the", "property", "instance", "and", "if", "possible", "to", "the", "__test__", "dictionary", "of", "the", "module", "of", "its", "owner", "class", "." ]
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/propertytools.py#L139-L146
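`set_doc` works because the doctest module collects the entries of a module-level `__test__` dictionary, which makes a property's docstring examples testable even though doctest does not look into custom descriptor objects by itself. A minimal demonstration of that mechanism (the key name is illustrative):

import doctest

__test__ = {}

doc = """
>>> 1 + 1
2
"""
# This mirrors what set_doc does for its owner class's module:
__test__['SomeClass.someproperty'] = doc

results = doctest.testmod()
print(results.failed, results.attempted)  # 0 1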
hydpy-dev/hydpy
hydpy/core/propertytools.py
BaseProperty.getter_
def getter_(self, fget) -> 'BaseProperty': """Add the given getter function and its docstring to the property and return it.""" self.fget = fget self.set_doc(fget.__doc__) return self
python
def getter_(self, fget) -> 'BaseProperty': """Add the given getter function and its docstring to the property and return it.""" self.fget = fget self.set_doc(fget.__doc__) return self
[ "def", "getter_", "(", "self", ",", "fget", ")", "->", "'BaseProperty'", ":", "self", ".", "fget", "=", "fget", "self", ".", "set_doc", "(", "fget", ".", "__doc__", ")", "return", "self" ]
Add the given getter function and its docstring to the property and return it.
[ "Add", "the", "given", "getter", "function", "and", "its", "docstring", "to", "the", "property", "and", "return", "it", "." ]
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/propertytools.py#L160-L165
hydpy-dev/hydpy
hydpy/core/propertytools.py
ProtectedProperty.isready
def isready(self, obj) -> bool:
        """Return |True| or |False| to indicate if the protected
        property is ready for the given object. If the object is
        unknown, |ProtectedProperty| returns |False|."""
        return vars(obj).get(self.name, False)
python
def isready(self, obj) -> bool:
        """Return |True| or |False| to indicate if the protected
        property is ready for the given object. If the object is
        unknown, |ProtectedProperty| returns |False|."""
        return vars(obj).get(self.name, False)
[ "def", "isready", "(", "self", ",", "obj", ")", "->", "bool", ":", "return", "vars", "(", "obj", ")", ".", "get", "(", "self", ".", "name", ",", "False", ")" ]
Return |True| or |False| to indicate if the protected property is
ready for the given object. If the object is unknown,
|ProtectedProperty| returns |False|.
[ "Return", "|True|", "or", "|False|", "to", "indicate", "if", "the", "protected", "property", "is", "ready", "for", "the", "given", "object", ".", "If", "the", "object", "is", "unknow", "|ProtectedProperty|", "returns", "|False|", "." ]
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/propertytools.py#L276-L280
hydpy-dev/hydpy
hydpy/core/propertytools.py
ProtectedProperties.allready
def allready(self, obj) -> bool: """Return |True| or |False| to indicate whether all protected properties are ready or not.""" for prop in self.__properties: if not prop.isready(obj): return False return True
python
def allready(self, obj) -> bool: """Return |True| or |False| to indicate whether all protected properties are ready or not.""" for prop in self.__properties: if not prop.isready(obj): return False return True
[ "def", "allready", "(", "self", ",", "obj", ")", "->", "bool", ":", "for", "prop", "in", "self", ".", "__properties", ":", "if", "not", "prop", ".", "isready", "(", "obj", ")", ":", "return", "False", "return", "True" ]
Return |True| or |False| to indicate whether all protected properties are ready or not.
[ "Return", "|True|", "or", "|False|", "to", "indicate", "whether", "all", "protected", "properties", "are", "ready", "or", "not", "." ]
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/propertytools.py#L324-L330
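The bookkeeping behind `isready` and `allready` reduces to flags stored in the instance dictionary under each property's name, so readiness checks are plain `vars(obj).get(name, False)` lookups. A hydpy-free sketch of that pattern (all names are illustrative):

def isready(obj, name):
    # A protected property records True under its own name once set.
    return vars(obj).get(name, False)

def allready(obj, names):
    return all(isready(obj, name) for name in names)

class Obj:
    pass

obj = Obj()
print(allready(obj, ('x', 'y')))  # False
vars(obj)['x'] = True
vars(obj)['y'] = True
print(allready(obj, ('x', 'y')))  # True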
hydpy-dev/hydpy
hydpy/core/propertytools.py
DefaultProperty.call_fget
def call_fget(self, obj) -> Any:
        """Return the predefined custom value when available; otherwise,
        the value defined by the getter function."""
        custom = vars(obj).get(self.name)
        if custom is None:
            return self.fget(obj)
        return custom
python
def call_fget(self, obj) -> Any:
        """Return the predefined custom value when available; otherwise,
        the value defined by the getter function."""
        custom = vars(obj).get(self.name)
        if custom is None:
            return self.fget(obj)
        return custom
[ "def", "call_fget", "(", "self", ",", "obj", ")", "->", "Any", ":", "custom", "=", "vars", "(", "obj", ")", ".", "get", "(", "self", ".", "name", ")", "if", "custom", "is", "None", ":", "return", "self", ".", "fget", "(", "obj", ")", "return", "custom" ]
Return the predefined custom value when available; otherwise, the
value defined by the getter function.
[ "Return", "the", "predefined", "custom", "value", "when", "available", "otherwise", "the", "value", "defined", "by", "the", "getter", "function", "." ]
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/propertytools.py#L528-L534
hydpy-dev/hydpy
hydpy/core/propertytools.py
DefaultProperty.call_fset
def call_fset(self, obj, value) -> None: """Store the given custom value and call the setter function.""" vars(obj)[self.name] = self.fset(obj, value)
python
def call_fset(self, obj, value) -> None: """Store the given custom value and call the setter function.""" vars(obj)[self.name] = self.fset(obj, value)
[ "def", "call_fset", "(", "self", ",", "obj", ",", "value", ")", "->", "None", ":", "vars", "(", "obj", ")", "[", "self", ".", "name", "]", "=", "self", ".", "fset", "(", "obj", ",", "value", ")" ]
Store the given custom value and call the setter function.
[ "Store", "the", "given", "custom", "value", "and", "call", "the", "setter", "function", "." ]
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/propertytools.py#L536-L538
hydpy-dev/hydpy
hydpy/core/propertytools.py
DefaultProperty.call_fdel
def call_fdel(self, obj) -> None: """Remove the predefined custom value and call the delete function.""" self.fdel(obj) try: del vars(obj)[self.name] except KeyError: pass
python
def call_fdel(self, obj) -> None: """Remove the predefined custom value and call the delete function.""" self.fdel(obj) try: del vars(obj)[self.name] except KeyError: pass
[ "def", "call_fdel", "(", "self", ",", "obj", ")", "->", "None", ":", "self", ".", "fdel", "(", "obj", ")", "try", ":", "del", "vars", "(", "obj", ")", "[", "self", ".", "name", "]", "except", "KeyError", ":", "pass" ]
Remove the predefined custom value and call the delete function.
[ "Remove", "the", "predefined", "custom", "value", "and", "call", "the", "delete", "function", "." ]
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/propertytools.py#L540-L546
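Taken together, `call_fget`, `call_fset`, and `call_fdel` implement a default-with-override pattern: a custom value stored in the instance dictionary shadows the getter's fallback until it is deleted again. A compact, hydpy-free descriptor sketch of that behaviour (class and attribute names are illustrative, and the simplified `__set__` skips the value conversion the real setter performs):

class DefaultProperty:
    def __init__(self, fget):
        self.fget = fget
        self.name = fget.__name__

    def __get__(self, obj, objtype=None):
        # A stored custom value shadows the getter's default.
        custom = vars(obj).get(self.name)
        if custom is None:
            return self.fget(obj)
        return custom

    def __set__(self, obj, value):
        vars(obj)[self.name] = value

    def __delete__(self, obj):
        vars(obj).pop(self.name, None)

class Config:
    @DefaultProperty
    def timeout(self):
        return 30  # fallback when no custom value is stored

cfg = Config()
print(cfg.timeout)   # 30 (default from the getter)
cfg.timeout = 60
print(cfg.timeout)   # 60 (custom value shadows the default)
del cfg.timeout
print(cfg.timeout)   # 30 (default again)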
hydpy-dev/hydpy
hydpy/models/lland/lland_control.py
RelWZ.trim
def trim(self, lower=None, upper=None):
        """Trim lower values in accordance with :math:`RelWB \\leq RelWZ`.

        >>> from hydpy.models.lland import *
        >>> parameterstep('1d')
        >>> nhru(3)
        >>> lnk(ACKER)
        >>> relwb.values = 0.5
        >>> relwz(0.2, 0.5, 0.8)
        >>> relwz
        relwz(0.5, 0.5, 0.8)
        """
        if lower is None:
            lower = getattr(self.subpars.relwb, 'value', None)
        lland_parameters.ParameterSoil.trim(self, lower, upper)
python
def trim(self, lower=None, upper=None):
        """Trim lower values in accordance with :math:`RelWB \\leq RelWZ`.

        >>> from hydpy.models.lland import *
        >>> parameterstep('1d')
        >>> nhru(3)
        >>> lnk(ACKER)
        >>> relwb.values = 0.5
        >>> relwz(0.2, 0.5, 0.8)
        >>> relwz
        relwz(0.5, 0.5, 0.8)
        """
        if lower is None:
            lower = getattr(self.subpars.relwb, 'value', None)
        lland_parameters.ParameterSoil.trim(self, lower, upper)
[ "def", "trim", "(", "self", ",", "lower", "=", "None", ",", "upper", "=", "None", ")", ":", "if", "lower", "is", "None", ":", "lower", "=", "getattr", "(", "self", ".", "subpars", ".", "relwb", ",", "'value'", ",", "None", ")", "lland_parameters", ".", "ParameterSoil", ".", "trim", "(", "self", ",", "lower", ",", "upper", ")" ]
Trim lower values in accordance with :math:`RelWB \\leq RelWZ`.

>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> nhru(3)
>>> lnk(ACKER)
>>> relwb.values = 0.5
>>> relwz(0.2, 0.5, 0.8)
>>> relwz
relwz(0.5, 0.5, 0.8)
[ "Trim", "upper", "values", "in", "accordance", "with", ":", "math", ":", "RelWB", "\\\\", "leq", "RelWZ", "." ]
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/lland/lland_control.py#L313-L327
hydpy-dev/hydpy
hydpy/models/lland/lland_control.py
RelWB.trim
def trim(self, lower=None, upper=None): """Trim upper values in accordance with :math:`RelWB \\leq RelWZ`. >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(3) >>> lnk(ACKER) >>> relwz.values = 0.5 >>> relwb(0.2, 0.5, 0.8) >>> relwb relwb(0.2, 0.5, 0.5) """ if upper is None: upper = getattr(self.subpars.relwz, 'value', None) lland_parameters.ParameterSoil.trim(self, lower, upper)
python
def trim(self, lower=None, upper=None): """Trim upper values in accordance with :math:`RelWB \\leq RelWZ`. >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(3) >>> lnk(ACKER) >>> relwz.values = 0.5 >>> relwb(0.2, 0.5, 0.8) >>> relwb relwb(0.2, 0.5, 0.5) """ if upper is None: upper = getattr(self.subpars.relwz, 'value', None) lland_parameters.ParameterSoil.trim(self, lower, upper)
[ "def", "trim", "(", "self", ",", "lower", "=", "None", ",", "upper", "=", "None", ")", ":", "if", "upper", "is", "None", ":", "upper", "=", "getattr", "(", "self", ".", "subpars", ".", "relwz", ",", "'value'", ",", "None", ")", "lland_parameters", ".", "ParameterSoil", ".", "trim", "(", "self", ",", "lower", ",", "upper", ")" ]
Trim upper values in accordance with :math:`RelWB \\leq RelWZ`. >>> from hydpy.models.lland import * >>> parameterstep('1d') >>> nhru(3) >>> lnk(ACKER) >>> relwz.values = 0.5 >>> relwb(0.2, 0.5, 0.8) >>> relwb relwb(0.2, 0.5, 0.5)
[ "Trim", "upper", "values", "in", "accordance", "with", ":", "math", ":", "RelWB", "\\\\", "leq", "RelWZ", "." ]
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/lland/lland_control.py#L336-L350
hydpy-dev/hydpy
hydpy/models/lland/lland_control.py
EQB.trim
def trim(self, lower=None, upper=None):
        """Trim lower values in accordance with :math:`EQI1 \\leq EQB`.

        >>> from hydpy.models.lland import *
        >>> parameterstep('1d')
        >>> eqi1.value = 2.0
        >>> eqb(1.0)
        >>> eqb
        eqb(2.0)
        >>> eqb(2.0)
        >>> eqb
        eqb(2.0)
        >>> eqb(3.0)
        >>> eqb
        eqb(3.0)
        """
        if lower is None:
            lower = getattr(self.subpars.eqi1, 'value', None)
        super().trim(lower, upper)
python
def trim(self, lower=None, upper=None):
        """Trim lower values in accordance with :math:`EQI1 \\leq EQB`.

        >>> from hydpy.models.lland import *
        >>> parameterstep('1d')
        >>> eqi1.value = 2.0
        >>> eqb(1.0)
        >>> eqb
        eqb(2.0)
        >>> eqb(2.0)
        >>> eqb
        eqb(2.0)
        >>> eqb(3.0)
        >>> eqb
        eqb(3.0)
        """
        if lower is None:
            lower = getattr(self.subpars.eqi1, 'value', None)
        super().trim(lower, upper)
[ "def", "trim", "(", "self", ",", "lower", "=", "None", ",", "upper", "=", "None", ")", ":", "if", "lower", "is", "None", ":", "lower", "=", "getattr", "(", "self", ".", "subpars", ".", "eqi1", ",", "'value'", ",", "None", ")", "super", "(", ")", ".", "trim", "(", "lower", ",", "upper", ")" ]
Trim lower values in accordance with :math:`EQI1 \\leq EQB`.

>>> from hydpy.models.lland import *
>>> parameterstep('1d')
>>> eqi1.value = 2.0
>>> eqb(1.0)
>>> eqb
eqb(2.0)
>>> eqb(2.0)
>>> eqb
eqb(2.0)
>>> eqb(3.0)
>>> eqb
eqb(3.0)
[ "Trim", "upper", "values", "in", "accordance", "with", ":", "math", ":", "EQI1", "\\\\", "leq", "EQB", "." ]
train
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/models/lland/lland_control.py#L661-L679
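All three `trim` methods above share one pattern: default the missing bound to the partner parameter's current value, then clip the values into the resulting interval. A hydpy-free sketch reproducing the RelWZ doctest result (function and variable names are illustrative):

import numpy

def trim(values, lower=None, upper=None):
    # Missing bounds are treated as unbounded, as in the methods above
    # when the partner parameter has no value yet.
    lower = -numpy.inf if lower is None else lower
    upper = numpy.inf if upper is None else upper
    return numpy.clip(values, lower, upper)

relwb = 0.5                                   # partner parameter's value
relwz = numpy.array([0.2, 0.5, 0.8])
print(trim(relwz, lower=relwb))               # [0.5 0.5 0.8]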