Dataset schema (column name: type, length statistics):

repository_name: string, length 7–55
func_path_in_repository: string, length 4–223
func_name: string, length 1–134
whole_func_string: string, length 75–104k
language: string, 1 distinct value
func_code_string: string, length 75–104k
func_code_tokens: list, length 19–28.4k
func_documentation_string: string, length 1–46.9k
func_documentation_tokens: list, length 1–1.97k
split_name: string, 1 distinct value
func_code_url: string, length 87–315
commonsense/metanl
metanl/extprocess.py
ProcessWrapper.tag_and_stem
def tag_and_stem(self, text, cache=None): """ Given some text, return a sequence of (stem, pos, text) triples as appropriate for the reader. `pos` can be as general or specific as necessary (for example, it might label all parts of speech, or it might only distinguish function words from others). Twitter-style hashtags and at-mentions have the stem and pos they would have without the leading # or @. For instance, if the reader's triple for "thing" is ('thing', 'NN', 'things'), then "#things" would come out as ('thing', 'NN', '#things'). """ analysis = self.analyze(text) triples = [] for record in analysis: root = self.get_record_root(record) token = self.get_record_token(record) if token: if unicode_is_punctuation(token): triples.append((token, '.', token)) else: pos = self.get_record_pos(record) triples.append((root, pos, token)) return triples
python
def tag_and_stem(self, text, cache=None): """ Given some text, return a sequence of (stem, pos, text) triples as appropriate for the reader. `pos` can be as general or specific as necessary (for example, it might label all parts of speech, or it might only distinguish function words from others). Twitter-style hashtags and at-mentions have the stem and pos they would have without the leading # or @. For instance, if the reader's triple for "thing" is ('thing', 'NN', 'things'), then "#things" would come out as ('thing', 'NN', '#things'). """ analysis = self.analyze(text) triples = [] for record in analysis: root = self.get_record_root(record) token = self.get_record_token(record) if token: if unicode_is_punctuation(token): triples.append((token, '.', token)) else: pos = self.get_record_pos(record) triples.append((root, pos, token)) return triples
[ "def", "tag_and_stem", "(", "self", ",", "text", ",", "cache", "=", "None", ")", ":", "analysis", "=", "self", ".", "analyze", "(", "text", ")", "triples", "=", "[", "]", "for", "record", "in", "analysis", ":", "root", "=", "self", ".", "get_record_root", "(", "record", ")", "token", "=", "self", ".", "get_record_token", "(", "record", ")", "if", "token", ":", "if", "unicode_is_punctuation", "(", "token", ")", ":", "triples", ".", "append", "(", "(", "token", ",", "'.'", ",", "token", ")", ")", "else", ":", "pos", "=", "self", ".", "get_record_pos", "(", "record", ")", "triples", ".", "append", "(", "(", "root", ",", "pos", ",", "token", ")", ")", "return", "triples" ]
Given some text, return a sequence of (stem, pos, text) triples as appropriate for the reader. `pos` can be as general or specific as necessary (for example, it might label all parts of speech, or it might only distinguish function words from others). Twitter-style hashtags and at-mentions have the stem and pos they would have without the leading # or @. For instance, if the reader's triple for "thing" is ('thing', 'NN', 'things'), then "#things" would come out as ('thing', 'NN', '#things').
[ "Given", "some", "text", "return", "a", "sequence", "of", "(", "stem", "pos", "text", ")", "triples", "as", "appropriate", "for", "the", "reader", ".", "pos", "can", "be", "as", "general", "or", "specific", "as", "necessary", "(", "for", "example", "it", "might", "label", "all", "parts", "of", "speech", "or", "it", "might", "only", "distinguish", "function", "words", "from", "others", ")", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/extprocess.py#L197-L222
commonsense/metanl
metanl/extprocess.py
ProcessWrapper.extract_phrases
def extract_phrases(self, text): """ Given some text, extract phrases of up to 2 content words, and map their normalized form to the complete phrase. """ analysis = self.analyze(text) for pos1 in range(len(analysis)): rec1 = analysis[pos1] if not self.is_stopword_record(rec1): yield self.get_record_root(rec1), rec1[0] for pos2 in range(pos1 + 1, len(analysis)): rec2 = analysis[pos2] if not self.is_stopword_record(rec2): roots = [self.get_record_root(rec1), self.get_record_root(rec2)] pieces = [analysis[i][0] for i in range(pos1, pos2+1)] term = ' '.join(roots) phrase = ''.join(pieces) yield term, phrase break
python
def extract_phrases(self, text): """ Given some text, extract phrases of up to 2 content words, and map their normalized form to the complete phrase. """ analysis = self.analyze(text) for pos1 in range(len(analysis)): rec1 = analysis[pos1] if not self.is_stopword_record(rec1): yield self.get_record_root(rec1), rec1[0] for pos2 in range(pos1 + 1, len(analysis)): rec2 = analysis[pos2] if not self.is_stopword_record(rec2): roots = [self.get_record_root(rec1), self.get_record_root(rec2)] pieces = [analysis[i][0] for i in range(pos1, pos2+1)] term = ' '.join(roots) phrase = ''.join(pieces) yield term, phrase break
[ "def", "extract_phrases", "(", "self", ",", "text", ")", ":", "analysis", "=", "self", ".", "analyze", "(", "text", ")", "for", "pos1", "in", "range", "(", "len", "(", "analysis", ")", ")", ":", "rec1", "=", "analysis", "[", "pos1", "]", "if", "not", "self", ".", "is_stopword_record", "(", "rec1", ")", ":", "yield", "self", ".", "get_record_root", "(", "rec1", ")", ",", "rec1", "[", "0", "]", "for", "pos2", "in", "range", "(", "pos1", "+", "1", ",", "len", "(", "analysis", ")", ")", ":", "rec2", "=", "analysis", "[", "pos2", "]", "if", "not", "self", ".", "is_stopword_record", "(", "rec2", ")", ":", "roots", "=", "[", "self", ".", "get_record_root", "(", "rec1", ")", ",", "self", ".", "get_record_root", "(", "rec2", ")", "]", "pieces", "=", "[", "analysis", "[", "i", "]", "[", "0", "]", "for", "i", "in", "range", "(", "pos1", ",", "pos2", "+", "1", ")", "]", "term", "=", "' '", ".", "join", "(", "roots", ")", "phrase", "=", "''", ".", "join", "(", "pieces", ")", "yield", "term", ",", "phrase", "break" ]
Given some text, extract phrases of up to 2 content words, and map their normalized form to the complete phrase.
[ "Given", "some", "text", "extract", "phrases", "of", "up", "to", "2", "content", "words", "and", "map", "their", "normalized", "form", "to", "the", "complete", "phrase", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/extprocess.py#L224-L243
commonsense/metanl
metanl/mecab.py
to_kana
def to_kana(text): """ Use MeCab to turn any text into its phonetic spelling, as katakana separated by spaces. """ records = MECAB.analyze(text) kana = [] for record in records: if record.pronunciation: kana.append(record.pronunciation) elif record.reading: kana.append(record.reading) else: kana.append(record.surface) return ' '.join(k for k in kana if k)
python
def to_kana(text): """ Use MeCab to turn any text into its phonetic spelling, as katakana separated by spaces. """ records = MECAB.analyze(text) kana = [] for record in records: if record.pronunciation: kana.append(record.pronunciation) elif record.reading: kana.append(record.reading) else: kana.append(record.surface) return ' '.join(k for k in kana if k)
[ "def", "to_kana", "(", "text", ")", ":", "records", "=", "MECAB", ".", "analyze", "(", "text", ")", "kana", "=", "[", "]", "for", "record", "in", "records", ":", "if", "record", ".", "pronunciation", ":", "kana", ".", "append", "(", "record", ".", "pronunciation", ")", "elif", "record", ".", "reading", ":", "kana", ".", "append", "(", "record", ".", "reading", ")", "else", ":", "kana", ".", "append", "(", "record", ".", "surface", ")", "return", "' '", ".", "join", "(", "k", "for", "k", "in", "kana", "if", "k", ")" ]
Use MeCab to turn any text into its phonetic spelling, as katakana separated by spaces.
[ "Use", "MeCab", "to", "turn", "any", "text", "into", "its", "phonetic", "spelling", "as", "katakana", "separated", "by", "spaces", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/mecab.py#L208-L222
commonsense/metanl
metanl/mecab.py
get_kana_info
def get_kana_info(char): """ Return two things about each character: - Its transliterated value (in Roman characters, if it's a kana) - A class of characters indicating how it affects the romanization """ try: name = unicodedata.name(char) except ValueError: return char, NOT_KANA # The names we're dealing with will probably look like # "KATAKANA CHARACTER ZI". if (name.startswith('HIRAGANA LETTER') or name.startswith('KATAKANA LETTER') or name.startswith('KATAKANA-HIRAGANA')): names = name.split() syllable = str_func(names[-1].lower()) if name.endswith('SMALL TU'): # The small tsu (っ) doubles the following consonant. # It'll show up as 't' on its own. return 't', SMALL_TSU elif names[-1] == 'N': return 'n', NN elif names[1] == 'PROLONGED': # The prolongation marker doubles the previous vowel. # It'll show up as '_' on its own. return '_', PROLONG elif names[-2] == 'SMALL': # Small characters tend to modify the sound of the previous # kana. If they can't modify anything, they're appended to # the letter 'x' instead. if syllable.startswith('y'): return 'x' + syllable, SMALL_Y else: return 'x' + syllable, SMALL return syllable, KANA else: if char in ROMAN_PUNCTUATION_TABLE: char = ROMAN_PUNCTUATION_TABLE[char] return char, NOT_KANA
python
def get_kana_info(char): """ Return two things about each character: - Its transliterated value (in Roman characters, if it's a kana) - A class of characters indicating how it affects the romanization """ try: name = unicodedata.name(char) except ValueError: return char, NOT_KANA # The names we're dealing with will probably look like # "KATAKANA CHARACTER ZI". if (name.startswith('HIRAGANA LETTER') or name.startswith('KATAKANA LETTER') or name.startswith('KATAKANA-HIRAGANA')): names = name.split() syllable = str_func(names[-1].lower()) if name.endswith('SMALL TU'): # The small tsu (っ) doubles the following consonant. # It'll show up as 't' on its own. return 't', SMALL_TSU elif names[-1] == 'N': return 'n', NN elif names[1] == 'PROLONGED': # The prolongation marker doubles the previous vowel. # It'll show up as '_' on its own. return '_', PROLONG elif names[-2] == 'SMALL': # Small characters tend to modify the sound of the previous # kana. If they can't modify anything, they're appended to # the letter 'x' instead. if syllable.startswith('y'): return 'x' + syllable, SMALL_Y else: return 'x' + syllable, SMALL return syllable, KANA else: if char in ROMAN_PUNCTUATION_TABLE: char = ROMAN_PUNCTUATION_TABLE[char] return char, NOT_KANA
[ "def", "get_kana_info", "(", "char", ")", ":", "try", ":", "name", "=", "unicodedata", ".", "name", "(", "char", ")", "except", "ValueError", ":", "return", "char", ",", "NOT_KANA", "# The names we're dealing with will probably look like", "# \"KATAKANA CHARACTER ZI\".", "if", "(", "name", ".", "startswith", "(", "'HIRAGANA LETTER'", ")", "or", "name", ".", "startswith", "(", "'KATAKANA LETTER'", ")", "or", "name", ".", "startswith", "(", "'KATAKANA-HIRAGANA'", ")", ")", ":", "names", "=", "name", ".", "split", "(", ")", "syllable", "=", "str_func", "(", "names", "[", "-", "1", "]", ".", "lower", "(", ")", ")", "if", "name", ".", "endswith", "(", "'SMALL TU'", ")", ":", "# The small tsu (っ) doubles the following consonant.", "# It'll show up as 't' on its own.", "return", "'t'", ",", "SMALL_TSU", "elif", "names", "[", "-", "1", "]", "==", "'N'", ":", "return", "'n'", ",", "NN", "elif", "names", "[", "1", "]", "==", "'PROLONGED'", ":", "# The prolongation marker doubles the previous vowel.", "# It'll show up as '_' on its own.", "return", "'_'", ",", "PROLONG", "elif", "names", "[", "-", "2", "]", "==", "'SMALL'", ":", "# Small characters tend to modify the sound of the previous", "# kana. If they can't modify anything, they're appended to", "# the letter 'x' instead.", "if", "syllable", ".", "startswith", "(", "'y'", ")", ":", "return", "'x'", "+", "syllable", ",", "SMALL_Y", "else", ":", "return", "'x'", "+", "syllable", ",", "SMALL", "return", "syllable", ",", "KANA", "else", ":", "if", "char", "in", "ROMAN_PUNCTUATION_TABLE", ":", "char", "=", "ROMAN_PUNCTUATION_TABLE", "[", "char", "]", "return", "char", ",", "NOT_KANA" ]
Return two things about each character: - Its transliterated value (in Roman characters, if it's a kana) - A class of characters indicating how it affects the romanization
[ "Return", "two", "things", "about", "each", "character", ":" ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/mecab.py#L225-L268
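A small usage sketch for get_kana_info above; the Unicode character names are standard, but KANA, SMALL_TSU, PROLONG, NOT_KANA and ROMAN_PUNCTUATION_TABLE are module-level constants in metanl/mecab.py that are assumed here rather than shown:

import unicodedata

# 'か' is named 'HIRAGANA LETTER KA'; the last word of the name, lowercased,
# becomes the romanized syllable.
print(unicodedata.name('か'))   # HIRAGANA LETTER KA

# Under the module's constants, get_kana_info would classify characters like:
#   get_kana_info('か')  -> ('ka', KANA)
#   get_kana_info('っ')  -> ('t', SMALL_TSU)   # small tsu doubles the following consonant
#   get_kana_info('γƒΌ')  -> ('_', PROLONG)     # prolongation mark doubles the previous vowel
#   get_kana_info('7')   -> ('7', NOT_KANA)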
commonsense/metanl
metanl/mecab.py
MeCabWrapper.analyze
def analyze(self, text): """ Runs a line of text through MeCab, and returns the results as a list of lists ("records") that contain the MeCab analysis of each word. """ try: self.process # make sure things are loaded text = render_safe(text).replace('\n', ' ').lower() results = [] for chunk in string_pieces(text): self.send_input((chunk + '\n').encode('utf-8')) while True: out_line = self.receive_output_line().decode('utf-8') if out_line == 'EOS\n': break word, info = out_line.strip('\n').split('\t') record_parts = [word] + info.split(',') # Pad the record out to have 10 parts if it doesn't record_parts += [None] * (10 - len(record_parts)) record = MeCabRecord(*record_parts) # special case for detecting nai -> n if (record.surface == 'γ‚“' and record.conjugation == 'δΈε€‰εŒ–εž‹'): # rebuild the record so that record.root is 'nai' record_parts[MeCabRecord._fields.index('root')] = 'γͺい' record = MeCabRecord(*record_parts) results.append(record) return results except ProcessError: self.restart_process() return self.analyze(text)
python
def analyze(self, text): """ Runs a line of text through MeCab, and returns the results as a list of lists ("records") that contain the MeCab analysis of each word. """ try: self.process # make sure things are loaded text = render_safe(text).replace('\n', ' ').lower() results = [] for chunk in string_pieces(text): self.send_input((chunk + '\n').encode('utf-8')) while True: out_line = self.receive_output_line().decode('utf-8') if out_line == 'EOS\n': break word, info = out_line.strip('\n').split('\t') record_parts = [word] + info.split(',') # Pad the record out to have 10 parts if it doesn't record_parts += [None] * (10 - len(record_parts)) record = MeCabRecord(*record_parts) # special case for detecting nai -> n if (record.surface == 'γ‚“' and record.conjugation == 'δΈε€‰εŒ–εž‹'): # rebuild the record so that record.root is 'nai' record_parts[MeCabRecord._fields.index('root')] = 'γͺい' record = MeCabRecord(*record_parts) results.append(record) return results except ProcessError: self.restart_process() return self.analyze(text)
[ "def", "analyze", "(", "self", ",", "text", ")", ":", "try", ":", "self", ".", "process", "# make sure things are loaded", "text", "=", "render_safe", "(", "text", ")", ".", "replace", "(", "'\\n'", ",", "' '", ")", ".", "lower", "(", ")", "results", "=", "[", "]", "for", "chunk", "in", "string_pieces", "(", "text", ")", ":", "self", ".", "send_input", "(", "(", "chunk", "+", "'\\n'", ")", ".", "encode", "(", "'utf-8'", ")", ")", "while", "True", ":", "out_line", "=", "self", ".", "receive_output_line", "(", ")", ".", "decode", "(", "'utf-8'", ")", "if", "out_line", "==", "'EOS\\n'", ":", "break", "word", ",", "info", "=", "out_line", ".", "strip", "(", "'\\n'", ")", ".", "split", "(", "'\\t'", ")", "record_parts", "=", "[", "word", "]", "+", "info", ".", "split", "(", "','", ")", "# Pad the record out to have 10 parts if it doesn't", "record_parts", "+=", "[", "None", "]", "*", "(", "10", "-", "len", "(", "record_parts", ")", ")", "record", "=", "MeCabRecord", "(", "*", "record_parts", ")", "# special case for detecting nai -> n", "if", "(", "record", ".", "surface", "==", "'γ‚“' a", "d", "record", ".", "conjugation", "==", "'δΈε€‰εŒ–εž‹'):", "", "", "# rebuild the record so that record.root is 'nai'", "record_parts", "[", "MeCabRecord", ".", "_fields", ".", "index", "(", "'root'", ")", "]", "=", "'γͺい'", "record", "=", "MeCabRecord", "(", "*", "record_parts", ")", "results", ".", "append", "(", "record", ")", "return", "results", "except", "ProcessError", ":", "self", ".", "restart_process", "(", ")", "return", "self", ".", "analyze", "(", "text", ")" ]
Runs a line of text through MeCab, and returns the results as a list of lists ("records") that contain the MeCab analysis of each word.
[ "Runs", "a", "line", "of", "text", "through", "MeCab", "and", "returns", "the", "results", "as", "a", "list", "of", "lists", "(", "records", ")", "that", "contain", "the", "MeCab", "analysis", "of", "each", "word", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/mecab.py#L125-L160
commonsense/metanl
metanl/mecab.py
MeCabWrapper.is_stopword_record
def is_stopword_record(self, record): """ Determine whether a single MeCab record represents a stopword. This mostly determines words to strip based on their parts of speech. If common_words is set to True (default), it will also strip common verbs and nouns such as くる and γ‚ˆγ†. If more_stopwords is True, it will look at the sub-part of speech to remove more categories. """ # preserve negations if record.root == 'γͺい': return False return ( record.pos in STOPWORD_CATEGORIES or record.subclass1 in STOPWORD_CATEGORIES or record.root in STOPWORD_ROOTS )
python
def is_stopword_record(self, record): """ Determine whether a single MeCab record represents a stopword. This mostly determines words to strip based on their parts of speech. If common_words is set to True (default), it will also strip common verbs and nouns such as くる and γ‚ˆγ†. If more_stopwords is True, it will look at the sub-part of speech to remove more categories. """ # preserve negations if record.root == 'γͺい': return False return ( record.pos in STOPWORD_CATEGORIES or record.subclass1 in STOPWORD_CATEGORIES or record.root in STOPWORD_ROOTS )
[ "def", "is_stopword_record", "(", "self", ",", "record", ")", ":", "# preserve negations", "if", "record", ".", "root", "==", "'γͺい':", "", "return", "False", "return", "(", "record", ".", "pos", "in", "STOPWORD_CATEGORIES", "or", "record", ".", "subclass1", "in", "STOPWORD_CATEGORIES", "or", "record", ".", "root", "in", "STOPWORD_ROOTS", ")" ]
Determine whether a single MeCab record represents a stopword. This mostly determines words to strip based on their parts of speech. If common_words is set to True (default), it will also strip common verbs and nouns such as くる and γ‚ˆγ†. If more_stopwords is True, it will look at the sub-part of speech to remove more categories.
[ "Determine", "whether", "a", "single", "MeCab", "record", "represents", "a", "stopword", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/mecab.py#L162-L178
commonsense/metanl
metanl/mecab.py
MeCabWrapper.get_record_pos
def get_record_pos(self, record): """ Given a record, get the word's part of speech. Here we're going to return MeCab's part of speech (written in Japanese), though if it's a stopword we prefix the part of speech with '~'. """ if self.is_stopword_record(record): return '~' + record.pos else: return record.pos
python
def get_record_pos(self, record): """ Given a record, get the word's part of speech. Here we're going to return MeCab's part of speech (written in Japanese), though if it's a stopword we prefix the part of speech with '~'. """ if self.is_stopword_record(record): return '~' + record.pos else: return record.pos
[ "def", "get_record_pos", "(", "self", ",", "record", ")", ":", "if", "self", ".", "is_stopword_record", "(", "record", ")", ":", "return", "'~'", "+", "record", ".", "pos", "else", ":", "return", "record", ".", "pos" ]
Given a record, get the word's part of speech. Here we're going to return MeCab's part of speech (written in Japanese), though if it's a stopword we prefix the part of speech with '~'.
[ "Given", "a", "record", "get", "the", "word", "s", "part", "of", "speech", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/mecab.py#L180-L191
commonsense/metanl
metanl/freeling.py
FreelingWrapper.analyze
def analyze(self, text): """ Run text through the external process, and get a list of lists ("records") that contain the analysis of each word. """ try: text = render_safe(text).strip() if not text: return [] chunks = text.split('\n') results = [] for chunk_text in chunks: if chunk_text.strip(): textbytes = (chunk_text + '\n').encode('utf-8') self.send_input(textbytes) out_line = '' while True: out_line = self.receive_output_line() out_line = out_line.decode('utf-8') if out_line == '\n': break record = out_line.strip('\n').split(' ') results.append(record) return results except ProcessError: self.restart_process() return self.analyze(text)
python
def analyze(self, text): """ Run text through the external process, and get a list of lists ("records") that contain the analysis of each word. """ try: text = render_safe(text).strip() if not text: return [] chunks = text.split('\n') results = [] for chunk_text in chunks: if chunk_text.strip(): textbytes = (chunk_text + '\n').encode('utf-8') self.send_input(textbytes) out_line = '' while True: out_line = self.receive_output_line() out_line = out_line.decode('utf-8') if out_line == '\n': break record = out_line.strip('\n').split(' ') results.append(record) return results except ProcessError: self.restart_process() return self.analyze(text)
[ "def", "analyze", "(", "self", ",", "text", ")", ":", "try", ":", "text", "=", "render_safe", "(", "text", ")", ".", "strip", "(", ")", "if", "not", "text", ":", "return", "[", "]", "chunks", "=", "text", ".", "split", "(", "'\\n'", ")", "results", "=", "[", "]", "for", "chunk_text", "in", "chunks", ":", "if", "chunk_text", ".", "strip", "(", ")", ":", "textbytes", "=", "(", "chunk_text", "+", "'\\n'", ")", ".", "encode", "(", "'utf-8'", ")", "self", ".", "send_input", "(", "textbytes", ")", "out_line", "=", "''", "while", "True", ":", "out_line", "=", "self", ".", "receive_output_line", "(", ")", "out_line", "=", "out_line", ".", "decode", "(", "'utf-8'", ")", "if", "out_line", "==", "'\\n'", ":", "break", "record", "=", "out_line", ".", "strip", "(", "'\\n'", ")", ".", "split", "(", "' '", ")", "results", ".", "append", "(", "record", ")", "return", "results", "except", "ProcessError", ":", "self", ".", "restart_process", "(", ")", "return", "self", ".", "analyze", "(", "text", ")" ]
Run text through the external process, and get a list of lists ("records") that contain the analysis of each word.
[ "Run", "text", "through", "the", "external", "process", "and", "get", "a", "list", "of", "lists", "(", "records", ")", "that", "contain", "the", "analysis", "of", "each", "word", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/freeling.py#L76-L104
commonsense/metanl
metanl/token_utils.py
untokenize
def untokenize(words): """ Untokenizing a text undoes the tokenizing operation, restoring punctuation and spaces to the places that people expect them to be. Ideally, `untokenize(tokenize(text))` should be identical to `text`, except for line breaks. """ text = ' '.join(words) step1 = text.replace("`` ", '"').replace(" ''", '"').replace('. . .', '...') step2 = step1.replace(" ( ", " (").replace(" ) ", ") ") step3 = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", step2) step4 = re.sub(r' ([.,:;?!%]+)$', r"\1", step3) step5 = step4.replace(" '", "'").replace(" n't", "n't").replace( "can not", "cannot") step6 = step5.replace(" ` ", " '") return step6.strip()
python
def untokenize(words): """ Untokenizing a text undoes the tokenizing operation, restoring punctuation and spaces to the places that people expect them to be. Ideally, `untokenize(tokenize(text))` should be identical to `text`, except for line breaks. """ text = ' '.join(words) step1 = text.replace("`` ", '"').replace(" ''", '"').replace('. . .', '...') step2 = step1.replace(" ( ", " (").replace(" ) ", ") ") step3 = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", step2) step4 = re.sub(r' ([.,:;?!%]+)$', r"\1", step3) step5 = step4.replace(" '", "'").replace(" n't", "n't").replace( "can not", "cannot") step6 = step5.replace(" ` ", " '") return step6.strip()
[ "def", "untokenize", "(", "words", ")", ":", "text", "=", "' '", ".", "join", "(", "words", ")", "step1", "=", "text", ".", "replace", "(", "\"`` \"", ",", "'\"'", ")", ".", "replace", "(", "\" ''\"", ",", "'\"'", ")", ".", "replace", "(", "'. . .'", ",", "'...'", ")", "step2", "=", "step1", ".", "replace", "(", "\" ( \"", ",", "\" (\"", ")", ".", "replace", "(", "\" ) \"", ",", "\") \"", ")", "step3", "=", "re", ".", "sub", "(", "r' ([.,:;?!%]+)([ \\'\"`])'", ",", "r\"\\1\\2\"", ",", "step2", ")", "step4", "=", "re", ".", "sub", "(", "r' ([.,:;?!%]+)$'", ",", "r\"\\1\"", ",", "step3", ")", "step5", "=", "step4", ".", "replace", "(", "\" '\"", ",", "\"'\"", ")", ".", "replace", "(", "\" n't\"", ",", "\"n't\"", ")", ".", "replace", "(", "\"can not\"", ",", "\"cannot\"", ")", "step6", "=", "step5", ".", "replace", "(", "\" ` \"", ",", "\" '\"", ")", "return", "step6", ".", "strip", "(", ")" ]
Untokenizing a text undoes the tokenizing operation, restoring punctuation and spaces to the places that people expect them to be. Ideally, `untokenize(tokenize(text))` should be identical to `text`, except for line breaks.
[ "Untokenizing", "a", "text", "undoes", "the", "tokenizing", "operation", "restoring", "punctuation", "and", "spaces", "to", "the", "places", "that", "people", "expect", "them", "to", "be", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/token_utils.py#L28-L44
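A minimal usage sketch for untokenize above, assuming the module's re import; Penn-Treebank-style tokens are rejoined with natural spacing:

tokens = ['I', 'do', "n't", 'like', 'green', 'eggs', '.']
print(untokenize(tokens))   # -> "I don't like green eggs."
# The trailing ' .' is pulled onto the previous word and " n't" becomes "n't".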
commonsense/metanl
metanl/token_utils.py
un_camel_case
def un_camel_case(text): r""" Splits apart words that are written in CamelCase. Bugs: - Non-ASCII characters are treated as lowercase letters, even if they are actually capital letters. Examples: >>> un_camel_case('1984ZXSpectrumGames') '1984 ZX Spectrum Games' >>> un_camel_case('aaAa aaAaA 0aA AAAa!AAA') 'aa Aa aa Aa A 0a A AA Aa! AAA' >>> un_camel_case('MotΓΆrHead') 'Mot\xf6r Head' >>> un_camel_case('MSWindows3.11ForWorkgroups') 'MS Windows 3.11 For Workgroups' This should not significantly affect text that is not camel-cased: >>> un_camel_case('ACM_Computing_Classification_System') 'ACM Computing Classification System' >>> un_camel_case('Anne_Blunt,_15th_Baroness_Wentworth') 'Anne Blunt, 15th Baroness Wentworth' >>> un_camel_case('Hindi-Urdu') 'Hindi-Urdu' """ revtext = text[::-1] pieces = [] while revtext: match = CAMEL_RE.match(revtext) if match: pieces.append(match.group(1)) revtext = revtext[match.end():] else: pieces.append(revtext) revtext = '' revstr = ' '.join(piece.strip(' _') for piece in pieces if piece.strip(' _')) return revstr[::-1].replace('- ', '-')
python
def un_camel_case(text): r""" Splits apart words that are written in CamelCase. Bugs: - Non-ASCII characters are treated as lowercase letters, even if they are actually capital letters. Examples: >>> un_camel_case('1984ZXSpectrumGames') '1984 ZX Spectrum Games' >>> un_camel_case('aaAa aaAaA 0aA AAAa!AAA') 'aa Aa aa Aa A 0a A AA Aa! AAA' >>> un_camel_case('MotΓΆrHead') 'Mot\xf6r Head' >>> un_camel_case('MSWindows3.11ForWorkgroups') 'MS Windows 3.11 For Workgroups' This should not significantly affect text that is not camel-cased: >>> un_camel_case('ACM_Computing_Classification_System') 'ACM Computing Classification System' >>> un_camel_case('Anne_Blunt,_15th_Baroness_Wentworth') 'Anne Blunt, 15th Baroness Wentworth' >>> un_camel_case('Hindi-Urdu') 'Hindi-Urdu' """ revtext = text[::-1] pieces = [] while revtext: match = CAMEL_RE.match(revtext) if match: pieces.append(match.group(1)) revtext = revtext[match.end():] else: pieces.append(revtext) revtext = '' revstr = ' '.join(piece.strip(' _') for piece in pieces if piece.strip(' _')) return revstr[::-1].replace('- ', '-')
[ "def", "un_camel_case", "(", "text", ")", ":", "revtext", "=", "text", "[", ":", ":", "-", "1", "]", "pieces", "=", "[", "]", "while", "revtext", ":", "match", "=", "CAMEL_RE", ".", "match", "(", "revtext", ")", "if", "match", ":", "pieces", ".", "append", "(", "match", ".", "group", "(", "1", ")", ")", "revtext", "=", "revtext", "[", "match", ".", "end", "(", ")", ":", "]", "else", ":", "pieces", ".", "append", "(", "revtext", ")", "revtext", "=", "''", "revstr", "=", "' '", ".", "join", "(", "piece", ".", "strip", "(", "' _'", ")", "for", "piece", "in", "pieces", "if", "piece", ".", "strip", "(", "' _'", ")", ")", "return", "revstr", "[", ":", ":", "-", "1", "]", ".", "replace", "(", "'- '", ",", "'-'", ")" ]
r""" Splits apart words that are written in CamelCase. Bugs: - Non-ASCII characters are treated as lowercase letters, even if they are actually capital letters. Examples: >>> un_camel_case('1984ZXSpectrumGames') '1984 ZX Spectrum Games' >>> un_camel_case('aaAa aaAaA 0aA AAAa!AAA') 'aa Aa aa Aa A 0a A AA Aa! AAA' >>> un_camel_case('MotΓΆrHead') 'Mot\xf6r Head' >>> un_camel_case('MSWindows3.11ForWorkgroups') 'MS Windows 3.11 For Workgroups' This should not significantly affect text that is not camel-cased: >>> un_camel_case('ACM_Computing_Classification_System') 'ACM Computing Classification System' >>> un_camel_case('Anne_Blunt,_15th_Baroness_Wentworth') 'Anne Blunt, 15th Baroness Wentworth' >>> un_camel_case('Hindi-Urdu') 'Hindi-Urdu'
[ "r", "Splits", "apart", "words", "that", "are", "written", "in", "CamelCase", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/token_utils.py#L64-L110
commonsense/metanl
metanl/token_utils.py
string_pieces
def string_pieces(s, maxlen=1024): """ Takes a (unicode) string and yields pieces of it that are at most `maxlen` characters, trying to break it at punctuation/whitespace. This is an important step before using a tokenizer with a maximum buffer size. """ if not s: return i = 0 while True: j = i + maxlen if j >= len(s): yield s[i:] return # Using "j - 1" keeps boundary characters with the left chunk while unicodedata.category(s[j - 1]) not in BOUNDARY_CATEGORIES: j -= 1 if j == i: # No boundary available; oh well. j = i + maxlen break yield s[i:j] i = j
python
def string_pieces(s, maxlen=1024): """ Takes a (unicode) string and yields pieces of it that are at most `maxlen` characters, trying to break it at punctuation/whitespace. This is an important step before using a tokenizer with a maximum buffer size. """ if not s: return i = 0 while True: j = i + maxlen if j >= len(s): yield s[i:] return # Using "j - 1" keeps boundary characters with the left chunk while unicodedata.category(s[j - 1]) not in BOUNDARY_CATEGORIES: j -= 1 if j == i: # No boundary available; oh well. j = i + maxlen break yield s[i:j] i = j
[ "def", "string_pieces", "(", "s", ",", "maxlen", "=", "1024", ")", ":", "if", "not", "s", ":", "return", "i", "=", "0", "while", "True", ":", "j", "=", "i", "+", "maxlen", "if", "j", ">=", "len", "(", "s", ")", ":", "yield", "s", "[", "i", ":", "]", "return", "# Using \"j - 1\" keeps boundary characters with the left chunk", "while", "unicodedata", ".", "category", "(", "s", "[", "j", "-", "1", "]", ")", "not", "in", "BOUNDARY_CATEGORIES", ":", "j", "-=", "1", "if", "j", "==", "i", ":", "# No boundary available; oh well.", "j", "=", "i", "+", "maxlen", "break", "yield", "s", "[", "i", ":", "j", "]", "i", "=", "j" ]
Takes a (unicode) string and yields pieces of it that are at most `maxlen` characters, trying to break it at punctuation/whitespace. This is an important step before using a tokenizer with a maximum buffer size.
[ "Takes", "a", "(", "unicode", ")", "string", "and", "yields", "pieces", "of", "it", "that", "are", "at", "most", "maxlen", "characters", "trying", "to", "break", "it", "at", "punctuation", "/", "whitespace", ".", "This", "is", "an", "important", "step", "before", "using", "a", "tokenizer", "with", "a", "maximum", "buffer", "size", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/token_utils.py#L128-L150
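A sketch of how string_pieces breaks a string, assuming the module's BOUNDARY_CATEGORIES constant includes the space separator category 'Zs':

# With maxlen=5, each break point is pulled back to the nearest boundary character,
# and that boundary character stays with the left-hand chunk:
#   list(string_pieces('one two three', maxlen=5))
#   -> ['one ', 'two ', 'three']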
commonsense/metanl
metanl/nltk_morphy.py
_word_badness
def _word_badness(word): """ Assign a heuristic to possible outputs from Morphy. Minimizing this heuristic avoids incorrect stems. """ if word.endswith('e'): return len(word) - 2 elif word.endswith('ess'): return len(word) - 10 elif word.endswith('ss'): return len(word) - 4 else: return len(word)
python
def _word_badness(word): """ Assign a heuristic to possible outputs from Morphy. Minimizing this heuristic avoids incorrect stems. """ if word.endswith('e'): return len(word) - 2 elif word.endswith('ess'): return len(word) - 10 elif word.endswith('ss'): return len(word) - 4 else: return len(word)
[ "def", "_word_badness", "(", "word", ")", ":", "if", "word", ".", "endswith", "(", "'e'", ")", ":", "return", "len", "(", "word", ")", "-", "2", "elif", "word", ".", "endswith", "(", "'ess'", ")", ":", "return", "len", "(", "word", ")", "-", "10", "elif", "word", ".", "endswith", "(", "'ss'", ")", ":", "return", "len", "(", "word", ")", "-", "4", "else", ":", "return", "len", "(", "word", ")" ]
Assign a heuristic to possible outputs from Morphy. Minimizing this heuristic avoids incorrect stems.
[ "Assign", "a", "heuristic", "to", "possible", "outputs", "from", "Morphy", ".", "Minimizing", "this", "heuristic", "avoids", "incorrect", "stems", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/nltk_morphy.py#L88-L100
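A worked example of the badness heuristic above; the candidate stems are hypothetical, chosen only to show how the ending penalties order them (lower is better):

#   _word_badness('leave')      -> 5 - 2  = 3    (ends with 'e')
#   _word_badness('leaf')       -> 4             (no special ending)
#   _word_badness('happiness')  -> 9 - 10 = -1   (ends with 'ess')
#   _word_badness('class')      -> 5 - 4  = 1    (ends with 'ss')
# So if Morphy offered both 'leaf' and 'leave' as stems, 'leave' would be preferred.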
commonsense/metanl
metanl/nltk_morphy.py
_morphy_best
def _morphy_best(word, pos=None): """ Get the most likely stem for a word using Morphy, once the input has been pre-processed by morphy_stem(). """ results = [] if pos is None: pos = 'nvar' for pos_item in pos: results.extend(morphy(word, pos_item)) if not results: return None results.sort(key=lambda x: _word_badness(x)) return results[0]
python
def _morphy_best(word, pos=None): """ Get the most likely stem for a word using Morphy, once the input has been pre-processed by morphy_stem(). """ results = [] if pos is None: pos = 'nvar' for pos_item in pos: results.extend(morphy(word, pos_item)) if not results: return None results.sort(key=lambda x: _word_badness(x)) return results[0]
[ "def", "_morphy_best", "(", "word", ",", "pos", "=", "None", ")", ":", "results", "=", "[", "]", "if", "pos", "is", "None", ":", "pos", "=", "'nvar'", "for", "pos_item", "in", "pos", ":", "results", ".", "extend", "(", "morphy", "(", "word", ",", "pos_item", ")", ")", "if", "not", "results", ":", "return", "None", "results", ".", "sort", "(", "key", "=", "lambda", "x", ":", "_word_badness", "(", "x", ")", ")", "return", "results", "[", "0", "]" ]
Get the most likely stem for a word using Morphy, once the input has been pre-processed by morphy_stem().
[ "Get", "the", "most", "likely", "stem", "for", "a", "word", "using", "Morphy", "once", "the", "input", "has", "been", "pre", "-", "processed", "by", "morphy_stem", "()", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/nltk_morphy.py#L103-L116
commonsense/metanl
metanl/nltk_morphy.py
morphy_stem
def morphy_stem(word, pos=None): """ Get the most likely stem for a word. If a part of speech is supplied, the stem will be more accurate. Valid parts of speech are: - 'n' or 'NN' for nouns - 'v' or 'VB' for verbs - 'a' or 'JJ' for adjectives - 'r' or 'RB' for adverbs Any other part of speech will be treated as unknown. """ word = word.lower() if pos is not None: if pos.startswith('NN'): pos = 'n' elif pos.startswith('VB'): pos = 'v' elif pos.startswith('JJ'): pos = 'a' elif pos.startswith('RB'): pos = 'r' if pos is None and word.endswith('ing') or word.endswith('ed'): pos = 'v' if pos is not None and pos not in 'nvar': pos = None if word in EXCEPTIONS: return EXCEPTIONS[word] if pos is None: if word in AMBIGUOUS_EXCEPTIONS: return AMBIGUOUS_EXCEPTIONS[word] return _morphy_best(word, pos) or word
python
def morphy_stem(word, pos=None): """ Get the most likely stem for a word. If a part of speech is supplied, the stem will be more accurate. Valid parts of speech are: - 'n' or 'NN' for nouns - 'v' or 'VB' for verbs - 'a' or 'JJ' for adjectives - 'r' or 'RB' for adverbs Any other part of speech will be treated as unknown. """ word = word.lower() if pos is not None: if pos.startswith('NN'): pos = 'n' elif pos.startswith('VB'): pos = 'v' elif pos.startswith('JJ'): pos = 'a' elif pos.startswith('RB'): pos = 'r' if pos is None and word.endswith('ing') or word.endswith('ed'): pos = 'v' if pos is not None and pos not in 'nvar': pos = None if word in EXCEPTIONS: return EXCEPTIONS[word] if pos is None: if word in AMBIGUOUS_EXCEPTIONS: return AMBIGUOUS_EXCEPTIONS[word] return _morphy_best(word, pos) or word
[ "def", "morphy_stem", "(", "word", ",", "pos", "=", "None", ")", ":", "word", "=", "word", ".", "lower", "(", ")", "if", "pos", "is", "not", "None", ":", "if", "pos", ".", "startswith", "(", "'NN'", ")", ":", "pos", "=", "'n'", "elif", "pos", ".", "startswith", "(", "'VB'", ")", ":", "pos", "=", "'v'", "elif", "pos", ".", "startswith", "(", "'JJ'", ")", ":", "pos", "=", "'a'", "elif", "pos", ".", "startswith", "(", "'RB'", ")", ":", "pos", "=", "'r'", "if", "pos", "is", "None", "and", "word", ".", "endswith", "(", "'ing'", ")", "or", "word", ".", "endswith", "(", "'ed'", ")", ":", "pos", "=", "'v'", "if", "pos", "is", "not", "None", "and", "pos", "not", "in", "'nvar'", ":", "pos", "=", "None", "if", "word", "in", "EXCEPTIONS", ":", "return", "EXCEPTIONS", "[", "word", "]", "if", "pos", "is", "None", ":", "if", "word", "in", "AMBIGUOUS_EXCEPTIONS", ":", "return", "AMBIGUOUS_EXCEPTIONS", "[", "word", "]", "return", "_morphy_best", "(", "word", ",", "pos", ")", "or", "word" ]
Get the most likely stem for a word. If a part of speech is supplied, the stem will be more accurate. Valid parts of speech are: - 'n' or 'NN' for nouns - 'v' or 'VB' for verbs - 'a' or 'JJ' for adjectives - 'r' or 'RB' for adverbs Any other part of speech will be treated as unknown.
[ "Get", "the", "most", "likely", "stem", "for", "a", "word", ".", "If", "a", "part", "of", "speech", "is", "supplied", "the", "stem", "will", "be", "more", "accurate", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/nltk_morphy.py#L119-L152
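A usage sketch for morphy_stem above; the results depend on NLTK's WordNet data and the module's EXCEPTIONS tables, so the outputs shown are what Morphy typically yields rather than guaranteed values:

#   morphy_stem('dogs', 'NNS')   -> 'dog'
#   morphy_stem('geese', 'NN')   -> 'goose'   (WordNet noun exception list)
#   morphy_stem('took', 'VBD')   -> 'take'    (WordNet verb exception list)
#   morphy_stem('xyzzy')         -> 'xyzzy'   (unknown words fall through unchanged)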
commonsense/metanl
metanl/nltk_morphy.py
tag_and_stem
def tag_and_stem(text): """ Returns a list of (stem, tag, token) triples: - stem: the word's uninflected form - tag: the word's part of speech - token: the original word, so we can reconstruct it later """ tokens = tokenize(text) tagged = nltk.pos_tag(tokens) out = [] for token, tag in tagged: stem = morphy_stem(token, tag) out.append((stem, tag, token)) return out
python
def tag_and_stem(text): """ Returns a list of (stem, tag, token) triples: - stem: the word's uninflected form - tag: the word's part of speech - token: the original word, so we can reconstruct it later """ tokens = tokenize(text) tagged = nltk.pos_tag(tokens) out = [] for token, tag in tagged: stem = morphy_stem(token, tag) out.append((stem, tag, token)) return out
[ "def", "tag_and_stem", "(", "text", ")", ":", "tokens", "=", "tokenize", "(", "text", ")", "tagged", "=", "nltk", ".", "pos_tag", "(", "tokens", ")", "out", "=", "[", "]", "for", "token", ",", "tag", "in", "tagged", ":", "stem", "=", "morphy_stem", "(", "token", ",", "tag", ")", "out", ".", "append", "(", "(", "stem", ",", "tag", ",", "token", ")", ")", "return", "out" ]
Returns a list of (stem, tag, token) triples: - stem: the word's uninflected form - tag: the word's part of speech - token: the original word, so we can reconstruct it later
[ "Returns", "a", "list", "of", "(", "stem", "tag", "token", ")", "triples", ":" ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/nltk_morphy.py#L155-L169
commonsense/metanl
metanl/nltk_morphy.py
normalize_list
def normalize_list(text): """ Get a list of word stems that appear in the text. Stopwords and an initial 'to' will be stripped, unless this leaves nothing in the stem. >>> normalize_list('the dog') ['dog'] >>> normalize_list('big dogs') ['big', 'dog'] >>> normalize_list('the') ['the'] """ pieces = [morphy_stem(word) for word in tokenize(text)] pieces = [piece for piece in pieces if good_lemma(piece)] if not pieces: return [text] if pieces[0] == 'to': pieces = pieces[1:] return pieces
python
def normalize_list(text): """ Get a list of word stems that appear in the text. Stopwords and an initial 'to' will be stripped, unless this leaves nothing in the stem. >>> normalize_list('the dog') ['dog'] >>> normalize_list('big dogs') ['big', 'dog'] >>> normalize_list('the') ['the'] """ pieces = [morphy_stem(word) for word in tokenize(text)] pieces = [piece for piece in pieces if good_lemma(piece)] if not pieces: return [text] if pieces[0] == 'to': pieces = pieces[1:] return pieces
[ "def", "normalize_list", "(", "text", ")", ":", "pieces", "=", "[", "morphy_stem", "(", "word", ")", "for", "word", "in", "tokenize", "(", "text", ")", "]", "pieces", "=", "[", "piece", "for", "piece", "in", "pieces", "if", "good_lemma", "(", "piece", ")", "]", "if", "not", "pieces", ":", "return", "[", "text", "]", "if", "pieces", "[", "0", "]", "==", "'to'", ":", "pieces", "=", "pieces", "[", "1", ":", "]", "return", "pieces" ]
Get a list of word stems that appear in the text. Stopwords and an initial 'to' will be stripped, unless this leaves nothing in the stem. >>> normalize_list('the dog') ['dog'] >>> normalize_list('big dogs') ['big', 'dog'] >>> normalize_list('the') ['the']
[ "Get", "a", "list", "of", "word", "stems", "that", "appear", "in", "the", "text", ".", "Stopwords", "and", "an", "initial", "to", "will", "be", "stripped", "unless", "this", "leaves", "nothing", "in", "the", "stem", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/nltk_morphy.py#L176-L194
commonsense/metanl
metanl/nltk_morphy.py
normalize_topic
def normalize_topic(topic): """ Get a canonical representation of a Wikipedia topic, which may include a disambiguation string in parentheses. Returns (name, disambig), where "name" is the normalized topic name, and "disambig" is a string corresponding to the disambiguation text or None. """ # find titles of the form Foo (bar) topic = topic.replace('_', ' ') match = re.match(r'([^(]+) \(([^)]+)\)', topic) if not match: return normalize(topic), None else: return normalize(match.group(1)), 'n/' + match.group(2).strip(' _')
python
def normalize_topic(topic): """ Get a canonical representation of a Wikipedia topic, which may include a disambiguation string in parentheses. Returns (name, disambig), where "name" is the normalized topic name, and "disambig" is a string corresponding to the disambiguation text or None. """ # find titles of the form Foo (bar) topic = topic.replace('_', ' ') match = re.match(r'([^(]+) \(([^)]+)\)', topic) if not match: return normalize(topic), None else: return normalize(match.group(1)), 'n/' + match.group(2).strip(' _')
[ "def", "normalize_topic", "(", "topic", ")", ":", "# find titles of the form Foo (bar)", "topic", "=", "topic", ".", "replace", "(", "'_'", ",", "' '", ")", "match", "=", "re", ".", "match", "(", "r'([^(]+) \\(([^)]+)\\)'", ",", "topic", ")", "if", "not", "match", ":", "return", "normalize", "(", "topic", ")", ",", "None", "else", ":", "return", "normalize", "(", "match", ".", "group", "(", "1", ")", ")", ",", "'n/'", "+", "match", ".", "group", "(", "2", ")", ".", "strip", "(", "' _'", ")" ]
Get a canonical representation of a Wikipedia topic, which may include a disambiguation string in parentheses. Returns (name, disambig), where "name" is the normalized topic name, and "disambig" is a string corresponding to the disambiguation text or None.
[ "Get", "a", "canonical", "representation", "of", "a", "Wikipedia", "topic", "which", "may", "include", "a", "disambiguation", "string", "in", "parentheses", "." ]
train
https://github.com/commonsense/metanl/blob/4b9ae8353489cc409bebd7e1fe10ab5b527b078e/metanl/nltk_morphy.py#L205-L220
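A usage sketch for normalize_topic above; normalize is another function in the same module, so only the shape of the result is spelled out:

#   normalize_topic('Python_(programming_language)')
#     -> (normalize('Python'), 'n/programming language')
#   normalize_topic('Paris')
#     -> (normalize('Paris'), None)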
santoshphilip/eppy
eppy/json_functions.py
key2elements
def key2elements(key): """split key to elements""" # words = key.split('.') # if len(words) == 4: # return words # # there is a dot in object name # fieldword = words.pop(-1) # nameword = '.'.join(words[-2:]) # if nameword[-1] in ('"', "'"): # # The object name is in quotes # nameword = nameword[1:-1] # elements = words[:-2] + [nameword, fieldword, ] # return elements words = key.split('.') first2words = words[:2] lastword = words[-1] namewords = words[2:-1] namephrase = '.'.join(namewords) if namephrase.startswith("'") and namephrase.endswith("'"): namephrase = namephrase[1:-1] return first2words + [namephrase] + [lastword]
python
def key2elements(key): """split key to elements""" # words = key.split('.') # if len(words) == 4: # return words # # there is a dot in object name # fieldword = words.pop(-1) # nameword = '.'.join(words[-2:]) # if nameword[-1] in ('"', "'"): # # The object name is in quotes # nameword = nameword[1:-1] # elements = words[:-2] + [nameword, fieldword, ] # return elements words = key.split('.') first2words = words[:2] lastword = words[-1] namewords = words[2:-1] namephrase = '.'.join(namewords) if namephrase.startswith("'") and namephrase.endswith("'"): namephrase = namephrase[1:-1] return first2words + [namephrase] + [lastword]
[ "def", "key2elements", "(", "key", ")", ":", "# words = key.split('.')", "# if len(words) == 4:", "# return words", "# # there is a dot in object name", "# fieldword = words.pop(-1)", "# nameword = '.'.join(words[-2:])", "# if nameword[-1] in ('\"', \"'\"):", "# # The object name is in quotes", "# nameword = nameword[1:-1]", "# elements = words[:-2] + [nameword, fieldword, ]", "# return elements", "words", "=", "key", ".", "split", "(", "'.'", ")", "first2words", "=", "words", "[", ":", "2", "]", "lastword", "=", "words", "[", "-", "1", "]", "namewords", "=", "words", "[", "2", ":", "-", "1", "]", "namephrase", "=", "'.'", ".", "join", "(", "namewords", ")", "if", "namephrase", ".", "startswith", "(", "\"'\"", ")", "and", "namephrase", ".", "endswith", "(", "\"'\"", ")", ":", "namephrase", "=", "namephrase", "[", "1", ":", "-", "1", "]", "return", "first2words", "+", "[", "namephrase", "]", "+", "[", "lastword", "]" ]
split key to elements
[ "split", "key", "to", "elements" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/json_functions.py#L14-L34
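A usage sketch for key2elements above; the key strings are hypothetical but follow the idf.<object key>.<object name>.<field> format that updateidf (next record) consumes:

#   key2elements('idf.BUILDING.Empire State Building.North_Axis')
#     -> ['idf', 'BUILDING', 'Empire State Building', 'North_Axis']
# A dot inside the object name stays with the name:
#   key2elements('idf.SCHEDULE:COMPACT.sched.1.Field_1')
#     -> ['idf', 'SCHEDULE:COMPACT', 'sched.1', 'Field_1']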
santoshphilip/eppy
eppy/json_functions.py
updateidf
def updateidf(idf, dct): """update idf using dct""" for key in list(dct.keys()): if key.startswith('idf.'): idftag, objkey, objname, field = key2elements(key) if objname == '': try: idfobj = idf.idfobjects[objkey.upper()][0] except IndexError as e: idfobj = idf.newidfobject(objkey.upper()) else: idfobj = idf.getobject(objkey.upper(), objname) if idfobj == None: idfobj = idf.newidfobject(objkey.upper(), Name=objname) idfobj[field] = dct[key]
python
def updateidf(idf, dct): """update idf using dct""" for key in list(dct.keys()): if key.startswith('idf.'): idftag, objkey, objname, field = key2elements(key) if objname == '': try: idfobj = idf.idfobjects[objkey.upper()][0] except IndexError as e: idfobj = idf.newidfobject(objkey.upper()) else: idfobj = idf.getobject(objkey.upper(), objname) if idfobj == None: idfobj = idf.newidfobject(objkey.upper(), Name=objname) idfobj[field] = dct[key]
[ "def", "updateidf", "(", "idf", ",", "dct", ")", ":", "for", "key", "in", "list", "(", "dct", ".", "keys", "(", ")", ")", ":", "if", "key", ".", "startswith", "(", "'idf.'", ")", ":", "idftag", ",", "objkey", ",", "objname", ",", "field", "=", "key2elements", "(", "key", ")", "if", "objname", "==", "''", ":", "try", ":", "idfobj", "=", "idf", ".", "idfobjects", "[", "objkey", ".", "upper", "(", ")", "]", "[", "0", "]", "except", "IndexError", "as", "e", ":", "idfobj", "=", "idf", ".", "newidfobject", "(", "objkey", ".", "upper", "(", ")", ")", "else", ":", "idfobj", "=", "idf", ".", "getobject", "(", "objkey", ".", "upper", "(", ")", ",", "objname", ")", "if", "idfobj", "==", "None", ":", "idfobj", "=", "idf", ".", "newidfobject", "(", "objkey", ".", "upper", "(", ")", ",", "Name", "=", "objname", ")", "idfobj", "[", "field", "]", "=", "dct", "[", "key", "]" ]
update idf using dct
[ "update", "idf", "using", "dct" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/json_functions.py#L37-L51
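A usage sketch for updateidf above; the IDD path, model file, object names and values are hypothetical, and the surrounding calls follow eppy's usual modeleditor workflow:

from eppy.modeleditor import IDF
from eppy.json_functions import updateidf

IDF.setiddname('Energy+.idd')        # hypothetical path to the EnergyPlus IDD
idf = IDF('building.idf')            # hypothetical model file
updateidf(idf, {
    'idf.BUILDING.Empire State Building.North_Axis': 30,
    'idf.SITE:LOCATION..Latitude': 40.7,   # empty object name: first (or a new) SITE:LOCATION
})
idf.save()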
santoshphilip/eppy
eppy/fanpower.py
fan_bhp
def fan_bhp(fan_tot_eff, pascal, m3s): """return the fan power in bhp given fan efficiency, Pressure rise (Pa) and flow (m3/s)""" # from discussion in # http://energy-models.com/forum/baseline-fan-power-calculation inh2o = pascal2inh2o(pascal) cfm = m3s2cfm(m3s) return (cfm * inh2o * 1.0) / (6356.0 * fan_tot_eff)
python
def fan_bhp(fan_tot_eff, pascal, m3s): """return the fan power in bhp given fan efficiency, Pressure rise (Pa) and flow (m3/s)""" # from discussion in # http://energy-models.com/forum/baseline-fan-power-calculation inh2o = pascal2inh2o(pascal) cfm = m3s2cfm(m3s) return (cfm * inh2o * 1.0) / (6356.0 * fan_tot_eff)
[ "def", "fan_bhp", "(", "fan_tot_eff", ",", "pascal", ",", "m3s", ")", ":", "# from discussion in", "# http://energy-models.com/forum/baseline-fan-power-calculation", "inh2o", "=", "pascal2inh2o", "(", "pascal", ")", "cfm", "=", "m3s2cfm", "(", "m3s", ")", "return", "(", "cfm", "*", "inh2o", "*", "1.0", ")", "/", "(", "6356.0", "*", "fan_tot_eff", ")" ]
return the fan power in bhp given fan efficiency, Pressure rise (Pa) and flow (m3/s)
[ "return", "the", "fan", "power", "in", "bhp", "given", "fan", "efficiency", "Pressure", "rise", "(", "Pa", ")", "and", "flow", "(", "m3", "/", "s", ")" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/fanpower.py#L37-L43
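A worked example of the bhp formula above, with the unit conversions written out explicitly; the module's pascal2inh2o and m3s2cfm helpers are assumed to use these standard factors:

# fan_bhp(fan_tot_eff, pascal, m3s) == (cfm * inh2o) / (6356 * fan_tot_eff)
fan_tot_eff = 0.6                 # total fan efficiency
pascal = 500.0                    # pressure rise in Pa
m3s = 2.0                         # flow in m3/s

inh2o = pascal / 249.089          # ~2.01 inches of water (1 inH2O ~= 249.089 Pa)
cfm = m3s * 2118.88               # ~4237.8 cfm (1 m3/s ~= 2118.88 cfm)
bhp = (cfm * inh2o) / (6356.0 * fan_tot_eff)
print(round(bhp, 2))              # ~2.23 bhp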
santoshphilip/eppy
eppy/fanpower.py
bhp2pascal
def bhp2pascal(bhp, cfm, fan_tot_eff): """return inputs for E+ in pascal and m3/s""" inh2o = bhp * 6356.0 * fan_tot_eff / cfm pascal = inh2o2pascal(inh2o) m3s = cfm2m3s(cfm) return pascal, m3s
python
def bhp2pascal(bhp, cfm, fan_tot_eff): """return inputs for E+ in pascal and m3/s""" inh2o = bhp * 6356.0 * fan_tot_eff / cfm pascal = inh2o2pascal(inh2o) m3s = cfm2m3s(cfm) return pascal, m3s
[ "def", "bhp2pascal", "(", "bhp", ",", "cfm", ",", "fan_tot_eff", ")", ":", "inh2o", "=", "bhp", "*", "6356.0", "*", "fan_tot_eff", "/", "cfm", "pascal", "=", "inh2o2pascal", "(", "inh2o", ")", "m3s", "=", "cfm2m3s", "(", "cfm", ")", "return", "pascal", ",", "m3s" ]
return inputs for E+ in pascal and m3/s
[ "return", "inputs", "for", "E", "+", "in", "pascal", "and", "m3", "/", "s" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/fanpower.py#L45-L50
santoshphilip/eppy
eppy/fanpower.py
fan_watts
def fan_watts(fan_tot_eff, pascal, m3s): """return the fan power in watts given fan efficiency, Pressure rise (Pa) and flow (m3/s)""" # got this from a google search bhp = fan_bhp(fan_tot_eff, pascal, m3s) return bhp2watts(bhp)
python
def fan_watts(fan_tot_eff, pascal, m3s): """return the fan power in watts given fan efficiency, Pressure rise (Pa) and flow (m3/s)""" # got this from a google search bhp = fan_bhp(fan_tot_eff, pascal, m3s) return bhp2watts(bhp)
[ "def", "fan_watts", "(", "fan_tot_eff", ",", "pascal", ",", "m3s", ")", ":", "# got this from a google search", "bhp", "=", "fan_bhp", "(", "fan_tot_eff", ",", "pascal", ",", "m3s", ")", "return", "bhp2watts", "(", "bhp", ")" ]
return the fan power in watts given fan efficiency, Pressure rise (Pa) and flow (m3/s)
[ "return", "the", "fan", "power", "in", "watts", "given", "fan", "efficiency", "Pressure", "rise", "(", "Pa", ")", "and", "flow", "(", "m3", "/", "s", ")" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/fanpower.py#L61-L65
santoshphilip/eppy
eppy/fanpower.py
watts2pascal
def watts2pascal(watts, cfm, fan_tot_eff): """convert and return inputs for E+ in pascal and m3/s""" bhp = watts2bhp(watts) return bhp2pascal(bhp, cfm, fan_tot_eff)
python
def watts2pascal(watts, cfm, fan_tot_eff): """convert and return inputs for E+ in pascal and m3/s""" bhp = watts2bhp(watts) return bhp2pascal(bhp, cfm, fan_tot_eff)
[ "def", "watts2pascal", "(", "watts", ",", "cfm", ",", "fan_tot_eff", ")", ":", "bhp", "=", "watts2bhp", "(", "watts", ")", "return", "bhp2pascal", "(", "bhp", ",", "cfm", ",", "fan_tot_eff", ")" ]
convert and return inputs for E+ in pascal and m3/s
[ "convert", "and", "return", "inputs", "for", "E", "+", "in", "pascal", "and", "m3", "/", "s" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/fanpower.py#L67-L70
santoshphilip/eppy
eppy/fanpower.py
fanpower_bhp
def fanpower_bhp(ddtt): """return fan power in bhp given the fan IDF object""" from eppy.bunch_subclass import BadEPFieldError # here to prevent circular dependency try: fan_tot_eff = ddtt.Fan_Total_Efficiency # from V+ V8.7.0 onwards except BadEPFieldError as e: fan_tot_eff = ddtt.Fan_Efficiency pascal = float(ddtt.Pressure_Rise) if str(ddtt.Maximum_Flow_Rate).lower() == 'autosize': # str can fail with unicode chars :-( return 'autosize' else: m3s = float(ddtt.Maximum_Flow_Rate) return fan_bhp(fan_tot_eff, pascal, m3s)
python
def fanpower_bhp(ddtt): """return fan power in bhp given the fan IDF object""" from eppy.bunch_subclass import BadEPFieldError # here to prevent circular dependency try: fan_tot_eff = ddtt.Fan_Total_Efficiency # from V+ V8.7.0 onwards except BadEPFieldError as e: fan_tot_eff = ddtt.Fan_Efficiency pascal = float(ddtt.Pressure_Rise) if str(ddtt.Maximum_Flow_Rate).lower() == 'autosize': # str can fail with unicode chars :-( return 'autosize' else: m3s = float(ddtt.Maximum_Flow_Rate) return fan_bhp(fan_tot_eff, pascal, m3s)
[ "def", "fanpower_bhp", "(", "ddtt", ")", ":", "from", "eppy", ".", "bunch_subclass", "import", "BadEPFieldError", "# here to prevent circular dependency", "try", ":", "fan_tot_eff", "=", "ddtt", ".", "Fan_Total_Efficiency", "# from V+ V8.7.0 onwards", "except", "BadEPFieldError", "as", "e", ":", "fan_tot_eff", "=", "ddtt", ".", "Fan_Efficiency", "pascal", "=", "float", "(", "ddtt", ".", "Pressure_Rise", ")", "if", "str", "(", "ddtt", ".", "Maximum_Flow_Rate", ")", ".", "lower", "(", ")", "==", "'autosize'", ":", "# str can fail with unicode chars :-(", "return", "'autosize'", "else", ":", "m3s", "=", "float", "(", "ddtt", ".", "Maximum_Flow_Rate", ")", "return", "fan_bhp", "(", "fan_tot_eff", ",", "pascal", ",", "m3s", ")" ]
return fan power in bhp given the fan IDF object
[ "return", "fan", "power", "in", "bhp", "given", "the", "fan", "IDF", "object" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/fanpower.py#L72-L85
santoshphilip/eppy
eppy/fanpower.py
fanpower_watts
def fanpower_watts(ddtt): """return fan power in bhp given the fan IDF object""" from eppy.bunch_subclass import BadEPFieldError # here to prevent circular dependency try: fan_tot_eff = ddtt.Fan_Total_Efficiency # from V+ V8.7.0 onwards except BadEPFieldError as e: fan_tot_eff = ddtt.Fan_Efficiency pascal = float(ddtt.Pressure_Rise) if str(ddtt.Maximum_Flow_Rate).lower() == 'autosize': # str can fail with unicode chars :-( return 'autosize' else: m3s = float(ddtt.Maximum_Flow_Rate) return fan_watts(fan_tot_eff, pascal, m3s)
python
def fanpower_watts(ddtt): """return fan power in bhp given the fan IDF object""" from eppy.bunch_subclass import BadEPFieldError # here to prevent circular dependency try: fan_tot_eff = ddtt.Fan_Total_Efficiency # from V+ V8.7.0 onwards except BadEPFieldError as e: fan_tot_eff = ddtt.Fan_Efficiency pascal = float(ddtt.Pressure_Rise) if str(ddtt.Maximum_Flow_Rate).lower() == 'autosize': # str can fail with unicode chars :-( return 'autosize' else: m3s = float(ddtt.Maximum_Flow_Rate) return fan_watts(fan_tot_eff, pascal, m3s)
[ "def", "fanpower_watts", "(", "ddtt", ")", ":", "from", "eppy", ".", "bunch_subclass", "import", "BadEPFieldError", "# here to prevent circular dependency", "try", ":", "fan_tot_eff", "=", "ddtt", ".", "Fan_Total_Efficiency", "# from V+ V8.7.0 onwards", "except", "BadEPFieldError", "as", "e", ":", "fan_tot_eff", "=", "ddtt", ".", "Fan_Efficiency", "pascal", "=", "float", "(", "ddtt", ".", "Pressure_Rise", ")", "if", "str", "(", "ddtt", ".", "Maximum_Flow_Rate", ")", ".", "lower", "(", ")", "==", "'autosize'", ":", "# str can fail with unicode chars :-(", "return", "'autosize'", "else", ":", "m3s", "=", "float", "(", "ddtt", ".", "Maximum_Flow_Rate", ")", "return", "fan_watts", "(", "fan_tot_eff", ",", "pascal", ",", "m3s", ")" ]
return fan power in watts given the fan IDF object
[ "return", "fan", "power", "in", "watts", "given", "the", "fan", "IDF", "object" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/fanpower.py#L87-L100
santoshphilip/eppy
eppy/fanpower.py
fan_maxcfm
def fan_maxcfm(ddtt): """return the fan max cfm""" if str(ddtt.Maximum_Flow_Rate).lower() == 'autosize': # str can fail with unicode chars :-( return 'autosize' else: m3s = float(ddtt.Maximum_Flow_Rate) return m3s2cfm(m3s)
python
def fan_maxcfm(ddtt): """return the fan max cfm""" if str(ddtt.Maximum_Flow_Rate).lower() == 'autosize': # str can fail with unicode chars :-( return 'autosize' else: m3s = float(ddtt.Maximum_Flow_Rate) return m3s2cfm(m3s)
[ "def", "fan_maxcfm", "(", "ddtt", ")", ":", "if", "str", "(", "ddtt", ".", "Maximum_Flow_Rate", ")", ".", "lower", "(", ")", "==", "'autosize'", ":", "# str can fail with unicode chars :-(", "return", "'autosize'", "else", ":", "m3s", "=", "float", "(", "ddtt", ".", "Maximum_Flow_Rate", ")", "return", "m3s2cfm", "(", "m3s", ")" ]
return the fan max cfm
[ "return", "the", "fan", "max", "cfm" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/fanpower.py#L102-L109
santoshphilip/eppy
eppy/runner/run_functions.py
install_paths
def install_paths(version=None, iddname=None): """Get the install paths for EnergyPlus executable and weather files. We prefer to get the install path from the IDD name but fall back to getting it from the version number for backwards compatibility and to simplify tests. Parameters ---------- version : str, optional EnergyPlus version in the format "X-X-X", e.g. "8-7-0". iddname : str, optional File path to the IDD. Returns ------- eplus_exe : str Full path to the EnergyPlus executable. eplus_weather : str Full path to the EnergyPlus weather directory. """ try: eplus_exe, eplus_home = paths_from_iddname(iddname) except (AttributeError, TypeError, ValueError): eplus_exe, eplus_home = paths_from_version(version) eplus_weather = os.path.join(eplus_home, 'WeatherData') return eplus_exe, eplus_weather
python
def install_paths(version=None, iddname=None): """Get the install paths for EnergyPlus executable and weather files. We prefer to get the install path from the IDD name but fall back to getting it from the version number for backwards compatibility and to simplify tests. Parameters ---------- version : str, optional EnergyPlus version in the format "X-X-X", e.g. "8-7-0". iddname : str, optional File path to the IDD. Returns ------- eplus_exe : str Full path to the EnergyPlus executable. eplus_weather : str Full path to the EnergyPlus weather directory. """ try: eplus_exe, eplus_home = paths_from_iddname(iddname) except (AttributeError, TypeError, ValueError): eplus_exe, eplus_home = paths_from_version(version) eplus_weather = os.path.join(eplus_home, 'WeatherData') return eplus_exe, eplus_weather
[ "def", "install_paths", "(", "version", "=", "None", ",", "iddname", "=", "None", ")", ":", "try", ":", "eplus_exe", ",", "eplus_home", "=", "paths_from_iddname", "(", "iddname", ")", "except", "(", "AttributeError", ",", "TypeError", ",", "ValueError", ")", ":", "eplus_exe", ",", "eplus_home", "=", "paths_from_version", "(", "version", ")", "eplus_weather", "=", "os", ".", "path", ".", "join", "(", "eplus_home", ",", "'WeatherData'", ")", "return", "eplus_exe", ",", "eplus_weather" ]
Get the install paths for EnergyPlus executable and weather files. We prefer to get the install path from the IDD name but fall back to getting it from the version number for backwards compatibility and to simplify tests. Parameters ---------- version : str, optional EnergyPlus version in the format "X-X-X", e.g. "8-7-0". iddname : str, optional File path to the IDD. Returns ------- eplus_exe : str Full path to the EnergyPlus executable. eplus_weather : str Full path to the EnergyPlus weather directory.
[ "Get", "the", "install", "paths", "for", "EnergyPlus", "executable", "and", "weather", "files", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/runner/run_functions.py#L29-L57
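A short sketch of calling install_paths directly; the version string is a placeholder and the returned paths depend on the local EnergyPlus install.

from eppy.runner.run_functions import install_paths

# with no IDD path given, the lookup falls back to the version-based defaults
eplus_exe, weather_dir = install_paths(version="8-9-0")  # placeholder version
print(eplus_exe)    # e.g. /usr/local/EnergyPlus-8-9-0/energyplus on Linux
print(weather_dir)  # the WeatherData directory inside the install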
santoshphilip/eppy
eppy/runner/run_functions.py
paths_from_iddname
def paths_from_iddname(iddname): """Get the EnergyPlus install directory and executable path. Parameters ---------- iddname : str, optional File path to the IDD. Returns ------- eplus_exe : str Full path to the EnergyPlus executable. eplus_home : str Full path to the EnergyPlus install directory. Raises ------ AttributeError (TypeError on Windows) If iddname does not have a directory component (e.g. if None). ValueError If eplus_exe is not a file. """ eplus_home = os.path.abspath(os.path.dirname(iddname)) if platform.system() == 'Windows': eplus_exe = os.path.join(eplus_home, 'energyplus.exe') elif platform.system() == "Linux": eplus_exe = os.path.join(eplus_home, 'energyplus') else: eplus_exe = os.path.join(eplus_home, 'energyplus') if not os.path.isfile(eplus_exe): raise ValueError return eplus_exe, eplus_home
python
def paths_from_iddname(iddname): """Get the EnergyPlus install directory and executable path. Parameters ---------- iddname : str, optional File path to the IDD. Returns ------- eplus_exe : str Full path to the EnergyPlus executable. eplus_home : str Full path to the EnergyPlus install directory. Raises ------ AttributeError (TypeError on Windows) If iddname does not have a directory component (e.g. if None). ValueError If eplus_exe is not a file. """ eplus_home = os.path.abspath(os.path.dirname(iddname)) if platform.system() == 'Windows': eplus_exe = os.path.join(eplus_home, 'energyplus.exe') elif platform.system() == "Linux": eplus_exe = os.path.join(eplus_home, 'energyplus') else: eplus_exe = os.path.join(eplus_home, 'energyplus') if not os.path.isfile(eplus_exe): raise ValueError return eplus_exe, eplus_home
[ "def", "paths_from_iddname", "(", "iddname", ")", ":", "eplus_home", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "iddname", ")", ")", "if", "platform", ".", "system", "(", ")", "==", "'Windows'", ":", "eplus_exe", "=", "os", ".", "path", ".", "join", "(", "eplus_home", ",", "'energyplus.exe'", ")", "elif", "platform", ".", "system", "(", ")", "==", "\"Linux\"", ":", "eplus_exe", "=", "os", ".", "path", ".", "join", "(", "eplus_home", ",", "'energyplus'", ")", "else", ":", "eplus_exe", "=", "os", ".", "path", ".", "join", "(", "eplus_home", ",", "'energyplus'", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "eplus_exe", ")", ":", "raise", "ValueError", "return", "eplus_exe", ",", "eplus_home" ]
Get the EnergyPlus install directory and executable path. Parameters ---------- iddname : str, optional File path to the IDD. Returns ------- eplus_exe : str Full path to the EnergyPlus executable. eplus_home : str Full path to the EnergyPlus install directory. Raises ------ AttributeError (TypeError on Windows) If iddname does not have a directory component (e.g. if None). ValueError If eplus_exe is not a file.
[ "Get", "the", "EnergyPlus", "install", "directory", "and", "executable", "path", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/runner/run_functions.py#L60-L92
santoshphilip/eppy
eppy/runner/run_functions.py
paths_from_version
def paths_from_version(version): """Get the EnergyPlus install directory and executable path. Parameters ---------- version : str, optional EnergyPlus version in the format "X-X-X", e.g. "8-7-0". Returns ------- eplus_exe : str Full path to the EnergyPlus executable. eplus_home : str Full path to the EnergyPlus install directory. """ if platform.system() == 'Windows': eplus_home = "C:/EnergyPlusV{version}".format(version=version) eplus_exe = os.path.join(eplus_home, 'energyplus.exe') elif platform.system() == "Linux": eplus_home = "/usr/local/EnergyPlus-{version}".format(version=version) eplus_exe = os.path.join(eplus_home, 'energyplus') else: eplus_home = "/Applications/EnergyPlus-{version}".format(version=version) eplus_exe = os.path.join(eplus_home, 'energyplus') return eplus_exe, eplus_home
python
def paths_from_version(version): """Get the EnergyPlus install directory and executable path. Parameters ---------- version : str, optional EnergyPlus version in the format "X-X-X", e.g. "8-7-0". Returns ------- eplus_exe : str Full path to the EnergyPlus executable. eplus_home : str Full path to the EnergyPlus install directory. """ if platform.system() == 'Windows': eplus_home = "C:/EnergyPlusV{version}".format(version=version) eplus_exe = os.path.join(eplus_home, 'energyplus.exe') elif platform.system() == "Linux": eplus_home = "/usr/local/EnergyPlus-{version}".format(version=version) eplus_exe = os.path.join(eplus_home, 'energyplus') else: eplus_home = "/Applications/EnergyPlus-{version}".format(version=version) eplus_exe = os.path.join(eplus_home, 'energyplus') return eplus_exe, eplus_home
[ "def", "paths_from_version", "(", "version", ")", ":", "if", "platform", ".", "system", "(", ")", "==", "'Windows'", ":", "eplus_home", "=", "\"C:/EnergyPlusV{version}\"", ".", "format", "(", "version", "=", "version", ")", "eplus_exe", "=", "os", ".", "path", ".", "join", "(", "eplus_home", ",", "'energyplus.exe'", ")", "elif", "platform", ".", "system", "(", ")", "==", "\"Linux\"", ":", "eplus_home", "=", "\"/usr/local/EnergyPlus-{version}\"", ".", "format", "(", "version", "=", "version", ")", "eplus_exe", "=", "os", ".", "path", ".", "join", "(", "eplus_home", ",", "'energyplus'", ")", "else", ":", "eplus_home", "=", "\"/Applications/EnergyPlus-{version}\"", ".", "format", "(", "version", "=", "version", ")", "eplus_exe", "=", "os", ".", "path", ".", "join", "(", "eplus_home", ",", "'energyplus'", ")", "return", "eplus_exe", ",", "eplus_home" ]
Get the EnergyPlus install directory and executable path. Parameters ---------- version : str, optional EnergyPlus version in the format "X-X-X", e.g. "8-7-0". Returns ------- eplus_exe : str Full path to the EnergyPlus executable. eplus_home : str Full path to the EnergyPlus install directory.
[ "Get", "the", "EnergyPlus", "install", "directory", "and", "executable", "path", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/runner/run_functions.py#L95-L120
santoshphilip/eppy
eppy/runner/run_functions.py
wrapped_help_text
def wrapped_help_text(wrapped_func): """Decorator to pass through the documentation from a wrapped function. """ def decorator(wrapper_func): """The decorator. Parameters ---------- f : callable The wrapped function. """ wrapper_func.__doc__ = ('This method wraps the following method:\n\n' + pydoc.text.document(wrapped_func)) return wrapper_func return decorator
python
def wrapped_help_text(wrapped_func): """Decorator to pass through the documentation from a wrapped function. """ def decorator(wrapper_func): """The decorator. Parameters ---------- f : callable The wrapped function. """ wrapper_func.__doc__ = ('This method wraps the following method:\n\n' + pydoc.text.document(wrapped_func)) return wrapper_func return decorator
[ "def", "wrapped_help_text", "(", "wrapped_func", ")", ":", "def", "decorator", "(", "wrapper_func", ")", ":", "\"\"\"The decorator.\n\n Parameters\n ----------\n f : callable\n The wrapped function.\n\n \"\"\"", "wrapper_func", ".", "__doc__", "=", "(", "'This method wraps the following method:\\n\\n'", "+", "pydoc", ".", "text", ".", "document", "(", "wrapped_func", ")", ")", "return", "wrapper_func", "return", "decorator" ]
Decorator to pass through the documentation from a wrapped function.
[ "Decorator", "to", "pass", "through", "the", "documentation", "from", "a", "wrapped", "function", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/runner/run_functions.py#L123-L138
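A self-contained sketch of wrapped_help_text in use; the wrapped and wrapper functions below are invented for illustration.

from eppy.runner.run_functions import wrapped_help_text

def original(x):
    """Documentation that should be passed through."""
    return x

@wrapped_help_text(original)
def wrapper(x):
    return original(x)

print(wrapper.__doc__)  # begins with "This method wraps the following method:"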
santoshphilip/eppy
eppy/runner/run_functions.py
runIDFs
def runIDFs(jobs, processors=1): """Wrapper for run() to be used when running IDF5 runs in parallel. Parameters ---------- jobs : iterable A list or generator made up of an IDF5 object and a kwargs dict (see `run_functions.run` for valid keywords). processors : int, optional Number of processors to run on (default: 1). If 0 is passed then the process will run on all CPUs, -1 means one less than all CPUs, etc. """ if processors <= 0: processors = max(1, mp.cpu_count() - processors) shutil.rmtree("multi_runs", ignore_errors=True) os.mkdir("multi_runs") prepared_runs = (prepare_run(run_id, run_data) for run_id, run_data in enumerate(jobs)) try: pool = mp.Pool(processors) pool.map(multirunner, prepared_runs) pool.close() except NameError: # multiprocessing not present so pass the jobs one at a time for job in prepared_runs: multirunner([job]) shutil.rmtree("multi_runs", ignore_errors=True)
python
def runIDFs(jobs, processors=1): """Wrapper for run() to be used when running IDF5 runs in parallel. Parameters ---------- jobs : iterable A list or generator made up of an IDF5 object and a kwargs dict (see `run_functions.run` for valid keywords). processors : int, optional Number of processors to run on (default: 1). If 0 is passed then the process will run on all CPUs, -1 means one less than all CPUs, etc. """ if processors <= 0: processors = max(1, mp.cpu_count() - processors) shutil.rmtree("multi_runs", ignore_errors=True) os.mkdir("multi_runs") prepared_runs = (prepare_run(run_id, run_data) for run_id, run_data in enumerate(jobs)) try: pool = mp.Pool(processors) pool.map(multirunner, prepared_runs) pool.close() except NameError: # multiprocessing not present so pass the jobs one at a time for job in prepared_runs: multirunner([job]) shutil.rmtree("multi_runs", ignore_errors=True)
[ "def", "runIDFs", "(", "jobs", ",", "processors", "=", "1", ")", ":", "if", "processors", "<=", "0", ":", "processors", "=", "max", "(", "1", ",", "mp", ".", "cpu_count", "(", ")", "-", "processors", ")", "shutil", ".", "rmtree", "(", "\"multi_runs\"", ",", "ignore_errors", "=", "True", ")", "os", ".", "mkdir", "(", "\"multi_runs\"", ")", "prepared_runs", "=", "(", "prepare_run", "(", "run_id", ",", "run_data", ")", "for", "run_id", ",", "run_data", "in", "enumerate", "(", "jobs", ")", ")", "try", ":", "pool", "=", "mp", ".", "Pool", "(", "processors", ")", "pool", ".", "map", "(", "multirunner", ",", "prepared_runs", ")", "pool", ".", "close", "(", ")", "except", "NameError", ":", "# multiprocessing not present so pass the jobs one at a time", "for", "job", "in", "prepared_runs", ":", "multirunner", "(", "[", "job", "]", ")", "shutil", ".", "rmtree", "(", "\"multi_runs\"", ",", "ignore_errors", "=", "True", ")" ]
Wrapper for run() to be used when running IDF5 runs in parallel. Parameters ---------- jobs : iterable A list or generator made up of an IDF5 object and a kwargs dict (see `run_functions.run` for valid keywords). processors : int, optional Number of processors to run on (default: 1). If 0 is passed then the process will run on all CPUs, -1 means one less than all CPUs, etc.
[ "Wrapper", "for", "run", "()", "to", "be", "used", "when", "running", "IDF5", "runs", "in", "parallel", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/runner/run_functions.py#L141-L169
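A hedged sketch of building jobs for runIDFs; the IDD, IDF and EPW paths are placeholders, and passing the EPW to the IDF constructor is assumed from how prepare_run reads idf.epw.

from eppy.modeleditor import IDF
from eppy.runner.run_functions import runIDFs

IDF.setiddname("/path/to/Energy+.idd")                     # placeholder path
idfs = [IDF("/path/to/model.idf", "/path/to/weather.epw")  # placeholder paths
        for _ in range(2)]
jobs = ((idf, {"output_directory": "run_%i" % i}) for i, idf in enumerate(idfs))
runIDFs(jobs, processors=2)  # run the jobs across two processes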
santoshphilip/eppy
eppy/runner/run_functions.py
prepare_run
def prepare_run(run_id, run_data): """Prepare run inputs for one of multiple EnergyPlus runs. :param run_id: An ID number for naming the IDF. :param run_data: Tuple of the IDF and keyword args to pass to EnergyPlus executable. :return: Tuple of the IDF path and EPW, and the keyword args. """ idf, kwargs = run_data epw = idf.epw idf_dir = os.path.join('multi_runs', 'idf_%i' % run_id) os.mkdir(idf_dir) idf_path = os.path.join(idf_dir, 'in.idf') idf.saveas(idf_path) return (idf_path, epw), kwargs
python
def prepare_run(run_id, run_data): """Prepare run inputs for one of multiple EnergyPlus runs. :param run_id: An ID number for naming the IDF. :param run_data: Tuple of the IDF and keyword args to pass to EnergyPlus executable. :return: Tuple of the IDF path and EPW, and the keyword args. """ idf, kwargs = run_data epw = idf.epw idf_dir = os.path.join('multi_runs', 'idf_%i' % run_id) os.mkdir(idf_dir) idf_path = os.path.join(idf_dir, 'in.idf') idf.saveas(idf_path) return (idf_path, epw), kwargs
[ "def", "prepare_run", "(", "run_id", ",", "run_data", ")", ":", "idf", ",", "kwargs", "=", "run_data", "epw", "=", "idf", ".", "epw", "idf_dir", "=", "os", ".", "path", ".", "join", "(", "'multi_runs'", ",", "'idf_%i'", "%", "run_id", ")", "os", ".", "mkdir", "(", "idf_dir", ")", "idf_path", "=", "os", ".", "path", ".", "join", "(", "idf_dir", ",", "'in.idf'", ")", "idf", ".", "saveas", "(", "idf_path", ")", "return", "(", "idf_path", ",", "epw", ")", ",", "kwargs" ]
Prepare run inputs for one of multiple EnergyPlus runs. :param run_id: An ID number for naming the IDF. :param run_data: Tuple of the IDF and keyword args to pass to EnergyPlus executable. :return: Tuple of the IDF path and EPW, and the keyword args.
[ "Prepare", "run", "inputs", "for", "one", "of", "multiple", "EnergyPlus", "runs", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/runner/run_functions.py#L172-L185
santoshphilip/eppy
eppy/runner/run_functions.py
run
def run(idf=None, weather=None, output_directory='', annual=False, design_day=False, idd=None, epmacro=False, expandobjects=False, readvars=False, output_prefix=None, output_suffix=None, version=False, verbose='v', ep_version=None): """ Wrapper around the EnergyPlus command line interface. Parameters ---------- idf : str Full or relative path to the IDF file to be run, or an IDF object. weather : str Full or relative path to the weather file. output_directory : str, optional Full or relative path to an output directory (default: 'run_outputs) annual : bool, optional If True then force annual simulation (default: False) design_day : bool, optional Force design-day-only simulation (default: False) idd : str, optional Input data dictionary (default: Energy+.idd in EnergyPlus directory) epmacro : str, optional Run EPMacro prior to simulation (default: False). expandobjects : bool, optional Run ExpandObjects prior to simulation (default: False) readvars : bool, optional Run ReadVarsESO after simulation (default: False) output_prefix : str, optional Prefix for output file names (default: eplus) output_suffix : str, optional Suffix style for output file names (default: L) L: Legacy (e.g., eplustbl.csv) C: Capital (e.g., eplusTable.csv) D: Dash (e.g., eplus-table.csv) version : bool, optional Display version information (default: False) verbose: str Set verbosity of runtime messages (default: v) v: verbose q: quiet ep_version: str EnergyPlus version, used to find install directory. Required if run() is called with an IDF file path rather than an IDF object. Returns ------- str : status Raises ------ CalledProcessError AttributeError If no ep_version parameter is passed when calling with an IDF file path rather than an IDF object. """ args = locals().copy() # get unneeded params out of args ready to pass the rest to energyplus.exe verbose = args.pop('verbose') idf = args.pop('idf') iddname = args.get('idd') if not isinstance(iddname, str): args.pop('idd') try: idf_path = os.path.abspath(idf.idfname) except AttributeError: idf_path = os.path.abspath(idf) ep_version = args.pop('ep_version') # get version from IDF object or by parsing the IDF file for it if not ep_version: try: ep_version = '-'.join(str(x) for x in idf.idd_version[:3]) except AttributeError: raise AttributeError( "The ep_version must be set when passing an IDF path. \ Alternatively, use IDF.run()") eplus_exe_path, eplus_weather_path = install_paths(ep_version, iddname) if version: # just get EnergyPlus version number and return cmd = [eplus_exe_path, '--version'] check_call(cmd) return # convert paths to absolute paths if required if os.path.isfile(args['weather']): args['weather'] = os.path.abspath(args['weather']) else: args['weather'] = os.path.join(eplus_weather_path, args['weather']) output_dir = os.path.abspath(args['output_directory']) args['output_directory'] = output_dir # store the directory we start in cwd = os.getcwd() run_dir = os.path.abspath(tempfile.mkdtemp()) os.chdir(run_dir) # build a list of command line arguments cmd = [eplus_exe_path] for arg in args: if args[arg]: if isinstance(args[arg], bool): args[arg] = '' cmd.extend(['--{}'.format(arg.replace('_', '-'))]) if args[arg] != "": cmd.extend([args[arg]]) cmd.extend([idf_path]) try: if verbose == 'v': print("\r\n" + " ".join(cmd) + "\r\n") check_call(cmd) elif verbose == 'q': check_call(cmd, stdout=open(os.devnull, 'w')) except CalledProcessError: message = parse_error(output_dir) raise EnergyPlusRunError(message) finally: os.chdir(cwd) return 'OK'
python
def run(idf=None, weather=None, output_directory='', annual=False, design_day=False, idd=None, epmacro=False, expandobjects=False, readvars=False, output_prefix=None, output_suffix=None, version=False, verbose='v', ep_version=None): """ Wrapper around the EnergyPlus command line interface. Parameters ---------- idf : str Full or relative path to the IDF file to be run, or an IDF object. weather : str Full or relative path to the weather file. output_directory : str, optional Full or relative path to an output directory (default: 'run_outputs) annual : bool, optional If True then force annual simulation (default: False) design_day : bool, optional Force design-day-only simulation (default: False) idd : str, optional Input data dictionary (default: Energy+.idd in EnergyPlus directory) epmacro : str, optional Run EPMacro prior to simulation (default: False). expandobjects : bool, optional Run ExpandObjects prior to simulation (default: False) readvars : bool, optional Run ReadVarsESO after simulation (default: False) output_prefix : str, optional Prefix for output file names (default: eplus) output_suffix : str, optional Suffix style for output file names (default: L) L: Legacy (e.g., eplustbl.csv) C: Capital (e.g., eplusTable.csv) D: Dash (e.g., eplus-table.csv) version : bool, optional Display version information (default: False) verbose: str Set verbosity of runtime messages (default: v) v: verbose q: quiet ep_version: str EnergyPlus version, used to find install directory. Required if run() is called with an IDF file path rather than an IDF object. Returns ------- str : status Raises ------ CalledProcessError AttributeError If no ep_version parameter is passed when calling with an IDF file path rather than an IDF object. """ args = locals().copy() # get unneeded params out of args ready to pass the rest to energyplus.exe verbose = args.pop('verbose') idf = args.pop('idf') iddname = args.get('idd') if not isinstance(iddname, str): args.pop('idd') try: idf_path = os.path.abspath(idf.idfname) except AttributeError: idf_path = os.path.abspath(idf) ep_version = args.pop('ep_version') # get version from IDF object or by parsing the IDF file for it if not ep_version: try: ep_version = '-'.join(str(x) for x in idf.idd_version[:3]) except AttributeError: raise AttributeError( "The ep_version must be set when passing an IDF path. \ Alternatively, use IDF.run()") eplus_exe_path, eplus_weather_path = install_paths(ep_version, iddname) if version: # just get EnergyPlus version number and return cmd = [eplus_exe_path, '--version'] check_call(cmd) return # convert paths to absolute paths if required if os.path.isfile(args['weather']): args['weather'] = os.path.abspath(args['weather']) else: args['weather'] = os.path.join(eplus_weather_path, args['weather']) output_dir = os.path.abspath(args['output_directory']) args['output_directory'] = output_dir # store the directory we start in cwd = os.getcwd() run_dir = os.path.abspath(tempfile.mkdtemp()) os.chdir(run_dir) # build a list of command line arguments cmd = [eplus_exe_path] for arg in args: if args[arg]: if isinstance(args[arg], bool): args[arg] = '' cmd.extend(['--{}'.format(arg.replace('_', '-'))]) if args[arg] != "": cmd.extend([args[arg]]) cmd.extend([idf_path]) try: if verbose == 'v': print("\r\n" + " ".join(cmd) + "\r\n") check_call(cmd) elif verbose == 'q': check_call(cmd, stdout=open(os.devnull, 'w')) except CalledProcessError: message = parse_error(output_dir) raise EnergyPlusRunError(message) finally: os.chdir(cwd) return 'OK'
[ "def", "run", "(", "idf", "=", "None", ",", "weather", "=", "None", ",", "output_directory", "=", "''", ",", "annual", "=", "False", ",", "design_day", "=", "False", ",", "idd", "=", "None", ",", "epmacro", "=", "False", ",", "expandobjects", "=", "False", ",", "readvars", "=", "False", ",", "output_prefix", "=", "None", ",", "output_suffix", "=", "None", ",", "version", "=", "False", ",", "verbose", "=", "'v'", ",", "ep_version", "=", "None", ")", ":", "args", "=", "locals", "(", ")", ".", "copy", "(", ")", "# get unneeded params out of args ready to pass the rest to energyplus.exe", "verbose", "=", "args", ".", "pop", "(", "'verbose'", ")", "idf", "=", "args", ".", "pop", "(", "'idf'", ")", "iddname", "=", "args", ".", "get", "(", "'idd'", ")", "if", "not", "isinstance", "(", "iddname", ",", "str", ")", ":", "args", ".", "pop", "(", "'idd'", ")", "try", ":", "idf_path", "=", "os", ".", "path", ".", "abspath", "(", "idf", ".", "idfname", ")", "except", "AttributeError", ":", "idf_path", "=", "os", ".", "path", ".", "abspath", "(", "idf", ")", "ep_version", "=", "args", ".", "pop", "(", "'ep_version'", ")", "# get version from IDF object or by parsing the IDF file for it", "if", "not", "ep_version", ":", "try", ":", "ep_version", "=", "'-'", ".", "join", "(", "str", "(", "x", ")", "for", "x", "in", "idf", ".", "idd_version", "[", ":", "3", "]", ")", "except", "AttributeError", ":", "raise", "AttributeError", "(", "\"The ep_version must be set when passing an IDF path. \\\n Alternatively, use IDF.run()\"", ")", "eplus_exe_path", ",", "eplus_weather_path", "=", "install_paths", "(", "ep_version", ",", "iddname", ")", "if", "version", ":", "# just get EnergyPlus version number and return", "cmd", "=", "[", "eplus_exe_path", ",", "'--version'", "]", "check_call", "(", "cmd", ")", "return", "# convert paths to absolute paths if required", "if", "os", ".", "path", ".", "isfile", "(", "args", "[", "'weather'", "]", ")", ":", "args", "[", "'weather'", "]", "=", "os", ".", "path", ".", "abspath", "(", "args", "[", "'weather'", "]", ")", "else", ":", "args", "[", "'weather'", "]", "=", "os", ".", "path", ".", "join", "(", "eplus_weather_path", ",", "args", "[", "'weather'", "]", ")", "output_dir", "=", "os", ".", "path", ".", "abspath", "(", "args", "[", "'output_directory'", "]", ")", "args", "[", "'output_directory'", "]", "=", "output_dir", "# store the directory we start in", "cwd", "=", "os", ".", "getcwd", "(", ")", "run_dir", "=", "os", ".", "path", ".", "abspath", "(", "tempfile", ".", "mkdtemp", "(", ")", ")", "os", ".", "chdir", "(", "run_dir", ")", "# build a list of command line arguments", "cmd", "=", "[", "eplus_exe_path", "]", "for", "arg", "in", "args", ":", "if", "args", "[", "arg", "]", ":", "if", "isinstance", "(", "args", "[", "arg", "]", ",", "bool", ")", ":", "args", "[", "arg", "]", "=", "''", "cmd", ".", "extend", "(", "[", "'--{}'", ".", "format", "(", "arg", ".", "replace", "(", "'_'", ",", "'-'", ")", ")", "]", ")", "if", "args", "[", "arg", "]", "!=", "\"\"", ":", "cmd", ".", "extend", "(", "[", "args", "[", "arg", "]", "]", ")", "cmd", ".", "extend", "(", "[", "idf_path", "]", ")", "try", ":", "if", "verbose", "==", "'v'", ":", "print", "(", "\"\\r\\n\"", "+", "\" \"", ".", "join", "(", "cmd", ")", "+", "\"\\r\\n\"", ")", "check_call", "(", "cmd", ")", "elif", "verbose", "==", "'q'", ":", "check_call", "(", "cmd", ",", "stdout", "=", "open", "(", "os", ".", "devnull", ",", "'w'", ")", ")", "except", "CalledProcessError", ":", "message", "=", "parse_error", "(", 
"output_dir", ")", "raise", "EnergyPlusRunError", "(", "message", ")", "finally", ":", "os", ".", "chdir", "(", "cwd", ")", "return", "'OK'" ]
Wrapper around the EnergyPlus command line interface. Parameters ---------- idf : str Full or relative path to the IDF file to be run, or an IDF object. weather : str Full or relative path to the weather file. output_directory : str, optional Full or relative path to an output directory (default: 'run_outputs) annual : bool, optional If True then force annual simulation (default: False) design_day : bool, optional Force design-day-only simulation (default: False) idd : str, optional Input data dictionary (default: Energy+.idd in EnergyPlus directory) epmacro : str, optional Run EPMacro prior to simulation (default: False). expandobjects : bool, optional Run ExpandObjects prior to simulation (default: False) readvars : bool, optional Run ReadVarsESO after simulation (default: False) output_prefix : str, optional Prefix for output file names (default: eplus) output_suffix : str, optional Suffix style for output file names (default: L) L: Legacy (e.g., eplustbl.csv) C: Capital (e.g., eplusTable.csv) D: Dash (e.g., eplus-table.csv) version : bool, optional Display version information (default: False) verbose: str Set verbosity of runtime messages (default: v) v: verbose q: quiet ep_version: str EnergyPlus version, used to find install directory. Required if run() is called with an IDF file path rather than an IDF object. Returns ------- str : status Raises ------ CalledProcessError AttributeError If no ep_version parameter is passed when calling with an IDF file path rather than an IDF object.
[ "Wrapper", "around", "the", "EnergyPlus", "command", "line", "interface", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/runner/run_functions.py#L200-L334
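A minimal sketch of calling run() with a file path, in which case ep_version must be given; all paths and the version string are placeholders.

from eppy.runner.run_functions import run

run(idf="/path/to/model.idf",        # placeholder path
    weather="/path/to/weather.epw",  # placeholder; a bare filename is looked up in the install's WeatherData folder
    output_directory="run_outputs",
    readvars=True,
    ep_version="8-9-0")              # required when idf is a path rather than an IDF object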
santoshphilip/eppy
eppy/runner/run_functions.py
parse_error
def parse_error(output_dir): """Add contents of stderr and eplusout.err and put it in the exception message. :param output_dir: str :return: str """ sys.stderr.seek(0) std_err = sys.stderr.read().decode('utf-8') err_file = os.path.join(output_dir, "eplusout.err") if os.path.isfile(err_file): with open(err_file, "r") as f: ep_err = f.read() else: ep_err = "<File not found>" message = "\r\n{std_err}\r\nContents of EnergyPlus error file at {err_file}\r\n{ep_err}".format(**locals()) return message
python
def parse_error(output_dir): """Add contents of stderr and eplusout.err and put it in the exception message. :param output_dir: str :return: str """ sys.stderr.seek(0) std_err = sys.stderr.read().decode('utf-8') err_file = os.path.join(output_dir, "eplusout.err") if os.path.isfile(err_file): with open(err_file, "r") as f: ep_err = f.read() else: ep_err = "<File not found>" message = "\r\n{std_err}\r\nContents of EnergyPlus error file at {err_file}\r\n{ep_err}".format(**locals()) return message
[ "def", "parse_error", "(", "output_dir", ")", ":", "sys", ".", "stderr", ".", "seek", "(", "0", ")", "std_err", "=", "sys", ".", "stderr", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")", "err_file", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"eplusout.err\"", ")", "if", "os", ".", "path", ".", "isfile", "(", "err_file", ")", ":", "with", "open", "(", "err_file", ",", "\"r\"", ")", "as", "f", ":", "ep_err", "=", "f", ".", "read", "(", ")", "else", ":", "ep_err", "=", "\"<File not found>\"", "message", "=", "\"\\r\\n{std_err}\\r\\nContents of EnergyPlus error file at {err_file}\\r\\n{ep_err}\"", ".", "format", "(", "*", "*", "locals", "(", ")", ")", "return", "message" ]
Add contents of stderr and eplusout.err and put it in the exception message. :param output_dir: str :return: str
[ "Add", "contents", "of", "stderr", "and", "eplusout", ".", "err", "and", "put", "it", "in", "the", "exception", "message", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/runner/run_functions.py#L337-L352
santoshphilip/eppy
eppy/constructions/thermal_properties.py
rvalue
def rvalue(ddtt): """ R value (W/K) of a construction or material. thickness (m) / conductivity (W/m-K) """ object_type = ddtt.obj[0] if object_type == 'Construction': rvalue = INSIDE_FILM_R + OUTSIDE_FILM_R layers = ddtt.obj[2:] field_idd = ddtt.getfieldidd('Outside_Layer') validobjects = field_idd['validobjects'] for layer in layers: found = False for key in validobjects: try: rvalue += ddtt.theidf.getobject(key, layer).rvalue found = True except AttributeError: pass if not found: raise AttributeError("%s material not found in IDF" % layer) elif object_type == 'Material': thickness = ddtt.obj[ddtt.objls.index('Thickness')] conductivity = ddtt.obj[ddtt.objls.index('Conductivity')] rvalue = thickness / conductivity elif object_type == 'Material:AirGap': rvalue = ddtt.obj[ddtt.objls.index('Thermal_Resistance')] elif object_type == 'Material:InfraredTransparent': rvalue = 0 elif object_type == 'Material:NoMass': rvalue = ddtt.obj[ddtt.objls.index('Thermal_Resistance')] elif object_type == 'Material:RoofVegetation': warnings.warn( "Material:RoofVegetation thermal properties are based on dry soil", UserWarning) thickness = ddtt.obj[ddtt.objls.index('Thickness')] conductivity = ddtt.obj[ddtt.objls.index('Conductivity_of_Dry_Soil')] rvalue = thickness / conductivity else: raise AttributeError("%s rvalue property not implemented" % object_type) return rvalue
python
def rvalue(ddtt): """ R value (W/K) of a construction or material. thickness (m) / conductivity (W/m-K) """ object_type = ddtt.obj[0] if object_type == 'Construction': rvalue = INSIDE_FILM_R + OUTSIDE_FILM_R layers = ddtt.obj[2:] field_idd = ddtt.getfieldidd('Outside_Layer') validobjects = field_idd['validobjects'] for layer in layers: found = False for key in validobjects: try: rvalue += ddtt.theidf.getobject(key, layer).rvalue found = True except AttributeError: pass if not found: raise AttributeError("%s material not found in IDF" % layer) elif object_type == 'Material': thickness = ddtt.obj[ddtt.objls.index('Thickness')] conductivity = ddtt.obj[ddtt.objls.index('Conductivity')] rvalue = thickness / conductivity elif object_type == 'Material:AirGap': rvalue = ddtt.obj[ddtt.objls.index('Thermal_Resistance')] elif object_type == 'Material:InfraredTransparent': rvalue = 0 elif object_type == 'Material:NoMass': rvalue = ddtt.obj[ddtt.objls.index('Thermal_Resistance')] elif object_type == 'Material:RoofVegetation': warnings.warn( "Material:RoofVegetation thermal properties are based on dry soil", UserWarning) thickness = ddtt.obj[ddtt.objls.index('Thickness')] conductivity = ddtt.obj[ddtt.objls.index('Conductivity_of_Dry_Soil')] rvalue = thickness / conductivity else: raise AttributeError("%s rvalue property not implemented" % object_type) return rvalue
[ "def", "rvalue", "(", "ddtt", ")", ":", "object_type", "=", "ddtt", ".", "obj", "[", "0", "]", "if", "object_type", "==", "'Construction'", ":", "rvalue", "=", "INSIDE_FILM_R", "+", "OUTSIDE_FILM_R", "layers", "=", "ddtt", ".", "obj", "[", "2", ":", "]", "field_idd", "=", "ddtt", ".", "getfieldidd", "(", "'Outside_Layer'", ")", "validobjects", "=", "field_idd", "[", "'validobjects'", "]", "for", "layer", "in", "layers", ":", "found", "=", "False", "for", "key", "in", "validobjects", ":", "try", ":", "rvalue", "+=", "ddtt", ".", "theidf", ".", "getobject", "(", "key", ",", "layer", ")", ".", "rvalue", "found", "=", "True", "except", "AttributeError", ":", "pass", "if", "not", "found", ":", "raise", "AttributeError", "(", "\"%s material not found in IDF\"", "%", "layer", ")", "elif", "object_type", "==", "'Material'", ":", "thickness", "=", "ddtt", ".", "obj", "[", "ddtt", ".", "objls", ".", "index", "(", "'Thickness'", ")", "]", "conductivity", "=", "ddtt", ".", "obj", "[", "ddtt", ".", "objls", ".", "index", "(", "'Conductivity'", ")", "]", "rvalue", "=", "thickness", "/", "conductivity", "elif", "object_type", "==", "'Material:AirGap'", ":", "rvalue", "=", "ddtt", ".", "obj", "[", "ddtt", ".", "objls", ".", "index", "(", "'Thermal_Resistance'", ")", "]", "elif", "object_type", "==", "'Material:InfraredTransparent'", ":", "rvalue", "=", "0", "elif", "object_type", "==", "'Material:NoMass'", ":", "rvalue", "=", "ddtt", ".", "obj", "[", "ddtt", ".", "objls", ".", "index", "(", "'Thermal_Resistance'", ")", "]", "elif", "object_type", "==", "'Material:RoofVegetation'", ":", "warnings", ".", "warn", "(", "\"Material:RoofVegetation thermal properties are based on dry soil\"", ",", "UserWarning", ")", "thickness", "=", "ddtt", ".", "obj", "[", "ddtt", ".", "objls", ".", "index", "(", "'Thickness'", ")", "]", "conductivity", "=", "ddtt", ".", "obj", "[", "ddtt", ".", "objls", ".", "index", "(", "'Conductivity_of_Dry_Soil'", ")", "]", "rvalue", "=", "thickness", "/", "conductivity", "else", ":", "raise", "AttributeError", "(", "\"%s rvalue property not implemented\"", "%", "object_type", ")", "return", "rvalue" ]
R value (W/K) of a construction or material. thickness (m) / conductivity (W/m-K)
[ "R", "value", "(", "W", "/", "K", ")", "of", "a", "construction", "or", "material", ".", "thickness", "(", "m", ")", "/", "conductivity", "(", "W", "/", "m", "-", "K", ")" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/constructions/thermal_properties.py#L24-L64
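The rvalue function above reads .rvalue off each layer object, so in practice it is reached as a property on eppy material and construction objects; a sketch assuming a loaded IDF object named idf and placeholder object names.

# assumes `idf` is an eppy IDF object that contains these objects
insulation = idf.getobject("MATERIAL", "Wall Insulation")  # placeholder name
wall = idf.getobject("CONSTRUCTION", "Exterior Wall")      # placeholder name
print(insulation.rvalue)  # Thickness / Conductivity for a Material layer
print(wall.rvalue)        # sum of layer R values plus the inside and outside film resistances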
santoshphilip/eppy
eppy/constructions/thermal_properties.py
heatcapacity
def heatcapacity(ddtt): """ Heat capacity (kJ/m2-K) of a construction or material. thickness (m) * density (kg/m3) * specific heat (J/kg-K) * 0.001 """ object_type = ddtt.obj[0] if object_type == 'Construction': heatcapacity = 0 layers = ddtt.obj[2:] field_idd = ddtt.getfieldidd('Outside_Layer') validobjects = field_idd['validobjects'] for layer in layers: found = False for key in validobjects: try: heatcapacity += ddtt.theidf.getobject(key, layer).heatcapacity found = True except AttributeError: pass if not found: raise AttributeError("%s material not found in IDF" % layer) elif object_type == 'Material': thickness = ddtt.obj[ddtt.objls.index('Thickness')] density = ddtt.obj[ddtt.objls.index('Density')] specificheat = ddtt.obj[ddtt.objls.index('Specific_Heat')] heatcapacity = thickness * density * specificheat * 0.001 elif object_type == 'Material:AirGap': heatcapacity = 0 elif object_type == 'Material:InfraredTransparent': heatcapacity = 0 elif object_type == 'Material:NoMass': warnings.warn( "Material:NoMass materials included in heat capacity calculation", UserWarning) heatcapacity = 0 elif object_type == 'Material:RoofVegetation': warnings.warn( "Material:RoofVegetation thermal properties are based on dry soil", UserWarning) thickness = ddtt.obj[ddtt.objls.index('Thickness')] density = ddtt.obj[ddtt.objls.index('Density_of_Dry_Soil')] specificheat = ddtt.obj[ddtt.objls.index('Specific_Heat_of_Dry_Soil')] heatcapacity = thickness * density * specificheat * 0.001 else: raise AttributeError("%s has no heatcapacity property" % object_type) return heatcapacity
python
def heatcapacity(ddtt): """ Heat capacity (kJ/m2-K) of a construction or material. thickness (m) * density (kg/m3) * specific heat (J/kg-K) * 0.001 """ object_type = ddtt.obj[0] if object_type == 'Construction': heatcapacity = 0 layers = ddtt.obj[2:] field_idd = ddtt.getfieldidd('Outside_Layer') validobjects = field_idd['validobjects'] for layer in layers: found = False for key in validobjects: try: heatcapacity += ddtt.theidf.getobject(key, layer).heatcapacity found = True except AttributeError: pass if not found: raise AttributeError("%s material not found in IDF" % layer) elif object_type == 'Material': thickness = ddtt.obj[ddtt.objls.index('Thickness')] density = ddtt.obj[ddtt.objls.index('Density')] specificheat = ddtt.obj[ddtt.objls.index('Specific_Heat')] heatcapacity = thickness * density * specificheat * 0.001 elif object_type == 'Material:AirGap': heatcapacity = 0 elif object_type == 'Material:InfraredTransparent': heatcapacity = 0 elif object_type == 'Material:NoMass': warnings.warn( "Material:NoMass materials included in heat capacity calculation", UserWarning) heatcapacity = 0 elif object_type == 'Material:RoofVegetation': warnings.warn( "Material:RoofVegetation thermal properties are based on dry soil", UserWarning) thickness = ddtt.obj[ddtt.objls.index('Thickness')] density = ddtt.obj[ddtt.objls.index('Density_of_Dry_Soil')] specificheat = ddtt.obj[ddtt.objls.index('Specific_Heat_of_Dry_Soil')] heatcapacity = thickness * density * specificheat * 0.001 else: raise AttributeError("%s has no heatcapacity property" % object_type) return heatcapacity
[ "def", "heatcapacity", "(", "ddtt", ")", ":", "object_type", "=", "ddtt", ".", "obj", "[", "0", "]", "if", "object_type", "==", "'Construction'", ":", "heatcapacity", "=", "0", "layers", "=", "ddtt", ".", "obj", "[", "2", ":", "]", "field_idd", "=", "ddtt", ".", "getfieldidd", "(", "'Outside_Layer'", ")", "validobjects", "=", "field_idd", "[", "'validobjects'", "]", "for", "layer", "in", "layers", ":", "found", "=", "False", "for", "key", "in", "validobjects", ":", "try", ":", "heatcapacity", "+=", "ddtt", ".", "theidf", ".", "getobject", "(", "key", ",", "layer", ")", ".", "heatcapacity", "found", "=", "True", "except", "AttributeError", ":", "pass", "if", "not", "found", ":", "raise", "AttributeError", "(", "\"%s material not found in IDF\"", "%", "layer", ")", "elif", "object_type", "==", "'Material'", ":", "thickness", "=", "ddtt", ".", "obj", "[", "ddtt", ".", "objls", ".", "index", "(", "'Thickness'", ")", "]", "density", "=", "ddtt", ".", "obj", "[", "ddtt", ".", "objls", ".", "index", "(", "'Density'", ")", "]", "specificheat", "=", "ddtt", ".", "obj", "[", "ddtt", ".", "objls", ".", "index", "(", "'Specific_Heat'", ")", "]", "heatcapacity", "=", "thickness", "*", "density", "*", "specificheat", "*", "0.001", "elif", "object_type", "==", "'Material:AirGap'", ":", "heatcapacity", "=", "0", "elif", "object_type", "==", "'Material:InfraredTransparent'", ":", "heatcapacity", "=", "0", "elif", "object_type", "==", "'Material:NoMass'", ":", "warnings", ".", "warn", "(", "\"Material:NoMass materials included in heat capacity calculation\"", ",", "UserWarning", ")", "heatcapacity", "=", "0", "elif", "object_type", "==", "'Material:RoofVegetation'", ":", "warnings", ".", "warn", "(", "\"Material:RoofVegetation thermal properties are based on dry soil\"", ",", "UserWarning", ")", "thickness", "=", "ddtt", ".", "obj", "[", "ddtt", ".", "objls", ".", "index", "(", "'Thickness'", ")", "]", "density", "=", "ddtt", ".", "obj", "[", "ddtt", ".", "objls", ".", "index", "(", "'Density_of_Dry_Soil'", ")", "]", "specificheat", "=", "ddtt", ".", "obj", "[", "ddtt", ".", "objls", ".", "index", "(", "'Specific_Heat_of_Dry_Soil'", ")", "]", "heatcapacity", "=", "thickness", "*", "density", "*", "specificheat", "*", "0.001", "else", ":", "raise", "AttributeError", "(", "\"%s has no heatcapacity property\"", "%", "object_type", ")", "return", "heatcapacity" ]
Heat capacity (kJ/m2-K) of a construction or material. thickness (m) * density (kg/m3) * specific heat (J/kg-K) * 0.001
[ "Heat", "capacity", "(", "kJ", "/", "m2", "-", "K", ")", "of", "a", "construction", "or", "material", ".", "thickness", "(", "m", ")", "*", "density", "(", "kg", "/", "m3", ")", "*", "specific", "heat", "(", "J", "/", "kg", "-", "K", ")", "*", "0", ".", "001" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/constructions/thermal_properties.py#L87-L132
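A quick worked example of the heat capacity arithmetic for a Material layer (the numbers are invented): a 0.1 m layer with density 2240 kg/m3 and specific heat 900 J/kg-K gives 0.1 * 2240 * 900 * 0.001 = 201.6 kJ/m2-K.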
santoshphilip/eppy
eppy/modeleditor.py
almostequal
def almostequal(first, second, places=7, printit=True): """ Test if two values are equal to a given number of places. This is based on python's unittest so may be covered by Python's license. """ if first == second: return True if round(abs(second - first), places) != 0: if printit: print(round(abs(second - first), places)) print("notalmost: %s != %s to %i places" % (first, second, places)) return False else: return True
python
def almostequal(first, second, places=7, printit=True): """ Test if two values are equal to a given number of places. This is based on python's unittest so may be covered by Python's license. """ if first == second: return True if round(abs(second - first), places) != 0: if printit: print(round(abs(second - first), places)) print("notalmost: %s != %s to %i places" % (first, second, places)) return False else: return True
[ "def", "almostequal", "(", "first", ",", "second", ",", "places", "=", "7", ",", "printit", "=", "True", ")", ":", "if", "first", "==", "second", ":", "return", "True", "if", "round", "(", "abs", "(", "second", "-", "first", ")", ",", "places", ")", "!=", "0", ":", "if", "printit", ":", "print", "(", "round", "(", "abs", "(", "second", "-", "first", ")", ",", "places", ")", ")", "print", "(", "\"notalmost: %s != %s to %i places\"", "%", "(", "first", ",", "second", ",", "places", ")", ")", "return", "False", "else", ":", "return", "True" ]
Test if two values are equal to a given number of places. This is based on python's unittest so may be covered by Python's license.
[ "Test", "if", "two", "values", "are", "equal", "to", "a", "given", "number", "of", "places", ".", "This", "is", "based", "on", "python", "s", "unittest", "so", "may", "be", "covered", "by", "Python", "s", "license", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L58-L74
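Two quick calls to almostequal, which is importable from eppy.modeleditor.

from eppy.modeleditor import almostequal

almostequal(1.0000001, 1.0000002, places=6)     # True: the difference rounds to zero at 6 places
almostequal(1.0, 1.1, places=7, printit=False)  # False, without printing the diagnostic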
santoshphilip/eppy
eppy/modeleditor.py
newrawobject
def newrawobject(data, commdct, key, block=None, defaultvalues=True): """Make a new object for the given key. Parameters ---------- data : Eplusdata object Data dictionary and list of objects for the entire model. commdct : list of dicts Comments from the IDD file describing each item type in `data`. key : str Object type of the object to add (in ALL_CAPS). Returns ------- list A list of field values for the new object. """ dtls = data.dtls key = key.upper() key_i = dtls.index(key) key_comm = commdct[key_i] # set default values if defaultvalues: obj = [comm.get('default', [''])[0] for comm in key_comm] else: obj = ['' for comm in key_comm] if not block: inblock = ['does not start with N'] * len(obj) else: inblock = block[key_i] for i, (f_comm, f_val, f_iddname) in enumerate(zip(key_comm, obj, inblock)): if i == 0: obj[i] = key else: obj[i] = convertafield(f_comm, f_val, f_iddname) obj = poptrailing(obj) # remove the blank items in a repeating field. return obj
python
def newrawobject(data, commdct, key, block=None, defaultvalues=True): """Make a new object for the given key. Parameters ---------- data : Eplusdata object Data dictionary and list of objects for the entire model. commdct : list of dicts Comments from the IDD file describing each item type in `data`. key : str Object type of the object to add (in ALL_CAPS). Returns ------- list A list of field values for the new object. """ dtls = data.dtls key = key.upper() key_i = dtls.index(key) key_comm = commdct[key_i] # set default values if defaultvalues: obj = [comm.get('default', [''])[0] for comm in key_comm] else: obj = ['' for comm in key_comm] if not block: inblock = ['does not start with N'] * len(obj) else: inblock = block[key_i] for i, (f_comm, f_val, f_iddname) in enumerate(zip(key_comm, obj, inblock)): if i == 0: obj[i] = key else: obj[i] = convertafield(f_comm, f_val, f_iddname) obj = poptrailing(obj) # remove the blank items in a repeating field. return obj
[ "def", "newrawobject", "(", "data", ",", "commdct", ",", "key", ",", "block", "=", "None", ",", "defaultvalues", "=", "True", ")", ":", "dtls", "=", "data", ".", "dtls", "key", "=", "key", ".", "upper", "(", ")", "key_i", "=", "dtls", ".", "index", "(", "key", ")", "key_comm", "=", "commdct", "[", "key_i", "]", "# set default values", "if", "defaultvalues", ":", "obj", "=", "[", "comm", ".", "get", "(", "'default'", ",", "[", "''", "]", ")", "[", "0", "]", "for", "comm", "in", "key_comm", "]", "else", ":", "obj", "=", "[", "''", "for", "comm", "in", "key_comm", "]", "if", "not", "block", ":", "inblock", "=", "[", "'does not start with N'", "]", "*", "len", "(", "obj", ")", "else", ":", "inblock", "=", "block", "[", "key_i", "]", "for", "i", ",", "(", "f_comm", ",", "f_val", ",", "f_iddname", ")", "in", "enumerate", "(", "zip", "(", "key_comm", ",", "obj", ",", "inblock", ")", ")", ":", "if", "i", "==", "0", ":", "obj", "[", "i", "]", "=", "key", "else", ":", "obj", "[", "i", "]", "=", "convertafield", "(", "f_comm", ",", "f_val", ",", "f_iddname", ")", "obj", "=", "poptrailing", "(", "obj", ")", "# remove the blank items in a repeating field.", "return", "obj" ]
Make a new object for the given key. Parameters ---------- data : Eplusdata object Data dictionary and list of objects for the entire model. commdct : list of dicts Comments from the IDD file describing each item type in `data`. key : str Object type of the object to add (in ALL_CAPS). Returns ------- list A list of field values for the new object.
[ "Make", "a", "new", "object", "for", "the", "given", "key", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L95-L133
santoshphilip/eppy
eppy/modeleditor.py
addthisbunch
def addthisbunch(bunchdt, data, commdct, thisbunch, theidf): """add a bunch to model. abunch usually comes from another idf file or it can be used to copy within the idf file""" key = thisbunch.key.upper() obj = copy.copy(thisbunch.obj) abunch = obj2bunch(data, commdct, obj) bunchdt[key].append(abunch) return abunch
python
def addthisbunch(bunchdt, data, commdct, thisbunch, theidf): """add a bunch to model. abunch usually comes from another idf file or it can be used to copy within the idf file""" key = thisbunch.key.upper() obj = copy.copy(thisbunch.obj) abunch = obj2bunch(data, commdct, obj) bunchdt[key].append(abunch) return abunch
[ "def", "addthisbunch", "(", "bunchdt", ",", "data", ",", "commdct", ",", "thisbunch", ",", "theidf", ")", ":", "key", "=", "thisbunch", ".", "key", ".", "upper", "(", ")", "obj", "=", "copy", ".", "copy", "(", "thisbunch", ".", "obj", ")", "abunch", "=", "obj2bunch", "(", "data", ",", "commdct", ",", "obj", ")", "bunchdt", "[", "key", "]", ".", "append", "(", "abunch", ")", "return", "abunch" ]
add a bunch to model. abunch usually comes from another idf file or it can be used to copy within the idf file
[ "add", "a", "bunch", "to", "model", ".", "abunch", "usually", "comes", "from", "another", "idf", "file", "or", "it", "can", "be", "used", "to", "copy", "within", "the", "idf", "file" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L136-L144
santoshphilip/eppy
eppy/modeleditor.py
obj2bunch
def obj2bunch(data, commdct, obj): """make a new bunch object using the data object""" dtls = data.dtls key = obj[0].upper() key_i = dtls.index(key) abunch = makeabunch(commdct, obj, key_i) return abunch
python
def obj2bunch(data, commdct, obj): """make a new bunch object using the data object""" dtls = data.dtls key = obj[0].upper() key_i = dtls.index(key) abunch = makeabunch(commdct, obj, key_i) return abunch
[ "def", "obj2bunch", "(", "data", ",", "commdct", ",", "obj", ")", ":", "dtls", "=", "data", ".", "dtls", "key", "=", "obj", "[", "0", "]", ".", "upper", "(", ")", "key_i", "=", "dtls", ".", "index", "(", "key", ")", "abunch", "=", "makeabunch", "(", "commdct", ",", "obj", ",", "key_i", ")", "return", "abunch" ]
make a new bunch object using the data object
[ "make", "a", "new", "bunch", "object", "using", "the", "data", "object" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L147-L153
santoshphilip/eppy
eppy/modeleditor.py
addobject
def addobject(bunchdt, data, commdct, key, theidf, aname=None, **kwargs): """add an object to the eplus model""" obj = newrawobject(data, commdct, key) abunch = obj2bunch(data, commdct, obj) if aname: namebunch(abunch, aname) data.dt[key].append(obj) bunchdt[key].append(abunch) for key, value in list(kwargs.items()): abunch[key] = value return abunch
python
def addobject(bunchdt, data, commdct, key, theidf, aname=None, **kwargs): """add an object to the eplus model""" obj = newrawobject(data, commdct, key) abunch = obj2bunch(data, commdct, obj) if aname: namebunch(abunch, aname) data.dt[key].append(obj) bunchdt[key].append(abunch) for key, value in list(kwargs.items()): abunch[key] = value return abunch
[ "def", "addobject", "(", "bunchdt", ",", "data", ",", "commdct", ",", "key", ",", "theidf", ",", "aname", "=", "None", ",", "*", "*", "kwargs", ")", ":", "obj", "=", "newrawobject", "(", "data", ",", "commdct", ",", "key", ")", "abunch", "=", "obj2bunch", "(", "data", ",", "commdct", ",", "obj", ")", "if", "aname", ":", "namebunch", "(", "abunch", ",", "aname", ")", "data", ".", "dt", "[", "key", "]", ".", "append", "(", "obj", ")", "bunchdt", "[", "key", "]", ".", "append", "(", "abunch", ")", "for", "key", ",", "value", "in", "list", "(", "kwargs", ".", "items", "(", ")", ")", ":", "abunch", "[", "key", "]", "=", "value", "return", "abunch" ]
add an object to the eplus model
[ "add", "an", "object", "to", "the", "eplus", "model" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L165-L175
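In eppy this helper sits behind IDF.newidfobject; a minimal sketch assuming a loaded IDF object named idf, with a placeholder object type and name.

# assumes `idf` is an eppy IDF object
zone = idf.newidfobject("ZONE", Name="Conference Room")  # object type key plus field values as keyword arguments
print(zone.Name)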
santoshphilip/eppy
eppy/modeleditor.py
getnamedargs
def getnamedargs(*args, **kwargs): """allows you to pass a dict and named args so you can pass ({'a':5, 'b':3}, c=8) and get dict(a=5, b=3, c=8)""" adict = {} for arg in args: if isinstance(arg, dict): adict.update(arg) adict.update(kwargs) return adict
python
def getnamedargs(*args, **kwargs): """allows you to pass a dict and named args so you can pass ({'a':5, 'b':3}, c=8) and get dict(a=5, b=3, c=8)""" adict = {} for arg in args: if isinstance(arg, dict): adict.update(arg) adict.update(kwargs) return adict
[ "def", "getnamedargs", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "adict", "=", "{", "}", "for", "arg", "in", "args", ":", "if", "isinstance", "(", "arg", ",", "dict", ")", ":", "adict", ".", "update", "(", "arg", ")", "adict", ".", "update", "(", "kwargs", ")", "return", "adict" ]
allows you to pass a dict and named args so you can pass ({'a':5, 'b':3}, c=8) and get dict(a=5, b=3, c=8)
[ "allows", "you", "to", "pass", "a", "dict", "and", "named", "args", "so", "you", "can", "pass", "(", "{", "a", ":", "5", "b", ":", "3", "}", "c", "=", "8", ")", "and", "get", "dict", "(", "a", "=", "5", "b", "=", "3", "c", "=", "8", ")" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L178-L187
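The docstring's example written out as a runnable call.

from eppy.modeleditor import getnamedargs

assert getnamedargs({'a': 5, 'b': 3}, c=8) == dict(a=5, b=3, c=8)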
santoshphilip/eppy
eppy/modeleditor.py
addobject1
def addobject1(bunchdt, data, commdct, key, **kwargs): """add an object to the eplus model""" obj = newrawobject(data, commdct, key) abunch = obj2bunch(data, commdct, obj) data.dt[key].append(obj) bunchdt[key].append(abunch) # adict = getnamedargs(*args, **kwargs) for kkey, value in iteritems(kwargs): abunch[kkey] = value return abunch
python
def addobject1(bunchdt, data, commdct, key, **kwargs): """add an object to the eplus model""" obj = newrawobject(data, commdct, key) abunch = obj2bunch(data, commdct, obj) data.dt[key].append(obj) bunchdt[key].append(abunch) # adict = getnamedargs(*args, **kwargs) for kkey, value in iteritems(kwargs): abunch[kkey] = value return abunch
[ "def", "addobject1", "(", "bunchdt", ",", "data", ",", "commdct", ",", "key", ",", "*", "*", "kwargs", ")", ":", "obj", "=", "newrawobject", "(", "data", ",", "commdct", ",", "key", ")", "abunch", "=", "obj2bunch", "(", "data", ",", "commdct", ",", "obj", ")", "data", ".", "dt", "[", "key", "]", ".", "append", "(", "obj", ")", "bunchdt", "[", "key", "]", ".", "append", "(", "abunch", ")", "# adict = getnamedargs(*args, **kwargs)", "for", "kkey", ",", "value", "in", "iteritems", "(", "kwargs", ")", ":", "abunch", "[", "kkey", "]", "=", "value", "return", "abunch" ]
add an object to the eplus model
[ "add", "an", "object", "to", "the", "eplus", "model" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L190-L199
santoshphilip/eppy
eppy/modeleditor.py
getobject
def getobject(bunchdt, key, name): """get the object if you have the key and the name returns a list of objects, in case you have more than one You should not have more than one""" # TODO : throw exception if more than one object, or return more objects idfobjects = bunchdt[key] if idfobjects: # second item in list is a unique ID unique_id = idfobjects[0].objls[1] theobjs = [idfobj for idfobj in idfobjects if idfobj[unique_id].upper() == name.upper()] try: return theobjs[0] except IndexError: return None
python
def getobject(bunchdt, key, name): """get the object if you have the key and the name returns a list of objects, in case you have more than one You should not have more than one""" # TODO : throw exception if more than one object, or return more objects idfobjects = bunchdt[key] if idfobjects: # second item in list is a unique ID unique_id = idfobjects[0].objls[1] theobjs = [idfobj for idfobj in idfobjects if idfobj[unique_id].upper() == name.upper()] try: return theobjs[0] except IndexError: return None
[ "def", "getobject", "(", "bunchdt", ",", "key", ",", "name", ")", ":", "# TODO : throw exception if more than one object, or return more objects", "idfobjects", "=", "bunchdt", "[", "key", "]", "if", "idfobjects", ":", "# second item in list is a unique ID", "unique_id", "=", "idfobjects", "[", "0", "]", ".", "objls", "[", "1", "]", "theobjs", "=", "[", "idfobj", "for", "idfobj", "in", "idfobjects", "if", "idfobj", "[", "unique_id", "]", ".", "upper", "(", ")", "==", "name", ".", "upper", "(", ")", "]", "try", ":", "return", "theobjs", "[", "0", "]", "except", "IndexError", ":", "return", "None" ]
get the object if you have the key and the name returns a list of objects, in case you have more than one You should not have more than one
[ "get", "the", "object", "if", "you", "have", "the", "key", "and", "the", "name", "returns", "a", "list", "of", "objects", "in", "case", "you", "have", "more", "than", "one", "You", "should", "not", "have", "more", "than", "one" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L202-L216
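A short sketch of the module-level getobject with hypothetical paths. Despite the docstring wording, it returns a single object or None, and the name match is case-insensitive because both sides are upper-cased; the IDF class exposes the same lookup as idf.getobject(key, name), as the zone helpers further down use.

from eppy.modeleditor import IDF, getobject

IDF.setiddname("/path/to/Energy+.idd")      # hypothetical IDD path
idf = IDF("/path/to/building.idf")          # hypothetical model path
zone = getobject(idf.idfobjects, "ZONE", "main zone")   # case-insensitive name lookup
if zone is None:
    print("no ZONE named 'main zone'")      # None when nothing matches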
santoshphilip/eppy
eppy/modeleditor.py
__objecthasfields
def __objecthasfields(bunchdt, data, commdct, idfobject, places=7, **kwargs): """test if the idf object has the field values in kwargs""" for key, value in list(kwargs.items()): if not isfieldvalue( bunchdt, data, commdct, idfobject, key, value, places=places): return False return True
python
def __objecthasfields(bunchdt, data, commdct, idfobject, places=7, **kwargs): """test if the idf object has the field values in kwargs""" for key, value in list(kwargs.items()): if not isfieldvalue( bunchdt, data, commdct, idfobject, key, value, places=places): return False return True
[ "def", "__objecthasfields", "(", "bunchdt", ",", "data", ",", "commdct", ",", "idfobject", ",", "places", "=", "7", ",", "*", "*", "kwargs", ")", ":", "for", "key", ",", "value", "in", "list", "(", "kwargs", ".", "items", "(", ")", ")", ":", "if", "not", "isfieldvalue", "(", "bunchdt", ",", "data", ",", "commdct", ",", "idfobject", ",", "key", ",", "value", ",", "places", "=", "places", ")", ":", "return", "False", "return", "True" ]
test if the idf object has the field values in kwargs
[ "test", "if", "the", "idf", "object", "has", "the", "field", "values", "in", "kwargs" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L219-L226
santoshphilip/eppy
eppy/modeleditor.py
getobjects
def getobjects(bunchdt, data, commdct, key, places=7, **kwargs): """get all the objects of key that matches the fields in **kwargs""" idfobjects = bunchdt[key] allobjs = [] for obj in idfobjects: if __objecthasfields( bunchdt, data, commdct, obj, places=places, **kwargs): allobjs.append(obj) return allobjs
python
def getobjects(bunchdt, data, commdct, key, places=7, **kwargs): """get all the objects of key that matches the fields in **kwargs""" idfobjects = bunchdt[key] allobjs = [] for obj in idfobjects: if __objecthasfields( bunchdt, data, commdct, obj, places=places, **kwargs): allobjs.append(obj) return allobjs
[ "def", "getobjects", "(", "bunchdt", ",", "data", ",", "commdct", ",", "key", ",", "places", "=", "7", ",", "*", "*", "kwargs", ")", ":", "idfobjects", "=", "bunchdt", "[", "key", "]", "allobjs", "=", "[", "]", "for", "obj", "in", "idfobjects", ":", "if", "__objecthasfields", "(", "bunchdt", ",", "data", ",", "commdct", ",", "obj", ",", "places", "=", "places", ",", "*", "*", "kwargs", ")", ":", "allobjs", ".", "append", "(", "obj", ")", "return", "allobjs" ]
get all the objects of key that matches the fields in **kwargs
[ "get", "all", "the", "objects", "of", "key", "that", "matches", "the", "fields", "in", "**", "kwargs" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L229-L238
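A hedged sketch of filtering with getobjects. It assumes a loaded IDF as in the earlier sketches, with idf.idfobjects, idf.model and idf.idd_info passed as bunchdt, data and commdct, and an illustrative surface filter.

from eppy.modeleditor import IDF, getobjects

IDF.setiddname("/path/to/Energy+.idd")      # hypothetical IDD path
idf = IDF("/path/to/building.idf")          # hypothetical model path
# All detailed building surfaces whose Surface_Type field equals "Wall";
# text fields compare case-insensitively via isfieldvalue.
walls = getobjects(idf.idfobjects, idf.model, idf.idd_info,
                   "BUILDINGSURFACE:DETAILED", Surface_Type="Wall")
print(len(walls))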
santoshphilip/eppy
eppy/modeleditor.py
iddofobject
def iddofobject(data, commdct, key): """from commdct, return the idd of the object key""" dtls = data.dtls i = dtls.index(key) return commdct[i]
python
def iddofobject(data, commdct, key): """from commdct, return the idd of the object key""" dtls = data.dtls i = dtls.index(key) return commdct[i]
[ "def", "iddofobject", "(", "data", ",", "commdct", ",", "key", ")", ":", "dtls", "=", "data", ".", "dtls", "i", "=", "dtls", ".", "index", "(", "key", ")", "return", "commdct", "[", "i", "]" ]
from commdct, return the idd of the object key
[ "from", "commdct", "return", "the", "idd", "of", "the", "object", "key" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L241-L245
santoshphilip/eppy
eppy/modeleditor.py
getextensibleindex
def getextensibleindex(bunchdt, data, commdct, key, objname): """get the index of the first extensible item""" theobject = getobject(bunchdt, key, objname) if theobject == None: return None theidd = iddofobject(data, commdct, key) extensible_i = [ i for i in range(len(theidd)) if 'begin-extensible' in theidd[i]] try: extensible_i = extensible_i[0] except IndexError: return theobject
python
def getextensibleindex(bunchdt, data, commdct, key, objname): """get the index of the first extensible item""" theobject = getobject(bunchdt, key, objname) if theobject == None: return None theidd = iddofobject(data, commdct, key) extensible_i = [ i for i in range(len(theidd)) if 'begin-extensible' in theidd[i]] try: extensible_i = extensible_i[0] except IndexError: return theobject
[ "def", "getextensibleindex", "(", "bunchdt", ",", "data", ",", "commdct", ",", "key", ",", "objname", ")", ":", "theobject", "=", "getobject", "(", "bunchdt", ",", "key", ",", "objname", ")", "if", "theobject", "==", "None", ":", "return", "None", "theidd", "=", "iddofobject", "(", "data", ",", "commdct", ",", "key", ")", "extensible_i", "=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "theidd", ")", ")", "if", "'begin-extensible'", "in", "theidd", "[", "i", "]", "]", "try", ":", "extensible_i", "=", "extensible_i", "[", "0", "]", "except", "IndexError", ":", "return", "theobject" ]
get the index of the first extensible item
[ "get", "the", "index", "of", "the", "first", "extensible", "item" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L248-L259
santoshphilip/eppy
eppy/modeleditor.py
removeextensibles
def removeextensibles(bunchdt, data, commdct, key, objname): """remove the extensible items in the object""" theobject = getobject(bunchdt, key, objname) if theobject == None: return theobject theidd = iddofobject(data, commdct, key) extensible_i = [ i for i in range(len(theidd)) if 'begin-extensible' in theidd[i]] try: extensible_i = extensible_i[0] except IndexError: return theobject while True: try: popped = theobject.obj.pop(extensible_i) except IndexError: break return theobject
python
def removeextensibles(bunchdt, data, commdct, key, objname): """remove the extensible items in the object""" theobject = getobject(bunchdt, key, objname) if theobject == None: return theobject theidd = iddofobject(data, commdct, key) extensible_i = [ i for i in range(len(theidd)) if 'begin-extensible' in theidd[i]] try: extensible_i = extensible_i[0] except IndexError: return theobject while True: try: popped = theobject.obj.pop(extensible_i) except IndexError: break return theobject
[ "def", "removeextensibles", "(", "bunchdt", ",", "data", ",", "commdct", ",", "key", ",", "objname", ")", ":", "theobject", "=", "getobject", "(", "bunchdt", ",", "key", ",", "objname", ")", "if", "theobject", "==", "None", ":", "return", "theobject", "theidd", "=", "iddofobject", "(", "data", ",", "commdct", ",", "key", ")", "extensible_i", "=", "[", "i", "for", "i", "in", "range", "(", "len", "(", "theidd", ")", ")", "if", "'begin-extensible'", "in", "theidd", "[", "i", "]", "]", "try", ":", "extensible_i", "=", "extensible_i", "[", "0", "]", "except", "IndexError", ":", "return", "theobject", "while", "True", ":", "try", ":", "popped", "=", "theobject", ".", "obj", ".", "pop", "(", "extensible_i", ")", "except", "IndexError", ":", "break", "return", "theobject" ]
remove the extensible items in the object
[ "remove", "the", "extensible", "items", "in", "the", "object" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L262-L279
santoshphilip/eppy
eppy/modeleditor.py
getfieldcomm
def getfieldcomm(bunchdt, data, commdct, idfobject, fieldname): """get the idd comment for the field""" key = idfobject.obj[0].upper() keyi = data.dtls.index(key) fieldi = idfobject.objls.index(fieldname) thiscommdct = commdct[keyi][fieldi] return thiscommdct
python
def getfieldcomm(bunchdt, data, commdct, idfobject, fieldname): """get the idd comment for the field""" key = idfobject.obj[0].upper() keyi = data.dtls.index(key) fieldi = idfobject.objls.index(fieldname) thiscommdct = commdct[keyi][fieldi] return thiscommdct
[ "def", "getfieldcomm", "(", "bunchdt", ",", "data", ",", "commdct", ",", "idfobject", ",", "fieldname", ")", ":", "key", "=", "idfobject", ".", "obj", "[", "0", "]", ".", "upper", "(", ")", "keyi", "=", "data", ".", "dtls", ".", "index", "(", "key", ")", "fieldi", "=", "idfobject", ".", "objls", ".", "index", "(", "fieldname", ")", "thiscommdct", "=", "commdct", "[", "keyi", "]", "[", "fieldi", "]", "return", "thiscommdct" ]
get the idd comment for the field
[ "get", "the", "idd", "comment", "for", "the", "field" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L282-L288
santoshphilip/eppy
eppy/modeleditor.py
is_retaincase
def is_retaincase(bunchdt, data, commdct, idfobject, fieldname): """test if case has to be retained for that field""" thiscommdct = getfieldcomm(bunchdt, data, commdct, idfobject, fieldname) return 'retaincase' in thiscommdct
python
def is_retaincase(bunchdt, data, commdct, idfobject, fieldname): """test if case has to be retained for that field""" thiscommdct = getfieldcomm(bunchdt, data, commdct, idfobject, fieldname) return 'retaincase' in thiscommdct
[ "def", "is_retaincase", "(", "bunchdt", ",", "data", ",", "commdct", ",", "idfobject", ",", "fieldname", ")", ":", "thiscommdct", "=", "getfieldcomm", "(", "bunchdt", ",", "data", ",", "commdct", ",", "idfobject", ",", "fieldname", ")", "return", "'retaincase'", "in", "thiscommdct" ]
test if case has to be retained for that field
[ "test", "if", "case", "has", "to", "be", "retained", "for", "that", "field" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L291-L294
santoshphilip/eppy
eppy/modeleditor.py
isfieldvalue
def isfieldvalue(bunchdt, data, commdct, idfobj, fieldname, value, places=7): """test if idfobj.field == value""" # do a quick type check # if type(idfobj[fieldname]) != type(value): # return False # takes care of autocalculate and real # check float thiscommdct = getfieldcomm(bunchdt, data, commdct, idfobj, fieldname) if 'type' in thiscommdct: if thiscommdct['type'][0] in ('real', 'integer'): # test for autocalculate try: if idfobj[fieldname].upper() == 'AUTOCALCULATE': if value.upper() == 'AUTOCALCULATE': return True except AttributeError: pass return almostequal(float(idfobj[fieldname]), float(value), places, False) # check retaincase if is_retaincase(bunchdt, data, commdct, idfobj, fieldname): return idfobj[fieldname] == value else: return idfobj[fieldname].upper() == value.upper()
python
def isfieldvalue(bunchdt, data, commdct, idfobj, fieldname, value, places=7): """test if idfobj.field == value""" # do a quick type check # if type(idfobj[fieldname]) != type(value): # return False # takes care of autocalculate and real # check float thiscommdct = getfieldcomm(bunchdt, data, commdct, idfobj, fieldname) if 'type' in thiscommdct: if thiscommdct['type'][0] in ('real', 'integer'): # test for autocalculate try: if idfobj[fieldname].upper() == 'AUTOCALCULATE': if value.upper() == 'AUTOCALCULATE': return True except AttributeError: pass return almostequal(float(idfobj[fieldname]), float(value), places, False) # check retaincase if is_retaincase(bunchdt, data, commdct, idfobj, fieldname): return idfobj[fieldname] == value else: return idfobj[fieldname].upper() == value.upper()
[ "def", "isfieldvalue", "(", "bunchdt", ",", "data", ",", "commdct", ",", "idfobj", ",", "fieldname", ",", "value", ",", "places", "=", "7", ")", ":", "# do a quick type check", "# if type(idfobj[fieldname]) != type(value):", "# return False # takes care of autocalculate and real", "# check float", "thiscommdct", "=", "getfieldcomm", "(", "bunchdt", ",", "data", ",", "commdct", ",", "idfobj", ",", "fieldname", ")", "if", "'type'", "in", "thiscommdct", ":", "if", "thiscommdct", "[", "'type'", "]", "[", "0", "]", "in", "(", "'real'", ",", "'integer'", ")", ":", "# test for autocalculate", "try", ":", "if", "idfobj", "[", "fieldname", "]", ".", "upper", "(", ")", "==", "'AUTOCALCULATE'", ":", "if", "value", ".", "upper", "(", ")", "==", "'AUTOCALCULATE'", ":", "return", "True", "except", "AttributeError", ":", "pass", "return", "almostequal", "(", "float", "(", "idfobj", "[", "fieldname", "]", ")", ",", "float", "(", "value", ")", ",", "places", ",", "False", ")", "# check retaincase", "if", "is_retaincase", "(", "bunchdt", ",", "data", ",", "commdct", ",", "idfobj", ",", "fieldname", ")", ":", "return", "idfobj", "[", "fieldname", "]", "==", "value", "else", ":", "return", "idfobj", "[", "fieldname", "]", ".", "upper", "(", ")", "==", "value", ".", "upper", "(", ")" ]
test if idfobj.field == value
[ "test", "if", "idfobj", ".", "field", "==", "value" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L297-L318
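A sketch of a single-field comparison with isfieldvalue, assuming a loaded IDF as above; the zone name is illustrative. Text fields compare case-insensitively unless the IDD marks them retaincase, real and integer fields compare with almostequal to the given number of places, and AUTOCALCULATE on both sides also counts as equal.

from eppy.modeleditor import IDF, isfieldvalue

IDF.setiddname("/path/to/Energy+.idd")      # hypothetical IDD path
idf = IDF("/path/to/building.idf")          # hypothetical model path
zone = idf.idfobjects["ZONE"][0]
# Case-insensitive text comparison on the Name field.
print(isfieldvalue(idf.idfobjects, idf.model, idf.idd_info,
                   zone, "Name", "MAIN ZONE"))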
santoshphilip/eppy
eppy/modeleditor.py
equalfield
def equalfield(bunchdt, data, commdct, idfobj1, idfobj2, fieldname, places=7): """returns true if the two fields are equal will test for retaincase places is used if the field is float/real""" # TODO test if both objects are of same type key1 = idfobj1.obj[0].upper() key2 = idfobj2.obj[0].upper() if key1 != key2: raise NotSameObjectError vee2 = idfobj2[fieldname] return isfieldvalue( bunchdt, data, commdct, idfobj1, fieldname, vee2, places=places)
python
def equalfield(bunchdt, data, commdct, idfobj1, idfobj2, fieldname, places=7): """returns true if the two fields are equal will test for retaincase places is used if the field is float/real""" # TODO test if both objects are of same type key1 = idfobj1.obj[0].upper() key2 = idfobj2.obj[0].upper() if key1 != key2: raise NotSameObjectError vee2 = idfobj2[fieldname] return isfieldvalue( bunchdt, data, commdct, idfobj1, fieldname, vee2, places=places)
[ "def", "equalfield", "(", "bunchdt", ",", "data", ",", "commdct", ",", "idfobj1", ",", "idfobj2", ",", "fieldname", ",", "places", "=", "7", ")", ":", "# TODO test if both objects are of same type", "key1", "=", "idfobj1", ".", "obj", "[", "0", "]", ".", "upper", "(", ")", "key2", "=", "idfobj2", ".", "obj", "[", "0", "]", ".", "upper", "(", ")", "if", "key1", "!=", "key2", ":", "raise", "NotSameObjectError", "vee2", "=", "idfobj2", "[", "fieldname", "]", "return", "isfieldvalue", "(", "bunchdt", ",", "data", ",", "commdct", ",", "idfobj1", ",", "fieldname", ",", "vee2", ",", "places", "=", "places", ")" ]
returns true if the two fields are equal will test for retaincase places is used if the field is float/real
[ "returns", "true", "if", "the", "two", "fields", "are", "equal", "will", "test", "for", "retaincase", "places", "is", "used", "if", "the", "field", "is", "float", "/", "real" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L321-L333
santoshphilip/eppy
eppy/modeleditor.py
getrefnames
def getrefnames(idf, objname): """get the reference names for this object""" iddinfo = idf.idd_info dtls = idf.model.dtls index = dtls.index(objname) fieldidds = iddinfo[index] for fieldidd in fieldidds: if 'field' in fieldidd: if fieldidd['field'][0].endswith('Name'): if 'reference' in fieldidd: return fieldidd['reference'] else: return []
python
def getrefnames(idf, objname): """get the reference names for this object""" iddinfo = idf.idd_info dtls = idf.model.dtls index = dtls.index(objname) fieldidds = iddinfo[index] for fieldidd in fieldidds: if 'field' in fieldidd: if fieldidd['field'][0].endswith('Name'): if 'reference' in fieldidd: return fieldidd['reference'] else: return []
[ "def", "getrefnames", "(", "idf", ",", "objname", ")", ":", "iddinfo", "=", "idf", ".", "idd_info", "dtls", "=", "idf", ".", "model", ".", "dtls", "index", "=", "dtls", ".", "index", "(", "objname", ")", "fieldidds", "=", "iddinfo", "[", "index", "]", "for", "fieldidd", "in", "fieldidds", ":", "if", "'field'", "in", "fieldidd", ":", "if", "fieldidd", "[", "'field'", "]", "[", "0", "]", ".", "endswith", "(", "'Name'", ")", ":", "if", "'reference'", "in", "fieldidd", ":", "return", "fieldidd", "[", "'reference'", "]", "else", ":", "return", "[", "]" ]
get the reference names for this object
[ "get", "the", "reference", "names", "for", "this", "object" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L336-L348
santoshphilip/eppy
eppy/modeleditor.py
getallobjlists
def getallobjlists(idf, refname): """get all object-list fields for refname return a list: [('OBJKEY', refname, fieldindexlist), ...] where fieldindexlist = index of the field where the object-list = refname """ dtls = idf.model.dtls objlists = [] for i, fieldidds in enumerate(idf.idd_info): indexlist = [] for j, fieldidd in enumerate(fieldidds): if 'object-list' in fieldidd: if fieldidd['object-list'][0].upper() == refname.upper(): indexlist.append(j) if indexlist != []: objkey = dtls[i] objlists.append((objkey, refname, indexlist)) return objlists
python
def getallobjlists(idf, refname): """get all object-list fields for refname return a list: [('OBJKEY', refname, fieldindexlist), ...] where fieldindexlist = index of the field where the object-list = refname """ dtls = idf.model.dtls objlists = [] for i, fieldidds in enumerate(idf.idd_info): indexlist = [] for j, fieldidd in enumerate(fieldidds): if 'object-list' in fieldidd: if fieldidd['object-list'][0].upper() == refname.upper(): indexlist.append(j) if indexlist != []: objkey = dtls[i] objlists.append((objkey, refname, indexlist)) return objlists
[ "def", "getallobjlists", "(", "idf", ",", "refname", ")", ":", "dtls", "=", "idf", ".", "model", ".", "dtls", "objlists", "=", "[", "]", "for", "i", ",", "fieldidds", "in", "enumerate", "(", "idf", ".", "idd_info", ")", ":", "indexlist", "=", "[", "]", "for", "j", ",", "fieldidd", "in", "enumerate", "(", "fieldidds", ")", ":", "if", "'object-list'", "in", "fieldidd", ":", "if", "fieldidd", "[", "'object-list'", "]", "[", "0", "]", ".", "upper", "(", ")", "==", "refname", ".", "upper", "(", ")", ":", "indexlist", ".", "append", "(", "j", ")", "if", "indexlist", "!=", "[", "]", ":", "objkey", "=", "dtls", "[", "i", "]", "objlists", ".", "append", "(", "(", "objkey", ",", "refname", ",", "indexlist", ")", ")", "return", "objlists" ]
get all object-list fields for refname return a list: [('OBJKEY', refname, fieldindexlist), ...] where fieldindexlist = index of the field where the object-list = refname
[ "get", "all", "object", "-", "list", "fields", "for", "refname", "return", "a", "list", ":", "[", "(", "OBJKEY", "refname", "fieldindexlist", ")", "...", "]", "where", "fieldindexlist", "=", "index", "of", "the", "field", "where", "the", "object", "-", "list", "=", "refname" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L351-L368
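A sketch of inspecting the reference plumbing that rename (next record) relies on, assuming a loaded IDF as above; the 'ZoneNames' reference name is only an example of what getrefnames may return for ZONE.

from eppy.modeleditor import IDF, getrefnames, getallobjlists

IDF.setiddname("/path/to/Energy+.idd")      # hypothetical IDD path
idf = IDF("/path/to/building.idf")          # hypothetical model path
for refname in getrefnames(idf, "ZONE"):    # e.g. 'ZoneNames'
    # Each tuple: (OBJKEY, refname, [indexes of fields holding that object-list]).
    for objkey, _, fieldindexes in getallobjlists(idf, refname):
        print(objkey, fieldindexes)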
santoshphilip/eppy
eppy/modeleditor.py
rename
def rename(idf, objkey, objname, newname): """rename all the refrences to this objname""" refnames = getrefnames(idf, objkey) for refname in refnames: objlists = getallobjlists(idf, refname) # [('OBJKEY', refname, fieldindexlist), ...] for refname in refnames: # TODO : there seems to be a duplication in this loop. Check. # refname appears in both loops for robjkey, refname, fieldindexlist in objlists: idfobjects = idf.idfobjects[robjkey] for idfobject in idfobjects: for findex in fieldindexlist: # for each field if idfobject[idfobject.objls[findex]] == objname: idfobject[idfobject.objls[findex]] = newname theobject = idf.getobject(objkey, objname) fieldname = [item for item in theobject.objls if item.endswith('Name')][0] theobject[fieldname] = newname return theobject
python
def rename(idf, objkey, objname, newname): """rename all the refrences to this objname""" refnames = getrefnames(idf, objkey) for refname in refnames: objlists = getallobjlists(idf, refname) # [('OBJKEY', refname, fieldindexlist), ...] for refname in refnames: # TODO : there seems to be a duplication in this loop. Check. # refname appears in both loops for robjkey, refname, fieldindexlist in objlists: idfobjects = idf.idfobjects[robjkey] for idfobject in idfobjects: for findex in fieldindexlist: # for each field if idfobject[idfobject.objls[findex]] == objname: idfobject[idfobject.objls[findex]] = newname theobject = idf.getobject(objkey, objname) fieldname = [item for item in theobject.objls if item.endswith('Name')][0] theobject[fieldname] = newname return theobject
[ "def", "rename", "(", "idf", ",", "objkey", ",", "objname", ",", "newname", ")", ":", "refnames", "=", "getrefnames", "(", "idf", ",", "objkey", ")", "for", "refname", "in", "refnames", ":", "objlists", "=", "getallobjlists", "(", "idf", ",", "refname", ")", "# [('OBJKEY', refname, fieldindexlist), ...]", "for", "refname", "in", "refnames", ":", "# TODO : there seems to be a duplication in this loop. Check.", "# refname appears in both loops", "for", "robjkey", ",", "refname", ",", "fieldindexlist", "in", "objlists", ":", "idfobjects", "=", "idf", ".", "idfobjects", "[", "robjkey", "]", "for", "idfobject", "in", "idfobjects", ":", "for", "findex", "in", "fieldindexlist", ":", "# for each field", "if", "idfobject", "[", "idfobject", ".", "objls", "[", "findex", "]", "]", "==", "objname", ":", "idfobject", "[", "idfobject", ".", "objls", "[", "findex", "]", "]", "=", "newname", "theobject", "=", "idf", ".", "getobject", "(", "objkey", ",", "objname", ")", "fieldname", "=", "[", "item", "for", "item", "in", "theobject", ".", "objls", "if", "item", ".", "endswith", "(", "'Name'", ")", "]", "[", "0", "]", "theobject", "[", "fieldname", "]", "=", "newname", "return", "theobject" ]
rename all the refrences to this objname
[ "rename", "all", "the", "refrences", "to", "this", "objname" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L371-L389
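A sketch of rename with a hypothetical model and illustrative names. The call updates the object's own name field and every field elsewhere that refers to it through an object-list.

from eppy.modeleditor import IDF, rename

IDF.setiddname("/path/to/Energy+.idd")      # hypothetical IDD path
idf = IDF("/path/to/building.idf")          # hypothetical model path
rename(idf, "ZONE", "main zone", "Main Zone Renamed")
idf.save()                                  # persist the renamed references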
santoshphilip/eppy
eppy/modeleditor.py
zonearea
def zonearea(idf, zonename, debug=False): """zone area""" zone = idf.getobject('ZONE', zonename) surfs = idf.idfobjects['BuildingSurface:Detailed'.upper()] zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name] floors = [s for s in zone_surfs if s.Surface_Type.upper() == 'FLOOR'] if debug: print(len(floors)) print([floor.area for floor in floors]) # area = sum([floor.area for floor in floors]) if floors != []: area = zonearea_floor(idf, zonename) else: area = zonearea_roofceiling(idf, zonename) return area
python
def zonearea(idf, zonename, debug=False): """zone area""" zone = idf.getobject('ZONE', zonename) surfs = idf.idfobjects['BuildingSurface:Detailed'.upper()] zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name] floors = [s for s in zone_surfs if s.Surface_Type.upper() == 'FLOOR'] if debug: print(len(floors)) print([floor.area for floor in floors]) # area = sum([floor.area for floor in floors]) if floors != []: area = zonearea_floor(idf, zonename) else: area = zonearea_roofceiling(idf, zonename) return area
[ "def", "zonearea", "(", "idf", ",", "zonename", ",", "debug", "=", "False", ")", ":", "zone", "=", "idf", ".", "getobject", "(", "'ZONE'", ",", "zonename", ")", "surfs", "=", "idf", ".", "idfobjects", "[", "'BuildingSurface:Detailed'", ".", "upper", "(", ")", "]", "zone_surfs", "=", "[", "s", "for", "s", "in", "surfs", "if", "s", ".", "Zone_Name", "==", "zone", ".", "Name", "]", "floors", "=", "[", "s", "for", "s", "in", "zone_surfs", "if", "s", ".", "Surface_Type", ".", "upper", "(", ")", "==", "'FLOOR'", "]", "if", "debug", ":", "print", "(", "len", "(", "floors", ")", ")", "print", "(", "[", "floor", ".", "area", "for", "floor", "in", "floors", "]", ")", "# area = sum([floor.area for floor in floors])", "if", "floors", "!=", "[", "]", ":", "area", "=", "zonearea_floor", "(", "idf", ",", "zonename", ")", "else", ":", "area", "=", "zonearea_roofceiling", "(", "idf", ",", "zonename", ")", "return", "area" ]
zone area
[ "zone", "area" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L392-L406
santoshphilip/eppy
eppy/modeleditor.py
zone_height_min2max
def zone_height_min2max(idf, zonename, debug=False): """zone height = max-min""" zone = idf.getobject('ZONE', zonename) surfs = idf.idfobjects['BuildingSurface:Detailed'.upper()] zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name] surf_xyzs = [eppy.function_helpers.getcoords(s) for s in zone_surfs] surf_xyzs = list(itertools.chain(*surf_xyzs)) surf_zs = [z for x, y, z in surf_xyzs] topz = max(surf_zs) botz = min(surf_zs) height = topz - botz return height
python
def zone_height_min2max(idf, zonename, debug=False): """zone height = max-min""" zone = idf.getobject('ZONE', zonename) surfs = idf.idfobjects['BuildingSurface:Detailed'.upper()] zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name] surf_xyzs = [eppy.function_helpers.getcoords(s) for s in zone_surfs] surf_xyzs = list(itertools.chain(*surf_xyzs)) surf_zs = [z for x, y, z in surf_xyzs] topz = max(surf_zs) botz = min(surf_zs) height = topz - botz return height
[ "def", "zone_height_min2max", "(", "idf", ",", "zonename", ",", "debug", "=", "False", ")", ":", "zone", "=", "idf", ".", "getobject", "(", "'ZONE'", ",", "zonename", ")", "surfs", "=", "idf", ".", "idfobjects", "[", "'BuildingSurface:Detailed'", ".", "upper", "(", ")", "]", "zone_surfs", "=", "[", "s", "for", "s", "in", "surfs", "if", "s", ".", "Zone_Name", "==", "zone", ".", "Name", "]", "surf_xyzs", "=", "[", "eppy", ".", "function_helpers", ".", "getcoords", "(", "s", ")", "for", "s", "in", "zone_surfs", "]", "surf_xyzs", "=", "list", "(", "itertools", ".", "chain", "(", "*", "surf_xyzs", ")", ")", "surf_zs", "=", "[", "z", "for", "x", ",", "y", ",", "z", "in", "surf_xyzs", "]", "topz", "=", "max", "(", "surf_zs", ")", "botz", "=", "min", "(", "surf_zs", ")", "height", "=", "topz", "-", "botz", "return", "height" ]
zone height = max-min
[ "zone", "height", "=", "max", "-", "min" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L436-L447
santoshphilip/eppy
eppy/modeleditor.py
zoneheight
def zoneheight(idf, zonename, debug=False): """zone height""" zone = idf.getobject('ZONE', zonename) surfs = idf.idfobjects['BuildingSurface:Detailed'.upper()] zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name] floors = [s for s in zone_surfs if s.Surface_Type.upper() == 'FLOOR'] roofs = [s for s in zone_surfs if s.Surface_Type.upper() == 'ROOF'] if floors == [] or roofs == []: height = zone_height_min2max(idf, zonename) else: height = zone_floor2roofheight(idf, zonename) return height
python
def zoneheight(idf, zonename, debug=False): """zone height""" zone = idf.getobject('ZONE', zonename) surfs = idf.idfobjects['BuildingSurface:Detailed'.upper()] zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name] floors = [s for s in zone_surfs if s.Surface_Type.upper() == 'FLOOR'] roofs = [s for s in zone_surfs if s.Surface_Type.upper() == 'ROOF'] if floors == [] or roofs == []: height = zone_height_min2max(idf, zonename) else: height = zone_floor2roofheight(idf, zonename) return height
[ "def", "zoneheight", "(", "idf", ",", "zonename", ",", "debug", "=", "False", ")", ":", "zone", "=", "idf", ".", "getobject", "(", "'ZONE'", ",", "zonename", ")", "surfs", "=", "idf", ".", "idfobjects", "[", "'BuildingSurface:Detailed'", ".", "upper", "(", ")", "]", "zone_surfs", "=", "[", "s", "for", "s", "in", "surfs", "if", "s", ".", "Zone_Name", "==", "zone", ".", "Name", "]", "floors", "=", "[", "s", "for", "s", "in", "zone_surfs", "if", "s", ".", "Surface_Type", ".", "upper", "(", ")", "==", "'FLOOR'", "]", "roofs", "=", "[", "s", "for", "s", "in", "zone_surfs", "if", "s", ".", "Surface_Type", ".", "upper", "(", ")", "==", "'ROOF'", "]", "if", "floors", "==", "[", "]", "or", "roofs", "==", "[", "]", ":", "height", "=", "zone_height_min2max", "(", "idf", ",", "zonename", ")", "else", ":", "height", "=", "zone_floor2roofheight", "(", "idf", ",", "zonename", ")", "return", "height" ]
zone height
[ "zone", "height" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L450-L461
santoshphilip/eppy
eppy/modeleditor.py
zone_floor2roofheight
def zone_floor2roofheight(idf, zonename, debug=False): """zone floor to roof height""" zone = idf.getobject('ZONE', zonename) surfs = idf.idfobjects['BuildingSurface:Detailed'.upper()] zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name] floors = [s for s in zone_surfs if s.Surface_Type.upper() == 'FLOOR'] roofs = [s for s in zone_surfs if s.Surface_Type.upper() == 'ROOF'] ceilings = [s for s in zone_surfs if s.Surface_Type.upper() == 'CEILING'] topsurfaces = roofs + ceilings topz = [] for topsurface in topsurfaces: for coord in topsurface.coords: topz.append(coord[-1]) topz = max(topz) botz = [] for floor in floors: for coord in floor.coords: botz.append(coord[-1]) botz = min(botz) height = topz - botz return height
python
def zone_floor2roofheight(idf, zonename, debug=False): """zone floor to roof height""" zone = idf.getobject('ZONE', zonename) surfs = idf.idfobjects['BuildingSurface:Detailed'.upper()] zone_surfs = [s for s in surfs if s.Zone_Name == zone.Name] floors = [s for s in zone_surfs if s.Surface_Type.upper() == 'FLOOR'] roofs = [s for s in zone_surfs if s.Surface_Type.upper() == 'ROOF'] ceilings = [s for s in zone_surfs if s.Surface_Type.upper() == 'CEILING'] topsurfaces = roofs + ceilings topz = [] for topsurface in topsurfaces: for coord in topsurface.coords: topz.append(coord[-1]) topz = max(topz) botz = [] for floor in floors: for coord in floor.coords: botz.append(coord[-1]) botz = min(botz) height = topz - botz return height
[ "def", "zone_floor2roofheight", "(", "idf", ",", "zonename", ",", "debug", "=", "False", ")", ":", "zone", "=", "idf", ".", "getobject", "(", "'ZONE'", ",", "zonename", ")", "surfs", "=", "idf", ".", "idfobjects", "[", "'BuildingSurface:Detailed'", ".", "upper", "(", ")", "]", "zone_surfs", "=", "[", "s", "for", "s", "in", "surfs", "if", "s", ".", "Zone_Name", "==", "zone", ".", "Name", "]", "floors", "=", "[", "s", "for", "s", "in", "zone_surfs", "if", "s", ".", "Surface_Type", ".", "upper", "(", ")", "==", "'FLOOR'", "]", "roofs", "=", "[", "s", "for", "s", "in", "zone_surfs", "if", "s", ".", "Surface_Type", ".", "upper", "(", ")", "==", "'ROOF'", "]", "ceilings", "=", "[", "s", "for", "s", "in", "zone_surfs", "if", "s", ".", "Surface_Type", ".", "upper", "(", ")", "==", "'CEILING'", "]", "topsurfaces", "=", "roofs", "+", "ceilings", "topz", "=", "[", "]", "for", "topsurface", "in", "topsurfaces", ":", "for", "coord", "in", "topsurface", ".", "coords", ":", "topz", ".", "append", "(", "coord", "[", "-", "1", "]", ")", "topz", "=", "max", "(", "topz", ")", "botz", "=", "[", "]", "for", "floor", "in", "floors", ":", "for", "coord", "in", "floor", ".", "coords", ":", "botz", ".", "append", "(", "coord", "[", "-", "1", "]", ")", "botz", "=", "min", "(", "botz", ")", "height", "=", "topz", "-", "botz", "return", "height" ]
zone floor to roof height
[ "zone", "floor", "to", "roof", "height" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L464-L487
santoshphilip/eppy
eppy/modeleditor.py
zonevolume
def zonevolume(idf, zonename): """zone volume""" area = zonearea(idf, zonename) height = zoneheight(idf, zonename) volume = area * height return volume
python
def zonevolume(idf, zonename): """zone volume""" area = zonearea(idf, zonename) height = zoneheight(idf, zonename) volume = area * height return volume
[ "def", "zonevolume", "(", "idf", ",", "zonename", ")", ":", "area", "=", "zonearea", "(", "idf", ",", "zonename", ")", "height", "=", "zoneheight", "(", "idf", ",", "zonename", ")", "volume", "=", "area", "*", "height", "return", "volume" ]
zone volume
[ "zone", "volume" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L490-L496
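A sketch tying the three zone-geometry helpers together, assuming a loaded IDF as above; zonevolume is simply zonearea multiplied by zoneheight for the named zone.

from eppy.modeleditor import IDF, zonearea, zoneheight, zonevolume

IDF.setiddname("/path/to/Energy+.idd")      # hypothetical IDD path
idf = IDF("/path/to/building.idf")          # hypothetical model path
name = idf.idfobjects["ZONE"][0].Name
print(zonearea(idf, name), zoneheight(idf, name), zonevolume(idf, name))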
santoshphilip/eppy
eppy/modeleditor.py
IDF.setiddname
def setiddname(cls, iddname, testing=False): """ Set the path to the EnergyPlus IDD for the version of EnergyPlus which is to be used by eppy. Parameters ---------- iddname : str Path to the IDD file. testing : bool Flag to use if running tests since we may want to ignore the `IDDAlreadySetError`. Raises ------ IDDAlreadySetError """ if cls.iddname == None: cls.iddname = iddname cls.idd_info = None cls.block = None elif cls.iddname == iddname: pass else: if testing == False: errortxt = "IDD file is set to: %s" % (cls.iddname,) raise IDDAlreadySetError(errortxt)
python
def setiddname(cls, iddname, testing=False): """ Set the path to the EnergyPlus IDD for the version of EnergyPlus which is to be used by eppy. Parameters ---------- iddname : str Path to the IDD file. testing : bool Flag to use if running tests since we may want to ignore the `IDDAlreadySetError`. Raises ------ IDDAlreadySetError """ if cls.iddname == None: cls.iddname = iddname cls.idd_info = None cls.block = None elif cls.iddname == iddname: pass else: if testing == False: errortxt = "IDD file is set to: %s" % (cls.iddname,) raise IDDAlreadySetError(errortxt)
[ "def", "setiddname", "(", "cls", ",", "iddname", ",", "testing", "=", "False", ")", ":", "if", "cls", ".", "iddname", "==", "None", ":", "cls", ".", "iddname", "=", "iddname", "cls", ".", "idd_info", "=", "None", "cls", ".", "block", "=", "None", "elif", "cls", ".", "iddname", "==", "iddname", ":", "pass", "else", ":", "if", "testing", "==", "False", ":", "errortxt", "=", "\"IDD file is set to: %s\"", "%", "(", "cls", ".", "iddname", ",", ")", "raise", "IDDAlreadySetError", "(", "errortxt", ")" ]
Set the path to the EnergyPlus IDD for the version of EnergyPlus which is to be used by eppy. Parameters ---------- iddname : str Path to the IDD file. testing : bool Flag to use if running tests since we may want to ignore the `IDDAlreadySetError`. Raises ------ IDDAlreadySetError
[ "Set", "the", "path", "to", "the", "EnergyPlus", "IDD", "for", "the", "version", "of", "EnergyPlus", "which", "is", "to", "be", "used", "by", "eppy", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L557-L584
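The usual eppy bootstrap, shown as a hedged sketch with hypothetical paths: the IDD is set once per process before any IDF is read, and calling setiddname again with a different path raises IDDAlreadySetError unless testing=True suppresses it.

from eppy.modeleditor import IDF

IDF.setiddname("/path/to/Energy+.idd")      # hypothetical IDD path, set once
idf = IDF("/path/to/building.idf")          # hypothetical model path
# IDF.setiddname("/other/Energy+.idd")      # would raise IDDAlreadySetError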
santoshphilip/eppy
eppy/modeleditor.py
IDF.setidd
def setidd(cls, iddinfo, iddindex, block, idd_version): """Set the IDD to be used by eppy. Parameters ---------- iddinfo : list Comments and metadata about fields in the IDD. block : list Field names in the IDD. """ cls.idd_info = iddinfo cls.block = block cls.idd_index = iddindex cls.idd_version = idd_version
python
def setidd(cls, iddinfo, iddindex, block, idd_version): """Set the IDD to be used by eppy. Parameters ---------- iddinfo : list Comments and metadata about fields in the IDD. block : list Field names in the IDD. """ cls.idd_info = iddinfo cls.block = block cls.idd_index = iddindex cls.idd_version = idd_version
[ "def", "setidd", "(", "cls", ",", "iddinfo", ",", "iddindex", ",", "block", ",", "idd_version", ")", ":", "cls", ".", "idd_info", "=", "iddinfo", "cls", ".", "block", "=", "block", "cls", ".", "idd_index", "=", "iddindex", "cls", ".", "idd_version", "=", "idd_version" ]
Set the IDD to be used by eppy. Parameters ---------- iddinfo : list Comments and metadata about fields in the IDD. block : list Field names in the IDD.
[ "Set", "the", "IDD", "to", "be", "used", "by", "eppy", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L598-L612
santoshphilip/eppy
eppy/modeleditor.py
IDF.initread
def initread(self, idfname): """ Use the current IDD and read an IDF from file. If the IDD has not yet been initialised then this is done first. Parameters ---------- idf_name : str Path to an IDF file. """ with open(idfname, 'r') as _: # raise nonexistent file error early if idfname doesn't exist pass iddfhandle = StringIO(iddcurrent.iddtxt) if self.getiddname() == None: self.setiddname(iddfhandle) self.idfname = idfname self.read()
python
def initread(self, idfname): """ Use the current IDD and read an IDF from file. If the IDD has not yet been initialised then this is done first. Parameters ---------- idf_name : str Path to an IDF file. """ with open(idfname, 'r') as _: # raise nonexistent file error early if idfname doesn't exist pass iddfhandle = StringIO(iddcurrent.iddtxt) if self.getiddname() == None: self.setiddname(iddfhandle) self.idfname = idfname self.read()
[ "def", "initread", "(", "self", ",", "idfname", ")", ":", "with", "open", "(", "idfname", ",", "'r'", ")", "as", "_", ":", "# raise nonexistent file error early if idfname doesn't exist", "pass", "iddfhandle", "=", "StringIO", "(", "iddcurrent", ".", "iddtxt", ")", "if", "self", ".", "getiddname", "(", ")", "==", "None", ":", "self", ".", "setiddname", "(", "iddfhandle", ")", "self", ".", "idfname", "=", "idfname", "self", ".", "read", "(", ")" ]
Use the current IDD and read an IDF from file. If the IDD has not yet been initialised then this is done first. Parameters ---------- idf_name : str Path to an IDF file.
[ "Use", "the", "current", "IDD", "and", "read", "an", "IDF", "from", "file", ".", "If", "the", "IDD", "has", "not", "yet", "been", "initialised", "then", "this", "is", "done", "first", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L616-L634
santoshphilip/eppy
eppy/modeleditor.py
IDF.initreadtxt
def initreadtxt(self, idftxt): """ Use the current IDD and read an IDF from text data. If the IDD has not yet been initialised then this is done first. Parameters ---------- idftxt : str Text representing an IDF file. """ iddfhandle = StringIO(iddcurrent.iddtxt) if self.getiddname() == None: self.setiddname(iddfhandle) idfhandle = StringIO(idftxt) self.idfname = idfhandle self.read()
python
def initreadtxt(self, idftxt): """ Use the current IDD and read an IDF from text data. If the IDD has not yet been initialised then this is done first. Parameters ---------- idftxt : str Text representing an IDF file. """ iddfhandle = StringIO(iddcurrent.iddtxt) if self.getiddname() == None: self.setiddname(iddfhandle) idfhandle = StringIO(idftxt) self.idfname = idfhandle self.read()
[ "def", "initreadtxt", "(", "self", ",", "idftxt", ")", ":", "iddfhandle", "=", "StringIO", "(", "iddcurrent", ".", "iddtxt", ")", "if", "self", ".", "getiddname", "(", ")", "==", "None", ":", "self", ".", "setiddname", "(", "iddfhandle", ")", "idfhandle", "=", "StringIO", "(", "idftxt", ")", "self", ".", "idfname", "=", "idfhandle", "self", ".", "read", "(", ")" ]
Use the current IDD and read an IDF from text data. If the IDD has not yet been initialised then this is done first. Parameters ---------- idftxt : str Text representing an IDF file.
[ "Use", "the", "current", "IDD", "and", "read", "an", "IDF", "from", "text", "data", ".", "If", "the", "IDD", "has", "not", "yet", "been", "initialised", "then", "this", "is", "done", "first", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L636-L652
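A sketch of loading a model from text with initreadtxt. It assumes the IDF constructor can be called without a file argument (the constructor itself is not part of this record) and uses a deliberately tiny, illustrative IDF string; if no IDD has been set yet, the method falls back to the IDD text bundled with eppy.

from eppy.modeleditor import IDF

idftxt = "ZONE, Sketch Zone;"    # illustrative one-object IDF text
idf = IDF()                      # assumption: a no-argument constructor is accepted
idf.initreadtxt(idftxt)
print(idf.idfobjects["ZONE"][0].Name)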
santoshphilip/eppy
eppy/modeleditor.py
IDF.read
def read(self): """ Read the IDF file and the IDD file. If the IDD file had already been read, it will not be read again. Read populates the following data structures: - idfobjects : list - model : list - idd_info : list - idd_index : dict """ if self.getiddname() == None: errortxt = ("IDD file needed to read the idf file. " "Set it using IDF.setiddname(iddfile)") raise IDDNotSetError(errortxt) readout = idfreader1( self.idfname, self.iddname, self, commdct=self.idd_info, block=self.block) (self.idfobjects, block, self.model, idd_info, idd_index, idd_version) = readout self.__class__.setidd(idd_info, idd_index, block, idd_version)
python
def read(self): """ Read the IDF file and the IDD file. If the IDD file had already been read, it will not be read again. Read populates the following data structures: - idfobjects : list - model : list - idd_info : list - idd_index : dict """ if self.getiddname() == None: errortxt = ("IDD file needed to read the idf file. " "Set it using IDF.setiddname(iddfile)") raise IDDNotSetError(errortxt) readout = idfreader1( self.idfname, self.iddname, self, commdct=self.idd_info, block=self.block) (self.idfobjects, block, self.model, idd_info, idd_index, idd_version) = readout self.__class__.setidd(idd_info, idd_index, block, idd_version)
[ "def", "read", "(", "self", ")", ":", "if", "self", ".", "getiddname", "(", ")", "==", "None", ":", "errortxt", "=", "(", "\"IDD file needed to read the idf file. \"", "\"Set it using IDF.setiddname(iddfile)\"", ")", "raise", "IDDNotSetError", "(", "errortxt", ")", "readout", "=", "idfreader1", "(", "self", ".", "idfname", ",", "self", ".", "iddname", ",", "self", ",", "commdct", "=", "self", ".", "idd_info", ",", "block", "=", "self", ".", "block", ")", "(", "self", ".", "idfobjects", ",", "block", ",", "self", ".", "model", ",", "idd_info", ",", "idd_index", ",", "idd_version", ")", "=", "readout", "self", ".", "__class__", ".", "setidd", "(", "idd_info", ",", "idd_index", ",", "block", ",", "idd_version", ")" ]
Read the IDF file and the IDD file. If the IDD file had already been read, it will not be read again. Read populates the following data structures: - idfobjects : list - model : list - idd_info : list - idd_index : dict
[ "Read", "the", "IDF", "file", "and", "the", "IDD", "file", ".", "If", "the", "IDD", "file", "had", "already", "been", "read", "it", "will", "not", "be", "read", "again", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L654-L676
santoshphilip/eppy
eppy/modeleditor.py
IDF.initnew
def initnew(self, fname): """ Use the current IDD and create a new empty IDF. If the IDD has not yet been initialised then this is done first. Parameters ---------- fname : str, optional Path to an IDF. This does not need to be set at this point. """ iddfhandle = StringIO(iddcurrent.iddtxt) if self.getiddname() == None: self.setiddname(iddfhandle) idfhandle = StringIO('') self.idfname = idfhandle self.read() if fname: self.idfname = fname
python
def initnew(self, fname): """ Use the current IDD and create a new empty IDF. If the IDD has not yet been initialised then this is done first. Parameters ---------- fname : str, optional Path to an IDF. This does not need to be set at this point. """ iddfhandle = StringIO(iddcurrent.iddtxt) if self.getiddname() == None: self.setiddname(iddfhandle) idfhandle = StringIO('') self.idfname = idfhandle self.read() if fname: self.idfname = fname
[ "def", "initnew", "(", "self", ",", "fname", ")", ":", "iddfhandle", "=", "StringIO", "(", "iddcurrent", ".", "iddtxt", ")", "if", "self", ".", "getiddname", "(", ")", "==", "None", ":", "self", ".", "setiddname", "(", "iddfhandle", ")", "idfhandle", "=", "StringIO", "(", "''", ")", "self", ".", "idfname", "=", "idfhandle", "self", ".", "read", "(", ")", "if", "fname", ":", "self", ".", "idfname", "=", "fname" ]
Use the current IDD and create a new empty IDF. If the IDD has not yet been initialised then this is done first. Parameters ---------- fname : str, optional Path to an IDF. This does not need to be set at this point.
[ "Use", "the", "current", "IDD", "and", "create", "a", "new", "empty", "IDF", ".", "If", "the", "IDD", "has", "not", "yet", "been", "initialised", "then", "this", "is", "done", "first", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L691-L709
santoshphilip/eppy
eppy/modeleditor.py
IDF.newidfobject
def newidfobject(self, key, aname='', defaultvalues=True, **kwargs): """ Add a new idfobject to the model. If you don't specify a value for a field, the default value will be set. For example :: newidfobject("CONSTRUCTION") newidfobject("CONSTRUCTION", Name='Interior Ceiling_class', Outside_Layer='LW Concrete', Layer_2='soundmat') Parameters ---------- key : str The type of IDF object. This must be in ALL_CAPS. aname : str, deprecated This parameter is not used. It is left there for backward compatibility. defaultvalues: boolean default is True. If True default values WILL be set. If False, default values WILL NOT be set **kwargs Keyword arguments in the format `field=value` used to set the value of fields in the IDF object when it is created. Returns ------- EpBunch object """ obj = newrawobject(self.model, self.idd_info, key, block=self.block, defaultvalues=defaultvalues) abunch = obj2bunch(self.model, self.idd_info, obj) if aname: warnings.warn("The aname parameter should no longer be used.", UserWarning) namebunch(abunch, aname) self.idfobjects[key].append(abunch) for k, v in list(kwargs.items()): abunch[k] = v return abunch
python
def newidfobject(self, key, aname='', defaultvalues=True, **kwargs): """ Add a new idfobject to the model. If you don't specify a value for a field, the default value will be set. For example :: newidfobject("CONSTRUCTION") newidfobject("CONSTRUCTION", Name='Interior Ceiling_class', Outside_Layer='LW Concrete', Layer_2='soundmat') Parameters ---------- key : str The type of IDF object. This must be in ALL_CAPS. aname : str, deprecated This parameter is not used. It is left there for backward compatibility. defaultvalues: boolean default is True. If True default values WILL be set. If False, default values WILL NOT be set **kwargs Keyword arguments in the format `field=value` used to set the value of fields in the IDF object when it is created. Returns ------- EpBunch object """ obj = newrawobject(self.model, self.idd_info, key, block=self.block, defaultvalues=defaultvalues) abunch = obj2bunch(self.model, self.idd_info, obj) if aname: warnings.warn("The aname parameter should no longer be used.", UserWarning) namebunch(abunch, aname) self.idfobjects[key].append(abunch) for k, v in list(kwargs.items()): abunch[k] = v return abunch
[ "def", "newidfobject", "(", "self", ",", "key", ",", "aname", "=", "''", ",", "defaultvalues", "=", "True", ",", "*", "*", "kwargs", ")", ":", "obj", "=", "newrawobject", "(", "self", ".", "model", ",", "self", ".", "idd_info", ",", "key", ",", "block", "=", "self", ".", "block", ",", "defaultvalues", "=", "defaultvalues", ")", "abunch", "=", "obj2bunch", "(", "self", ".", "model", ",", "self", ".", "idd_info", ",", "obj", ")", "if", "aname", ":", "warnings", ".", "warn", "(", "\"The aname parameter should no longer be used.\"", ",", "UserWarning", ")", "namebunch", "(", "abunch", ",", "aname", ")", "self", ".", "idfobjects", "[", "key", "]", ".", "append", "(", "abunch", ")", "for", "k", ",", "v", "in", "list", "(", "kwargs", ".", "items", "(", ")", ")", ":", "abunch", "[", "k", "]", "=", "v", "return", "abunch" ]
Add a new idfobject to the model. If you don't specify a value for a field, the default value will be set. For example :: newidfobject("CONSTRUCTION") newidfobject("CONSTRUCTION", Name='Interior Ceiling_class', Outside_Layer='LW Concrete', Layer_2='soundmat') Parameters ---------- key : str The type of IDF object. This must be in ALL_CAPS. aname : str, deprecated This parameter is not used. It is left there for backward compatibility. defaultvalues: boolean default is True. If True default values WILL be set. If False, default values WILL NOT be set **kwargs Keyword arguments in the format `field=value` used to set the value of fields in the IDF object when it is created. Returns ------- EpBunch object
[ "Add", "a", "new", "idfobject", "to", "the", "model", ".", "If", "you", "don", "t", "specify", "a", "value", "for", "a", "field", "the", "default", "value", "will", "be", "set", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L713-L755
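A sketch of creating an object with newidfobject, assuming a loaded IDF as above; the MATERIAL field values are illustrative only. The key must be ALL_CAPS, and unspecified fields keep their IDD defaults while defaultvalues is True.

from eppy.modeleditor import IDF

IDF.setiddname("/path/to/Energy+.idd")      # hypothetical IDD path
idf = IDF("/path/to/building.idf")          # hypothetical model path
mat = idf.newidfobject(
    "MATERIAL",
    Name="Sketch Insulation",
    Roughness="Rough",
    Thickness=0.05,                         # illustrative values
    Conductivity=0.04,
    Density=30,
    Specific_Heat=1400,
)
idf.save()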
santoshphilip/eppy
eppy/modeleditor.py
IDF.removeidfobject
def removeidfobject(self, idfobject): """Remove an IDF object from the IDF. Parameters ---------- idfobject : EpBunch object The IDF object to remove. """ key = idfobject.key.upper() self.idfobjects[key].remove(idfobject)
python
def removeidfobject(self, idfobject): """Remove an IDF object from the IDF. Parameters ---------- idfobject : EpBunch object The IDF object to remove. """ key = idfobject.key.upper() self.idfobjects[key].remove(idfobject)
[ "def", "removeidfobject", "(", "self", ",", "idfobject", ")", ":", "key", "=", "idfobject", ".", "key", ".", "upper", "(", ")", "self", ".", "idfobjects", "[", "key", "]", ".", "remove", "(", "idfobject", ")" ]
Remove an IDF object from the IDF. Parameters ---------- idfobject : EpBunch object The IDF object to remove.
[ "Remove", "an", "IDF", "object", "from", "the", "IDF", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L774-L784
santoshphilip/eppy
eppy/modeleditor.py
IDF.copyidfobject
def copyidfobject(self, idfobject): """Add an IDF object to the IDF. Parameters ---------- idfobject : EpBunch object The IDF object to remove. This usually comes from another idf file, or it can be used to copy within this idf file. """ return addthisbunch(self.idfobjects, self.model, self.idd_info, idfobject, self)
python
def copyidfobject(self, idfobject): """Add an IDF object to the IDF. Parameters ---------- idfobject : EpBunch object The IDF object to remove. This usually comes from another idf file, or it can be used to copy within this idf file. """ return addthisbunch(self.idfobjects, self.model, self.idd_info, idfobject, self)
[ "def", "copyidfobject", "(", "self", ",", "idfobject", ")", ":", "return", "addthisbunch", "(", "self", ".", "idfobjects", ",", "self", ".", "model", ",", "self", ".", "idd_info", ",", "idfobject", ",", "self", ")" ]
Add an IDF object to the IDF. Parameters ---------- idfobject : EpBunch object The IDF object to remove. This usually comes from another idf file, or it can be used to copy within this idf file.
[ "Add", "an", "IDF", "object", "to", "the", "IDF", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L786-L799
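A sketch of copying objects between two models with copyidfobject (removeidfobject is the inverse operation), assuming hypothetical donor and target files that share one IDD.

from eppy.modeleditor import IDF

IDF.setiddname("/path/to/Energy+.idd")      # hypothetical IDD path
donor = IDF("/path/to/library.idf")         # hypothetical donor model
target = IDF("/path/to/building.idf")       # hypothetical target model
for construction in donor.idfobjects["CONSTRUCTION"]:
    target.copyidfobject(construction)      # copy into the target model
target.save()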
santoshphilip/eppy
eppy/modeleditor.py
IDF.getextensibleindex
def getextensibleindex(self, key, name): """ Get the index of the first extensible item. Only for internal use. # TODO : hide this Parameters ---------- key : str The type of IDF object. This must be in ALL_CAPS. name : str The name of the object to fetch. Returns ------- int """ return getextensibleindex( self.idfobjects, self.model, self.idd_info, key, name)
python
def getextensibleindex(self, key, name): """ Get the index of the first extensible item. Only for internal use. # TODO : hide this Parameters ---------- key : str The type of IDF object. This must be in ALL_CAPS. name : str The name of the object to fetch. Returns ------- int """ return getextensibleindex( self.idfobjects, self.model, self.idd_info, key, name)
[ "def", "getextensibleindex", "(", "self", ",", "key", ",", "name", ")", ":", "return", "getextensibleindex", "(", "self", ".", "idfobjects", ",", "self", ".", "model", ",", "self", ".", "idd_info", ",", "key", ",", "name", ")" ]
Get the index of the first extensible item. Only for internal use. # TODO : hide this Parameters ---------- key : str The type of IDF object. This must be in ALL_CAPS. name : str The name of the object to fetch. Returns ------- int
[ "Get", "the", "index", "of", "the", "first", "extensible", "item", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L818-L838
santoshphilip/eppy
eppy/modeleditor.py
IDF.removeextensibles
def removeextensibles(self, key, name): """ Remove extensible items in the object of key and name. Only for internal use. # TODO : hide this Parameters ---------- key : str The type of IDF object. This must be in ALL_CAPS. name : str The name of the object to fetch. Returns ------- EpBunch object """ return removeextensibles( self.idfobjects, self.model, self.idd_info, key, name)
python
def removeextensibles(self, key, name): """ Remove extensible items in the object of key and name. Only for internal use. # TODO : hide this Parameters ---------- key : str The type of IDF object. This must be in ALL_CAPS. name : str The name of the object to fetch. Returns ------- EpBunch object """ return removeextensibles( self.idfobjects, self.model, self.idd_info, key, name)
[ "def", "removeextensibles", "(", "self", ",", "key", ",", "name", ")", ":", "return", "removeextensibles", "(", "self", ".", "idfobjects", ",", "self", ".", "model", ",", "self", ".", "idd_info", ",", "key", ",", "name", ")" ]
Remove extensible items in the object of key and name. Only for internal use. # TODO : hide this Parameters ---------- key : str The type of IDF object. This must be in ALL_CAPS. name : str The name of the object to fetch. Returns ------- EpBunch object
[ "Remove", "extensible", "items", "in", "the", "object", "of", "key", "and", "name", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L840-L860
santoshphilip/eppy
eppy/modeleditor.py
IDF.idfstr
def idfstr(self): """String representation of the IDF. Returns ------- str """ if self.outputtype == 'standard': astr = '' else: astr = self.model.__repr__() if self.outputtype == 'standard': astr = '' dtls = self.model.dtls for objname in dtls: for obj in self.idfobjects[objname]: astr = astr + obj.__repr__() elif self.outputtype == 'nocomment': return astr elif self.outputtype == 'nocomment1': slist = astr.split('\n') slist = [item.strip() for item in slist] astr = '\n'.join(slist) elif self.outputtype == 'nocomment2': slist = astr.split('\n') slist = [item.strip() for item in slist] slist = [item for item in slist if item != ''] astr = '\n'.join(slist) elif self.outputtype == 'compressed': slist = astr.split('\n') slist = [item.strip() for item in slist] astr = ' '.join(slist) else: raise ValueError("%s is not a valid outputtype" % self.outputtype) return astr
python
def idfstr(self): """String representation of the IDF. Returns ------- str """ if self.outputtype == 'standard': astr = '' else: astr = self.model.__repr__() if self.outputtype == 'standard': astr = '' dtls = self.model.dtls for objname in dtls: for obj in self.idfobjects[objname]: astr = astr + obj.__repr__() elif self.outputtype == 'nocomment': return astr elif self.outputtype == 'nocomment1': slist = astr.split('\n') slist = [item.strip() for item in slist] astr = '\n'.join(slist) elif self.outputtype == 'nocomment2': slist = astr.split('\n') slist = [item.strip() for item in slist] slist = [item for item in slist if item != ''] astr = '\n'.join(slist) elif self.outputtype == 'compressed': slist = astr.split('\n') slist = [item.strip() for item in slist] astr = ' '.join(slist) else: raise ValueError("%s is not a valid outputtype" % self.outputtype) return astr
[ "def", "idfstr", "(", "self", ")", ":", "if", "self", ".", "outputtype", "==", "'standard'", ":", "astr", "=", "''", "else", ":", "astr", "=", "self", ".", "model", ".", "__repr__", "(", ")", "if", "self", ".", "outputtype", "==", "'standard'", ":", "astr", "=", "''", "dtls", "=", "self", ".", "model", ".", "dtls", "for", "objname", "in", "dtls", ":", "for", "obj", "in", "self", ".", "idfobjects", "[", "objname", "]", ":", "astr", "=", "astr", "+", "obj", ".", "__repr__", "(", ")", "elif", "self", ".", "outputtype", "==", "'nocomment'", ":", "return", "astr", "elif", "self", ".", "outputtype", "==", "'nocomment1'", ":", "slist", "=", "astr", ".", "split", "(", "'\\n'", ")", "slist", "=", "[", "item", ".", "strip", "(", ")", "for", "item", "in", "slist", "]", "astr", "=", "'\\n'", ".", "join", "(", "slist", ")", "elif", "self", ".", "outputtype", "==", "'nocomment2'", ":", "slist", "=", "astr", ".", "split", "(", "'\\n'", ")", "slist", "=", "[", "item", ".", "strip", "(", ")", "for", "item", "in", "slist", "]", "slist", "=", "[", "item", "for", "item", "in", "slist", "if", "item", "!=", "''", "]", "astr", "=", "'\\n'", ".", "join", "(", "slist", ")", "elif", "self", ".", "outputtype", "==", "'compressed'", ":", "slist", "=", "astr", ".", "split", "(", "'\\n'", ")", "slist", "=", "[", "item", ".", "strip", "(", ")", "for", "item", "in", "slist", "]", "astr", "=", "' '", ".", "join", "(", "slist", ")", "else", ":", "raise", "ValueError", "(", "\"%s is not a valid outputtype\"", "%", "self", ".", "outputtype", ")", "return", "astr" ]
String representation of the IDF.

Returns
-------
str
[ "String", "representation", "of", "the", "IDF", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L869-L905
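A brief usage sketch for the idfstr record above; the variable name idf1 is hypothetical and stands for any already-loaded eppy IDF object.

idf1.outputtype = 'nocomment2'  # per the code above: strip comments and blank lines
text = idf1.idfstr()            # the whole model as a single string
print(text[:200])               # peek at the start of the output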
santoshphilip/eppy
eppy/modeleditor.py
IDF.save
def save(self, filename=None, lineendings='default', encoding='latin-1'): """ Save the IDF as a text file with the optional filename passed, or with the current idfname of the IDF. Parameters ---------- filename : str, optional Filepath to save the file. If None then use the IDF.idfname parameter. Also accepts a file handle. lineendings : str, optional Line endings to use in the saved file. Options are 'default', 'windows' and 'unix' the default is 'default' which uses the line endings for the current system. encoding : str, optional Encoding to use for the saved file. The default is 'latin-1' which is compatible with the EnergyPlus IDFEditor. """ if filename is None: filename = self.idfname s = self.idfstr() if lineendings == 'default': system = platform.system() s = '!- {} Line endings \n'.format(system) + s slines = s.splitlines() s = os.linesep.join(slines) elif lineendings == 'windows': s = '!- Windows Line endings \n' + s slines = s.splitlines() s = '\r\n'.join(slines) elif lineendings == 'unix': s = '!- Unix Line endings \n' + s slines = s.splitlines() s = '\n'.join(slines) s = s.encode(encoding) try: with open(filename, 'wb') as idf_out: idf_out.write(s) except TypeError: # in the case that filename is a file handle try: filename.write(s) except TypeError: filename.write(s.decode(encoding))
python
def save(self, filename=None, lineendings='default', encoding='latin-1'): """ Save the IDF as a text file with the optional filename passed, or with the current idfname of the IDF. Parameters ---------- filename : str, optional Filepath to save the file. If None then use the IDF.idfname parameter. Also accepts a file handle. lineendings : str, optional Line endings to use in the saved file. Options are 'default', 'windows' and 'unix' the default is 'default' which uses the line endings for the current system. encoding : str, optional Encoding to use for the saved file. The default is 'latin-1' which is compatible with the EnergyPlus IDFEditor. """ if filename is None: filename = self.idfname s = self.idfstr() if lineendings == 'default': system = platform.system() s = '!- {} Line endings \n'.format(system) + s slines = s.splitlines() s = os.linesep.join(slines) elif lineendings == 'windows': s = '!- Windows Line endings \n' + s slines = s.splitlines() s = '\r\n'.join(slines) elif lineendings == 'unix': s = '!- Unix Line endings \n' + s slines = s.splitlines() s = '\n'.join(slines) s = s.encode(encoding) try: with open(filename, 'wb') as idf_out: idf_out.write(s) except TypeError: # in the case that filename is a file handle try: filename.write(s) except TypeError: filename.write(s.decode(encoding))
[ "def", "save", "(", "self", ",", "filename", "=", "None", ",", "lineendings", "=", "'default'", ",", "encoding", "=", "'latin-1'", ")", ":", "if", "filename", "is", "None", ":", "filename", "=", "self", ".", "idfname", "s", "=", "self", ".", "idfstr", "(", ")", "if", "lineendings", "==", "'default'", ":", "system", "=", "platform", ".", "system", "(", ")", "s", "=", "'!- {} Line endings \\n'", ".", "format", "(", "system", ")", "+", "s", "slines", "=", "s", ".", "splitlines", "(", ")", "s", "=", "os", ".", "linesep", ".", "join", "(", "slines", ")", "elif", "lineendings", "==", "'windows'", ":", "s", "=", "'!- Windows Line endings \\n'", "+", "s", "slines", "=", "s", ".", "splitlines", "(", ")", "s", "=", "'\\r\\n'", ".", "join", "(", "slines", ")", "elif", "lineendings", "==", "'unix'", ":", "s", "=", "'!- Unix Line endings \\n'", "+", "s", "slines", "=", "s", ".", "splitlines", "(", ")", "s", "=", "'\\n'", ".", "join", "(", "slines", ")", "s", "=", "s", ".", "encode", "(", "encoding", ")", "try", ":", "with", "open", "(", "filename", ",", "'wb'", ")", "as", "idf_out", ":", "idf_out", ".", "write", "(", "s", ")", "except", "TypeError", ":", "# in the case that filename is a file handle", "try", ":", "filename", ".", "write", "(", "s", ")", "except", "TypeError", ":", "filename", ".", "write", "(", "s", ".", "decode", "(", "encoding", ")", ")" ]
Save the IDF as a text file with the optional filename passed, or with
the current idfname of the IDF.

Parameters
----------
filename : str, optional
    Filepath to save the file. If None then use the IDF.idfname parameter.
    Also accepts a file handle.
lineendings : str, optional
    Line endings to use in the saved file. Options are 'default', 'windows'
    and 'unix' the default is 'default' which uses the line endings for the
    current system.
encoding : str, optional
    Encoding to use for the saved file. The default is 'latin-1' which is
    compatible with the EnergyPlus IDFEditor.
[ "Save", "the", "IDF", "as", "a", "text", "file", "with", "the", "optional", "filename", "passed", "or", "with", "the", "current", "idfname", "of", "the", "IDF", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L907-L953
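A minimal, hedged example of calling save as documented in the record above; idf1 is a placeholder for an IDF object opened elsewhere, and the target filename is made up.

idf1.save()  # rewrite the file named in idf1.idfname
idf1.save('my_model.idf', lineendings='unix', encoding='latin-1')  # explicit target and options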
santoshphilip/eppy
eppy/modeleditor.py
IDF.saveas
def saveas(self, filename, lineendings='default', encoding='latin-1'):
        """
        Save the IDF as a text file with the filename passed.

        Parameters
        ----------
        filename : str
            Filepath to to set the idfname attribute to and save the file as.
        lineendings : str, optional
            Line endings to use in the saved file. Options are 'default',
            'windows' and 'unix' the default is 'default' which uses the line
            endings for the current system.
        encoding : str, optional
            Encoding to use for the saved file. The default is 'latin-1' which
            is compatible with the EnergyPlus IDFEditor.

        """
        self.idfname = filename
        self.save(filename, lineendings, encoding)
python
def saveas(self, filename, lineendings='default', encoding='latin-1'):
        """
        Save the IDF as a text file with the filename passed.

        Parameters
        ----------
        filename : str
            Filepath to to set the idfname attribute to and save the file as.
        lineendings : str, optional
            Line endings to use in the saved file. Options are 'default',
            'windows' and 'unix' the default is 'default' which uses the line
            endings for the current system.
        encoding : str, optional
            Encoding to use for the saved file. The default is 'latin-1' which
            is compatible with the EnergyPlus IDFEditor.

        """
        self.idfname = filename
        self.save(filename, lineendings, encoding)
[ "def", "saveas", "(", "self", ",", "filename", ",", "lineendings", "=", "'default'", ",", "encoding", "=", "'latin-1'", ")", ":", "self", ".", "idfname", "=", "filename", "self", ".", "save", "(", "filename", ",", "lineendings", ",", "encoding", ")" ]
Save the IDF as a text file with the filename passed.

Parameters
----------
filename : str
    Filepath to to set the idfname attribute to and save the file as.
lineendings : str, optional
    Line endings to use in the saved file. Options are 'default', 'windows'
    and 'unix' the default is 'default' which uses the line endings for the
    current system.
encoding : str, optional
    Encoding to use for the saved file. The default is 'latin-1' which is
    compatible with the EnergyPlus IDFEditor.
[ "Save", "the", "IDF", "as", "a", "text", "file", "with", "the", "filename", "passed", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L955-L974
santoshphilip/eppy
eppy/modeleditor.py
IDF.savecopy
def savecopy(self, filename, lineendings='default', encoding='latin-1'):
        """Save a copy of the file with the filename passed.

        Parameters
        ----------
        filename : str
            Filepath to save the file.
        lineendings : str, optional
            Line endings to use in the saved file. Options are 'default',
            'windows' and 'unix' the default is 'default' which uses the line
            endings for the current system.
        encoding : str, optional
            Encoding to use for the saved file. The default is 'latin-1' which
            is compatible with the EnergyPlus IDFEditor.

        """
        self.save(filename, lineendings, encoding)
python
def savecopy(self, filename, lineendings='default', encoding='latin-1'):
        """Save a copy of the file with the filename passed.

        Parameters
        ----------
        filename : str
            Filepath to save the file.
        lineendings : str, optional
            Line endings to use in the saved file. Options are 'default',
            'windows' and 'unix' the default is 'default' which uses the line
            endings for the current system.
        encoding : str, optional
            Encoding to use for the saved file. The default is 'latin-1' which
            is compatible with the EnergyPlus IDFEditor.

        """
        self.save(filename, lineendings, encoding)
[ "def", "savecopy", "(", "self", ",", "filename", ",", "lineendings", "=", "'default'", ",", "encoding", "=", "'latin-1'", ")", ":", "self", ".", "save", "(", "filename", ",", "lineendings", ",", "encoding", ")" ]
Save a copy of the file with the filename passed.

Parameters
----------
filename : str
    Filepath to save the file.
lineendings : str, optional
    Line endings to use in the saved file. Options are 'default', 'windows'
    and 'unix' the default is 'default' which uses the line endings for the
    current system.
encoding : str, optional
    Encoding to use for the saved file. The default is 'latin-1' which is
    compatible with the EnergyPlus IDFEditor.
[ "Save", "a", "copy", "of", "the", "file", "with", "the", "filename", "passed", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L976-L994
santoshphilip/eppy
eppy/modeleditor.py
IDF.run
def run(self, **kwargs): """ Run an IDF file with a given EnergyPlus weather file. This is a wrapper for the EnergyPlus command line interface. Parameters ---------- **kwargs See eppy.runner.functions.run() """ # write the IDF to the current directory self.saveas('in.idf') # if `idd` is not passed explicitly, use the IDF.iddname idd = kwargs.pop('idd', self.iddname) epw = kwargs.pop('weather', self.epw) try: run(self, weather=epw, idd=idd, **kwargs) finally: os.remove('in.idf')
python
def run(self, **kwargs): """ Run an IDF file with a given EnergyPlus weather file. This is a wrapper for the EnergyPlus command line interface. Parameters ---------- **kwargs See eppy.runner.functions.run() """ # write the IDF to the current directory self.saveas('in.idf') # if `idd` is not passed explicitly, use the IDF.iddname idd = kwargs.pop('idd', self.iddname) epw = kwargs.pop('weather', self.epw) try: run(self, weather=epw, idd=idd, **kwargs) finally: os.remove('in.idf')
[ "def", "run", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# write the IDF to the current directory", "self", ".", "saveas", "(", "'in.idf'", ")", "# if `idd` is not passed explicitly, use the IDF.iddname", "idd", "=", "kwargs", ".", "pop", "(", "'idd'", ",", "self", ".", "iddname", ")", "epw", "=", "kwargs", ".", "pop", "(", "'weather'", ",", "self", ".", "epw", ")", "try", ":", "run", "(", "self", ",", "weather", "=", "epw", ",", "idd", "=", "idd", ",", "*", "*", "kwargs", ")", "finally", ":", "os", ".", "remove", "(", "'in.idf'", ")" ]
Run an IDF file with a given EnergyPlus weather file. This is a
wrapper for the EnergyPlus command line interface.

Parameters
----------
**kwargs
    See eppy.runner.functions.run()
[ "Run", "an", "IDF", "file", "with", "a", "given", "EnergyPlus", "weather", "file", ".", "This", "is", "a", "wrapper", "for", "the", "EnergyPlus", "command", "line", "interface", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/modeleditor.py#L997-L1016
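A usage sketch for the run record above. It assumes the IDF was opened with a weather file and that a matching EnergyPlus release is installed; any extra keyword arguments are simply handed through to eppy.runner.functions.run(), so none are shown, and the file names are placeholders.

idf1 = IDF('my_model.idf', epw='my_weather.epw')  # placeholder names, weather file attached at open time
idf1.run()  # writes in.idf, calls the EnergyPlus CLI wrapper, then removes in.idf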
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
readfile
def readfile(filename):
    """readfile"""
    fhandle = open(filename, 'rb')
    data = fhandle.read()
    try:
        data = data.decode('ISO-8859-2')
    except AttributeError:
        pass
    fhandle.close()
    return data
python
def readfile(filename):
    """readfile"""
    fhandle = open(filename, 'rb')
    data = fhandle.read()
    try:
        data = data.decode('ISO-8859-2')
    except AttributeError:
        pass
    fhandle.close()
    return data
[ "def", "readfile", "(", "filename", ")", ":", "fhandle", "=", "open", "(", "filename", ",", "'rb'", ")", "data", "=", "fhandle", ".", "read", "(", ")", "try", ":", "data", "=", "data", ".", "decode", "(", "'ISO-8859-2'", ")", "except", "AttributeError", ":", "pass", "fhandle", ".", "close", "(", ")", "return", "data" ]
readfile
[ "readfile" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L22-L31
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
printdict
def printdict(adict):
    """printdict"""
    dlist = list(adict.keys())
    dlist.sort()
    for i in range(0, len(dlist)):
        print(dlist[i], adict[dlist[i]])
python
def printdict(adict):
    """printdict"""
    dlist = list(adict.keys())
    dlist.sort()
    for i in range(0, len(dlist)):
        print(dlist[i], adict[dlist[i]])
[ "def", "printdict", "(", "adict", ")", ":", "dlist", "=", "list", "(", "adict", ".", "keys", "(", ")", ")", "dlist", ".", "sort", "(", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "dlist", ")", ")", ":", "print", "(", "dlist", "[", "i", "]", ",", "adict", "[", "dlist", "[", "i", "]", "]", ")" ]
printdict
[ "printdict" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L38-L43
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
tabfile2list
def tabfile2list(fname):
    "tabfile2list"
    #dat = mylib1.readfileasmac(fname)
    #data = string.strip(dat)
    data = mylib1.readfileasmac(fname)
    #data = data[:-2]#remove the last return
    alist = data.split('\r')#since I read it as a mac file
    blist = alist[1].split('\t')
    clist = []
    for num in range(0, len(alist)):
        ilist = alist[num].split('\t')
        clist = clist+[ilist]
    cclist = clist[:-1]#the last element is turning out to be empty
    return cclist
python
def tabfile2list(fname):
    "tabfile2list"
    #dat = mylib1.readfileasmac(fname)
    #data = string.strip(dat)
    data = mylib1.readfileasmac(fname)
    #data = data[:-2]#remove the last return
    alist = data.split('\r')#since I read it as a mac file
    blist = alist[1].split('\t')
    clist = []
    for num in range(0, len(alist)):
        ilist = alist[num].split('\t')
        clist = clist+[ilist]
    cclist = clist[:-1]#the last element is turning out to be empty
    return cclist
[ "def", "tabfile2list", "(", "fname", ")", ":", "#dat = mylib1.readfileasmac(fname)", "#data = string.strip(dat)", "data", "=", "mylib1", ".", "readfileasmac", "(", "fname", ")", "#data = data[:-2]#remove the last return", "alist", "=", "data", ".", "split", "(", "'\\r'", ")", "#since I read it as a mac file", "blist", "=", "alist", "[", "1", "]", ".", "split", "(", "'\\t'", ")", "clist", "=", "[", "]", "for", "num", "in", "range", "(", "0", ",", "len", "(", "alist", ")", ")", ":", "ilist", "=", "alist", "[", "num", "]", ".", "split", "(", "'\\t'", ")", "clist", "=", "clist", "+", "[", "ilist", "]", "cclist", "=", "clist", "[", ":", "-", "1", "]", "#the last element is turning out to be empty", "return", "cclist" ]
tabfile2list
[ "tabfile2list" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L45-L59
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
tabstr2list
def tabstr2list(data):
    """tabstr2list"""
    alist = data.split(os.linesep)
    blist = alist[1].split('\t')
    clist = []
    for num in range(0, len(alist)):
        ilist = alist[num].split('\t')
        clist = clist+[ilist]
    cclist = clist[:-1]
    #the last element is turning out to be empty
    #this is because the string ends with a os.linesep
    return cclist
python
def tabstr2list(data):
    """tabstr2list"""
    alist = data.split(os.linesep)
    blist = alist[1].split('\t')
    clist = []
    for num in range(0, len(alist)):
        ilist = alist[num].split('\t')
        clist = clist+[ilist]
    cclist = clist[:-1]
    #the last element is turning out to be empty
    #this is because the string ends with a os.linesep
    return cclist
[ "def", "tabstr2list", "(", "data", ")", ":", "alist", "=", "data", ".", "split", "(", "os", ".", "linesep", ")", "blist", "=", "alist", "[", "1", "]", ".", "split", "(", "'\\t'", ")", "clist", "=", "[", "]", "for", "num", "in", "range", "(", "0", ",", "len", "(", "alist", ")", ")", ":", "ilist", "=", "alist", "[", "num", "]", ".", "split", "(", "'\\t'", ")", "clist", "=", "clist", "+", "[", "ilist", "]", "cclist", "=", "clist", "[", ":", "-", "1", "]", "#the last element is turning out to be empty", "#this is because the string ends with a os.linesep", "return", "cclist" ]
tabstr2list
[ "tabstr2list" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L61-L73
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
list2doe
def list2doe(alist):
    """list2doe"""
    theequal = ''
    astr = ''
    lenj = len(alist)
    leni = len(alist[0])
    for i in range(0, leni-1):
        for j in range(0, lenj):
            if j == 0:
                astr = astr + alist[j][i + 1] + theequal + alist[j][0] + RET
            else:
                astr = astr + alist[j][0] + theequal + alist[j][i + 1] + RET
        astr = astr + RET
    return astr
python
def list2doe(alist):
    """list2doe"""
    theequal = ''
    astr = ''
    lenj = len(alist)
    leni = len(alist[0])
    for i in range(0, leni-1):
        for j in range(0, lenj):
            if j == 0:
                astr = astr + alist[j][i + 1] + theequal + alist[j][0] + RET
            else:
                astr = astr + alist[j][0] + theequal + alist[j][i + 1] + RET
        astr = astr + RET
    return astr
[ "def", "list2doe", "(", "alist", ")", ":", "theequal", "=", "''", "astr", "=", "''", "lenj", "=", "len", "(", "alist", ")", "leni", "=", "len", "(", "alist", "[", "0", "]", ")", "for", "i", "in", "range", "(", "0", ",", "leni", "-", "1", ")", ":", "for", "j", "in", "range", "(", "0", ",", "lenj", ")", ":", "if", "j", "==", "0", ":", "astr", "=", "astr", "+", "alist", "[", "j", "]", "[", "i", "+", "1", "]", "+", "theequal", "+", "alist", "[", "j", "]", "[", "0", "]", "+", "RET", "else", ":", "astr", "=", "astr", "+", "alist", "[", "j", "]", "[", "0", "]", "+", "theequal", "+", "alist", "[", "j", "]", "[", "i", "+", "1", "]", "+", "RET", "astr", "=", "astr", "+", "RET", "return", "astr" ]
list2doe
[ "list2doe" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L75-L88
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
tabfile2doefile
def tabfile2doefile(tabfile, doefile):
    """tabfile2doefile"""
    alist = tabfile2list(tabfile)
    astr = list2doe(alist)
    mylib1.write_str2file(doefile, astr)
python
def tabfile2doefile(tabfile, doefile):
    """tabfile2doefile"""
    alist = tabfile2list(tabfile)
    astr = list2doe(alist)
    mylib1.write_str2file(doefile, astr)
[ "def", "tabfile2doefile", "(", "tabfile", ",", "doefile", ")", ":", "alist", "=", "tabfile2list", "(", "tabfile", ")", "astr", "=", "list2doe", "(", "alist", ")", "mylib1", ".", "write_str2file", "(", "doefile", ",", "astr", ")" ]
tabfile2doefile
[ "tabfile2doefile" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L90-L94
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
makedoedict
def makedoedict(str1): """makedoedict""" blocklist = str1.split('..') blocklist = blocklist[:-1]#remove empty item after last '..' blockdict = {} belongsdict = {} for num in range(0, len(blocklist)): blocklist[num] = blocklist[num].strip() linelist = blocklist[num].split(os.linesep) aline = linelist[0] alinelist = aline.split('=') name = alinelist[0].strip() aline = linelist[1] alinelist = aline.split('=') belongs = alinelist[-1].strip() theblock = blocklist[num] + os.linesep + '..' + os.linesep + os.linesep #put the '..' back in the block blockdict[name] = theblock belongsdict[name] = belongs return [blockdict, belongsdict]
python
def makedoedict(str1): """makedoedict""" blocklist = str1.split('..') blocklist = blocklist[:-1]#remove empty item after last '..' blockdict = {} belongsdict = {} for num in range(0, len(blocklist)): blocklist[num] = blocklist[num].strip() linelist = blocklist[num].split(os.linesep) aline = linelist[0] alinelist = aline.split('=') name = alinelist[0].strip() aline = linelist[1] alinelist = aline.split('=') belongs = alinelist[-1].strip() theblock = blocklist[num] + os.linesep + '..' + os.linesep + os.linesep #put the '..' back in the block blockdict[name] = theblock belongsdict[name] = belongs return [blockdict, belongsdict]
[ "def", "makedoedict", "(", "str1", ")", ":", "blocklist", "=", "str1", ".", "split", "(", "'..'", ")", "blocklist", "=", "blocklist", "[", ":", "-", "1", "]", "#remove empty item after last '..'", "blockdict", "=", "{", "}", "belongsdict", "=", "{", "}", "for", "num", "in", "range", "(", "0", ",", "len", "(", "blocklist", ")", ")", ":", "blocklist", "[", "num", "]", "=", "blocklist", "[", "num", "]", ".", "strip", "(", ")", "linelist", "=", "blocklist", "[", "num", "]", ".", "split", "(", "os", ".", "linesep", ")", "aline", "=", "linelist", "[", "0", "]", "alinelist", "=", "aline", ".", "split", "(", "'='", ")", "name", "=", "alinelist", "[", "0", "]", ".", "strip", "(", ")", "aline", "=", "linelist", "[", "1", "]", "alinelist", "=", "aline", ".", "split", "(", "'='", ")", "belongs", "=", "alinelist", "[", "-", "1", "]", ".", "strip", "(", ")", "theblock", "=", "blocklist", "[", "num", "]", "+", "os", ".", "linesep", "+", "'..'", "+", "os", ".", "linesep", "+", "os", ".", "linesep", "#put the '..' back in the block", "blockdict", "[", "name", "]", "=", "theblock", "belongsdict", "[", "name", "]", "=", "belongs", "return", "[", "blockdict", ",", "belongsdict", "]" ]
makedoedict
[ "makedoedict" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L102-L121
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
makedoetree
def makedoetree(ddict, bdict): """makedoetree""" dlist = list(ddict.keys()) blist = list(bdict.keys()) dlist.sort() blist.sort() #make space dict doesnot = 'DOES NOT' lst = [] for num in range(0, len(blist)): if bdict[blist[num]] == doesnot:#belong lst = lst + [blist[num]] doedict = {} for num in range(0, len(lst)): #print lst[num] doedict[lst[num]] = {} lv1list = list(doedict.keys()) lv1list.sort() #make wall dict #for each space for i in range(0, len(lv1list)): walllist = [] adict = doedict[lv1list[i]] #loop thru the entire blist dictonary and list the ones that belong into walllist for num in range(0, len(blist)): if bdict[blist[num]] == lv1list[i]: walllist = walllist + [blist[num]] #put walllist into dict for j in range(0, len(walllist)): adict[walllist[j]] = {} #make window dict #for each space for i in range(0, len(lv1list)): adict1 = doedict[lv1list[i]] #for each wall walllist = list(adict1.keys()) walllist.sort() for j in range(0, len(walllist)): windlist = [] adict2 = adict1[walllist[j]] #loop thru the entire blist dictonary and list the ones that belong into windlist for num in range(0, len(blist)): if bdict[blist[num]] == walllist[j]: windlist = windlist + [blist[num]] #put walllist into dict for k in range(0, len(windlist)): adict2[windlist[k]] = {} return doedict
python
def makedoetree(ddict, bdict): """makedoetree""" dlist = list(ddict.keys()) blist = list(bdict.keys()) dlist.sort() blist.sort() #make space dict doesnot = 'DOES NOT' lst = [] for num in range(0, len(blist)): if bdict[blist[num]] == doesnot:#belong lst = lst + [blist[num]] doedict = {} for num in range(0, len(lst)): #print lst[num] doedict[lst[num]] = {} lv1list = list(doedict.keys()) lv1list.sort() #make wall dict #for each space for i in range(0, len(lv1list)): walllist = [] adict = doedict[lv1list[i]] #loop thru the entire blist dictonary and list the ones that belong into walllist for num in range(0, len(blist)): if bdict[blist[num]] == lv1list[i]: walllist = walllist + [blist[num]] #put walllist into dict for j in range(0, len(walllist)): adict[walllist[j]] = {} #make window dict #for each space for i in range(0, len(lv1list)): adict1 = doedict[lv1list[i]] #for each wall walllist = list(adict1.keys()) walllist.sort() for j in range(0, len(walllist)): windlist = [] adict2 = adict1[walllist[j]] #loop thru the entire blist dictonary and list the ones that belong into windlist for num in range(0, len(blist)): if bdict[blist[num]] == walllist[j]: windlist = windlist + [blist[num]] #put walllist into dict for k in range(0, len(windlist)): adict2[windlist[k]] = {} return doedict
[ "def", "makedoetree", "(", "ddict", ",", "bdict", ")", ":", "dlist", "=", "list", "(", "ddict", ".", "keys", "(", ")", ")", "blist", "=", "list", "(", "bdict", ".", "keys", "(", ")", ")", "dlist", ".", "sort", "(", ")", "blist", ".", "sort", "(", ")", "#make space dict", "doesnot", "=", "'DOES NOT'", "lst", "=", "[", "]", "for", "num", "in", "range", "(", "0", ",", "len", "(", "blist", ")", ")", ":", "if", "bdict", "[", "blist", "[", "num", "]", "]", "==", "doesnot", ":", "#belong", "lst", "=", "lst", "+", "[", "blist", "[", "num", "]", "]", "doedict", "=", "{", "}", "for", "num", "in", "range", "(", "0", ",", "len", "(", "lst", ")", ")", ":", "#print lst[num]", "doedict", "[", "lst", "[", "num", "]", "]", "=", "{", "}", "lv1list", "=", "list", "(", "doedict", ".", "keys", "(", ")", ")", "lv1list", ".", "sort", "(", ")", "#make wall dict", "#for each space", "for", "i", "in", "range", "(", "0", ",", "len", "(", "lv1list", ")", ")", ":", "walllist", "=", "[", "]", "adict", "=", "doedict", "[", "lv1list", "[", "i", "]", "]", "#loop thru the entire blist dictonary and list the ones that belong into walllist", "for", "num", "in", "range", "(", "0", ",", "len", "(", "blist", ")", ")", ":", "if", "bdict", "[", "blist", "[", "num", "]", "]", "==", "lv1list", "[", "i", "]", ":", "walllist", "=", "walllist", "+", "[", "blist", "[", "num", "]", "]", "#put walllist into dict", "for", "j", "in", "range", "(", "0", ",", "len", "(", "walllist", ")", ")", ":", "adict", "[", "walllist", "[", "j", "]", "]", "=", "{", "}", "#make window dict", "#for each space", "for", "i", "in", "range", "(", "0", ",", "len", "(", "lv1list", ")", ")", ":", "adict1", "=", "doedict", "[", "lv1list", "[", "i", "]", "]", "#for each wall", "walllist", "=", "list", "(", "adict1", ".", "keys", "(", ")", ")", "walllist", ".", "sort", "(", ")", "for", "j", "in", "range", "(", "0", ",", "len", "(", "walllist", ")", ")", ":", "windlist", "=", "[", "]", "adict2", "=", "adict1", "[", "walllist", "[", "j", "]", "]", "#loop thru the entire blist dictonary and list the ones that belong into windlist", "for", "num", "in", "range", "(", "0", ",", "len", "(", "blist", ")", ")", ":", "if", "bdict", "[", "blist", "[", "num", "]", "]", "==", "walllist", "[", "j", "]", ":", "windlist", "=", "windlist", "+", "[", "blist", "[", "num", "]", "]", "#put walllist into dict", "for", "k", "in", "range", "(", "0", ",", "len", "(", "windlist", ")", ")", ":", "adict2", "[", "windlist", "[", "k", "]", "]", "=", "{", "}", "return", "doedict" ]
makedoetree
[ "makedoetree" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L123-L173
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
tree2doe
def tree2doe(str1): """tree2doe""" retstuff = makedoedict(str1) ddict = makedoetree(retstuff[0], retstuff[1]) ddict = retstuff[0] retstuff[1] = {}# don't need it anymore str1 = ''#just re-using it l1list = list(ddict.keys()) l1list.sort() for i in range(0, len(l1list)): str1 = str1 + ddict[l1list[i]] l2list = list(ddict[l1list[i]].keys()) l2list.sort() for j in range(0, len(l2list)): str1 = str1 + ddict[l2list[j]] l3list = list(ddict[l1list[i]][l2list[j]].keys()) l3list.sort() for k in range(0, len(l3list)): str1 = str1 + ddict[l3list[k]] return str1
python
def tree2doe(str1): """tree2doe""" retstuff = makedoedict(str1) ddict = makedoetree(retstuff[0], retstuff[1]) ddict = retstuff[0] retstuff[1] = {}# don't need it anymore str1 = ''#just re-using it l1list = list(ddict.keys()) l1list.sort() for i in range(0, len(l1list)): str1 = str1 + ddict[l1list[i]] l2list = list(ddict[l1list[i]].keys()) l2list.sort() for j in range(0, len(l2list)): str1 = str1 + ddict[l2list[j]] l3list = list(ddict[l1list[i]][l2list[j]].keys()) l3list.sort() for k in range(0, len(l3list)): str1 = str1 + ddict[l3list[k]] return str1
[ "def", "tree2doe", "(", "str1", ")", ":", "retstuff", "=", "makedoedict", "(", "str1", ")", "ddict", "=", "makedoetree", "(", "retstuff", "[", "0", "]", ",", "retstuff", "[", "1", "]", ")", "ddict", "=", "retstuff", "[", "0", "]", "retstuff", "[", "1", "]", "=", "{", "}", "# don't need it anymore", "str1", "=", "''", "#just re-using it", "l1list", "=", "list", "(", "ddict", ".", "keys", "(", ")", ")", "l1list", ".", "sort", "(", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "l1list", ")", ")", ":", "str1", "=", "str1", "+", "ddict", "[", "l1list", "[", "i", "]", "]", "l2list", "=", "list", "(", "ddict", "[", "l1list", "[", "i", "]", "]", ".", "keys", "(", ")", ")", "l2list", ".", "sort", "(", ")", "for", "j", "in", "range", "(", "0", ",", "len", "(", "l2list", ")", ")", ":", "str1", "=", "str1", "+", "ddict", "[", "l2list", "[", "j", "]", "]", "l3list", "=", "list", "(", "ddict", "[", "l1list", "[", "i", "]", "]", "[", "l2list", "[", "j", "]", "]", ".", "keys", "(", ")", ")", "l3list", ".", "sort", "(", ")", "for", "k", "in", "range", "(", "0", ",", "len", "(", "l3list", ")", ")", ":", "str1", "=", "str1", "+", "ddict", "[", "l3list", "[", "k", "]", "]", "return", "str1" ]
tree2doe
[ "tree2doe" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L175-L195
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
mtabstr2doestr
def mtabstr2doestr(st1): """mtabstr2doestr""" seperator = '$ ==============' alist = st1.split(seperator) #this removes all the tabs that excel #puts after the seperator and before the next line for num in range(0, len(alist)): alist[num] = alist[num].lstrip() st2 = '' for num in range(0, len(alist)): alist = tabstr2list(alist[num]) st2 = st2 + list2doe(alist) lss = st2.split('..') mylib1.write_str2file('forfinal.txt', st2)#for debugging print(len(lss)) st3 = tree2doe(st2) lsss = st3.split('..') print(len(lsss)) return st3
python
def mtabstr2doestr(st1): """mtabstr2doestr""" seperator = '$ ==============' alist = st1.split(seperator) #this removes all the tabs that excel #puts after the seperator and before the next line for num in range(0, len(alist)): alist[num] = alist[num].lstrip() st2 = '' for num in range(0, len(alist)): alist = tabstr2list(alist[num]) st2 = st2 + list2doe(alist) lss = st2.split('..') mylib1.write_str2file('forfinal.txt', st2)#for debugging print(len(lss)) st3 = tree2doe(st2) lsss = st3.split('..') print(len(lsss)) return st3
[ "def", "mtabstr2doestr", "(", "st1", ")", ":", "seperator", "=", "'$ =============='", "alist", "=", "st1", ".", "split", "(", "seperator", ")", "#this removes all the tabs that excel", "#puts after the seperator and before the next line", "for", "num", "in", "range", "(", "0", ",", "len", "(", "alist", ")", ")", ":", "alist", "[", "num", "]", "=", "alist", "[", "num", "]", ".", "lstrip", "(", ")", "st2", "=", "''", "for", "num", "in", "range", "(", "0", ",", "len", "(", "alist", ")", ")", ":", "alist", "=", "tabstr2list", "(", "alist", "[", "num", "]", ")", "st2", "=", "st2", "+", "list2doe", "(", "alist", ")", "lss", "=", "st2", ".", "split", "(", "'..'", ")", "mylib1", ".", "write_str2file", "(", "'forfinal.txt'", ",", "st2", ")", "#for debugging", "print", "(", "len", "(", "lss", ")", ")", "st3", "=", "tree2doe", "(", "st2", ")", "lsss", "=", "st3", ".", "split", "(", "'..'", ")", "print", "(", "len", "(", "lsss", ")", ")", "return", "st3" ]
mtabstr2doestr
[ "mtabstr2doestr" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L197-L219
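One caution on the mtabstr2doestr record above: inside the loop the name alist is reassigned to the result of tabstr2list, so with more than one '$ ==============' section the next iteration indexes into the wrong list. A sketch of the loop with a separate name for the inner list, reflecting my reading of the intended behaviour:

st2 = ''
for num in range(0, len(alist)):
    blocklist = tabstr2list(alist[num])  # keep the outer section list intact
    st2 = st2 + list2doe(blocklist)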
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
getoneblock
def getoneblock(astr, start, end):
    """get the block bounded by start and end
    doesn't work for multiple blocks"""
    alist = astr.split(start)
    astr = alist[-1]
    alist = astr.split(end)
    astr = alist[0]
    return astr
python
def getoneblock(astr, start, end):
    """get the block bounded by start and end
    doesn't work for multiple blocks"""
    alist = astr.split(start)
    astr = alist[-1]
    alist = astr.split(end)
    astr = alist[0]
    return astr
[ "def", "getoneblock", "(", "astr", ",", "start", ",", "end", ")", ":", "alist", "=", "astr", ".", "split", "(", "start", ")", "astr", "=", "alist", "[", "-", "1", "]", "alist", "=", "astr", ".", "split", "(", "end", ")", "astr", "=", "alist", "[", "0", "]", "return", "astr" ]
get the block bounded by start and end doesn't work for multiple blocks
[ "get", "the", "block", "bounded", "by", "start", "and", "end", "doesn", "t", "work", "for", "multiple", "blocks" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L221-L228
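A tiny worked example for getoneblock above, with made-up markers:

getoneblock('a [b] c', '[', ']')  # returns 'b', the text between the last '[' and the following ']'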
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
doestr2tabstr
def doestr2tabstr(astr, kword): """doestr2tabstr""" alist = astr.split('..') del astr #strip junk put .. back for num in range(0, len(alist)): alist[num] = alist[num].strip() alist[num] = alist[num] + os.linesep + '..' + os.linesep alist.pop() lblock = [] for num in range(0, len(alist)): linels = alist[num].split(os.linesep) firstline = linels[0] assignls = firstline.split('=') keyword = assignls[-1].strip() if keyword == kword: lblock = lblock + [alist[num]] #print firstline #get all val lval = [] for num in range(0, len(lblock)): block = lblock[num] linel = block.split(os.linesep) lvalin = [] for k in range(0, len(linel)): line = linel[k] assignl = line.split('=') if k == 0: lvalin = lvalin + [assignl[0]] else: if assignl[-1] == '..': assignl[-1] = '.' lvalin = lvalin + [assignl[-1]] lvalin.pop() lval = lval + [lvalin] #get keywords kwordl = [] block = lblock[0] linel = block.split(os.linesep) for k in range(0, len(linel)): line = linel[k] assignl = line.split('=') if k == 0: kword = ' = ' + assignl[1].strip() else: if assignl[0] == '..': assignl[0] = '.' else: assignl[0] = assignl[0] + '=' kword = assignl[0].strip() kwordl = kwordl + [kword] kwordl.pop() astr = '' for num in range(0, len(kwordl)): linest = '' linest = linest + kwordl[num] for k in range(0, len(lval)): linest = linest + '\t' + lval[k][num] astr = astr + linest + os.linesep return astr
python
def doestr2tabstr(astr, kword): """doestr2tabstr""" alist = astr.split('..') del astr #strip junk put .. back for num in range(0, len(alist)): alist[num] = alist[num].strip() alist[num] = alist[num] + os.linesep + '..' + os.linesep alist.pop() lblock = [] for num in range(0, len(alist)): linels = alist[num].split(os.linesep) firstline = linels[0] assignls = firstline.split('=') keyword = assignls[-1].strip() if keyword == kword: lblock = lblock + [alist[num]] #print firstline #get all val lval = [] for num in range(0, len(lblock)): block = lblock[num] linel = block.split(os.linesep) lvalin = [] for k in range(0, len(linel)): line = linel[k] assignl = line.split('=') if k == 0: lvalin = lvalin + [assignl[0]] else: if assignl[-1] == '..': assignl[-1] = '.' lvalin = lvalin + [assignl[-1]] lvalin.pop() lval = lval + [lvalin] #get keywords kwordl = [] block = lblock[0] linel = block.split(os.linesep) for k in range(0, len(linel)): line = linel[k] assignl = line.split('=') if k == 0: kword = ' = ' + assignl[1].strip() else: if assignl[0] == '..': assignl[0] = '.' else: assignl[0] = assignl[0] + '=' kword = assignl[0].strip() kwordl = kwordl + [kword] kwordl.pop() astr = '' for num in range(0, len(kwordl)): linest = '' linest = linest + kwordl[num] for k in range(0, len(lval)): linest = linest + '\t' + lval[k][num] astr = astr + linest + os.linesep return astr
[ "def", "doestr2tabstr", "(", "astr", ",", "kword", ")", ":", "alist", "=", "astr", ".", "split", "(", "'..'", ")", "del", "astr", "#strip junk put .. back", "for", "num", "in", "range", "(", "0", ",", "len", "(", "alist", ")", ")", ":", "alist", "[", "num", "]", "=", "alist", "[", "num", "]", ".", "strip", "(", ")", "alist", "[", "num", "]", "=", "alist", "[", "num", "]", "+", "os", ".", "linesep", "+", "'..'", "+", "os", ".", "linesep", "alist", ".", "pop", "(", ")", "lblock", "=", "[", "]", "for", "num", "in", "range", "(", "0", ",", "len", "(", "alist", ")", ")", ":", "linels", "=", "alist", "[", "num", "]", ".", "split", "(", "os", ".", "linesep", ")", "firstline", "=", "linels", "[", "0", "]", "assignls", "=", "firstline", ".", "split", "(", "'='", ")", "keyword", "=", "assignls", "[", "-", "1", "]", ".", "strip", "(", ")", "if", "keyword", "==", "kword", ":", "lblock", "=", "lblock", "+", "[", "alist", "[", "num", "]", "]", "#print firstline", "#get all val", "lval", "=", "[", "]", "for", "num", "in", "range", "(", "0", ",", "len", "(", "lblock", ")", ")", ":", "block", "=", "lblock", "[", "num", "]", "linel", "=", "block", ".", "split", "(", "os", ".", "linesep", ")", "lvalin", "=", "[", "]", "for", "k", "in", "range", "(", "0", ",", "len", "(", "linel", ")", ")", ":", "line", "=", "linel", "[", "k", "]", "assignl", "=", "line", ".", "split", "(", "'='", ")", "if", "k", "==", "0", ":", "lvalin", "=", "lvalin", "+", "[", "assignl", "[", "0", "]", "]", "else", ":", "if", "assignl", "[", "-", "1", "]", "==", "'..'", ":", "assignl", "[", "-", "1", "]", "=", "'.'", "lvalin", "=", "lvalin", "+", "[", "assignl", "[", "-", "1", "]", "]", "lvalin", ".", "pop", "(", ")", "lval", "=", "lval", "+", "[", "lvalin", "]", "#get keywords", "kwordl", "=", "[", "]", "block", "=", "lblock", "[", "0", "]", "linel", "=", "block", ".", "split", "(", "os", ".", "linesep", ")", "for", "k", "in", "range", "(", "0", ",", "len", "(", "linel", ")", ")", ":", "line", "=", "linel", "[", "k", "]", "assignl", "=", "line", ".", "split", "(", "'='", ")", "if", "k", "==", "0", ":", "kword", "=", "' = '", "+", "assignl", "[", "1", "]", ".", "strip", "(", ")", "else", ":", "if", "assignl", "[", "0", "]", "==", "'..'", ":", "assignl", "[", "0", "]", "=", "'.'", "else", ":", "assignl", "[", "0", "]", "=", "assignl", "[", "0", "]", "+", "'='", "kword", "=", "assignl", "[", "0", "]", ".", "strip", "(", ")", "kwordl", "=", "kwordl", "+", "[", "kword", "]", "kwordl", ".", "pop", "(", ")", "astr", "=", "''", "for", "num", "in", "range", "(", "0", ",", "len", "(", "kwordl", ")", ")", ":", "linest", "=", "''", "linest", "=", "linest", "+", "kwordl", "[", "num", "]", "for", "k", "in", "range", "(", "0", ",", "len", "(", "lval", ")", ")", ":", "linest", "=", "linest", "+", "'\\t'", "+", "lval", "[", "k", "]", "[", "num", "]", "astr", "=", "astr", "+", "linest", "+", "os", ".", "linesep", "return", "astr" ]
doestr2tabstr
[ "doestr2tabstr" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L230-L294
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
myreplace
def myreplace(astr, thefind, thereplace):
    """in string astr replace all occurences of thefind with thereplace"""
    alist = astr.split(thefind)
    new_s = thereplace.join(alist)
    return new_s
python
def myreplace(astr, thefind, thereplace):
    """in string astr replace all occurences of thefind with thereplace"""
    alist = astr.split(thefind)
    new_s = thereplace.join(alist)
    return new_s
[ "def", "myreplace", "(", "astr", ",", "thefind", ",", "thereplace", ")", ":", "alist", "=", "astr", ".", "split", "(", "thefind", ")", "new_s", "=", "thereplace", ".", "join", "(", "alist", ")", "return", "new_s" ]
in string astr replace all occurences of thefind with thereplace
[ "in", "string", "astr", "replace", "all", "occurences", "of", "thefind", "with", "thereplace" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L296-L300
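With the join-based line shown above (split on the search string, then join with the replacement), the helper behaves like str.replace; a quick check with made-up arguments:

myreplace('a-b-c', '-', '+')  # returns 'a+b+c', same as 'a-b-c'.replace('-', '+')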
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
fsliceafter
def fsliceafter(astr, sub):
    """Return the slice after at sub in string astr"""
    findex = astr.find(sub)
    return astr[findex + len(sub):]
python
def fsliceafter(astr, sub):
    """Return the slice after at sub in string astr"""
    findex = astr.find(sub)
    return astr[findex + len(sub):]
[ "def", "fsliceafter", "(", "astr", ",", "sub", ")", ":", "findex", "=", "astr", ".", "find", "(", "sub", ")", "return", "astr", "[", "findex", "+", "len", "(", "sub", ")", ":", "]" ]
Return the slice after at sub in string astr
[ "Return", "the", "slice", "after", "at", "sub", "in", "string", "astr" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L307-L310
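A small worked example for fsliceafter above, using an invented string:

fsliceafter('name = value', '= ')  # returns 'value', everything after the first '= '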
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib2.py
pickledump
def pickledump(theobject, fname):
    """same as pickle.dump(theobject, fhandle).takes filename as parameter"""
    fhandle = open(fname, 'wb')
    pickle.dump(theobject, fhandle)
python
def pickledump(theobject, fname):
    """same as pickle.dump(theobject, fhandle).takes filename as parameter"""
    fhandle = open(fname, 'wb')
    pickle.dump(theobject, fhandle)
[ "def", "pickledump", "(", "theobject", ",", "fname", ")", ":", "fhandle", "=", "open", "(", "fname", ",", "'wb'", ")", "pickle", ".", "dump", "(", "theobject", ",", "fhandle", ")" ]
same as pickle.dump(theobject, fhandle).takes filename as parameter
[ "same", "as", "pickle", ".", "dump", "(", "theobject", "fhandle", ")", ".", "takes", "filename", "as", "parameter" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib2.py#L317-L320
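A short round-trip sketch for pickledump above; the file name is a placeholder, and reading back uses plain pickle.load since no matching loader appears in this record. Note that pickledump never closes the handle explicitly, so it relies on the interpreter flushing the file when the local goes out of scope.

import pickle
pickledump({'zone': 'z1'}, 'obj.pkl')   # write the object to disk
with open('obj.pkl', 'rb') as fhandle:  # read it back with plain pickle
    data = pickle.load(fhandle)         # data == {'zone': 'z1'}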
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/mylib1.py
write_str2file
def write_str2file(pathname, astr):
    """writes a string to file"""
    fname = pathname
    fhandle = open(fname, 'wb')
    fhandle.write(astr)
    fhandle.close()
python
def write_str2file(pathname, astr):
    """writes a string to file"""
    fname = pathname
    fhandle = open(fname, 'wb')
    fhandle.write(astr)
    fhandle.close()
[ "def", "write_str2file", "(", "pathname", ",", "astr", ")", ":", "fname", "=", "pathname", "fhandle", "=", "open", "(", "fname", ",", "'wb'", ")", "fhandle", ".", "write", "(", "astr", ")", "fhandle", ".", "close", "(", ")" ]
writes a string to file
[ "writes", "a", "string", "to", "file" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/mylib1.py#L14-L19
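Because write_str2file above opens the file in 'wb' mode, on Python 3 the caller has to pass bytes rather than str; a hedged example with a made-up path and the same latin-1 encoding used elsewhere in the package:

write_str2file('out.txt', 'hello world'.encode('latin-1'))  # bytes keep the binary-mode write happy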
santoshphilip/eppy
eppy/geometry/int2lines.py
vol_tehrahedron
def vol_tehrahedron(poly):
    """volume of a irregular tetrahedron"""
    a_pnt = np.array(poly[0])
    b_pnt = np.array(poly[1])
    c_pnt = np.array(poly[2])
    d_pnt = np.array(poly[3])
    return abs(np.dot(
        (a_pnt-d_pnt),
        np.cross((b_pnt-d_pnt), (c_pnt-d_pnt))) / 6)
python
def vol_tehrahedron(poly):
    """volume of a irregular tetrahedron"""
    a_pnt = np.array(poly[0])
    b_pnt = np.array(poly[1])
    c_pnt = np.array(poly[2])
    d_pnt = np.array(poly[3])
    return abs(np.dot(
        (a_pnt-d_pnt),
        np.cross((b_pnt-d_pnt), (c_pnt-d_pnt))) / 6)
[ "def", "vol_tehrahedron", "(", "poly", ")", ":", "a_pnt", "=", "np", ".", "array", "(", "poly", "[", "0", "]", ")", "b_pnt", "=", "np", ".", "array", "(", "poly", "[", "1", "]", ")", "c_pnt", "=", "np", ".", "array", "(", "poly", "[", "2", "]", ")", "d_pnt", "=", "np", ".", "array", "(", "poly", "[", "3", "]", ")", "return", "abs", "(", "np", ".", "dot", "(", "(", "a_pnt", "-", "d_pnt", ")", ",", "np", ".", "cross", "(", "(", "b_pnt", "-", "d_pnt", ")", ",", "(", "c_pnt", "-", "d_pnt", ")", ")", ")", "/", "6", ")" ]
volume of a irregular tetrahedron
[ "volume", "of", "a", "irregular", "tetrahedron" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/geometry/int2lines.py#L22-L29
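A quick numeric check of vol_tehrahedron above: the unit tetrahedron has volume 1/6, which matches the scalar triple product formula abs((a-d) . ((b-d) x (c-d))) / 6 used in the code.

import numpy as np  # the module above already relies on numpy imported as np
unit_tet = [(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)]
vol_tehrahedron(unit_tet)  # returns 0.1666..., i.e. 1/6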
santoshphilip/eppy
eppy/geometry/int2lines.py
vol_zone
def vol_zone(poly1, poly2): """"volume of a zone defined by two polygon bases """ c_point = central_p(poly1, poly2) c_point = (c_point[0], c_point[1], c_point[2]) vol_therah = 0 num = len(poly1) for i in range(num-2): # the upper part tehrahedron = [c_point, poly1[0], poly1[i+1], poly1[i+2]] vol_therah += vol_tehrahedron(tehrahedron) # the bottom part tehrahedron = [c_point, poly2[0], poly2[i+1], poly2[i+2]] vol_therah += vol_tehrahedron(tehrahedron) # the middle part for i in range(num-1): tehrahedron = [c_point, poly1[i], poly2[i], poly2[i+1]] vol_therah += vol_tehrahedron(tehrahedron) tehrahedron = [c_point, poly1[i], poly1[i+1], poly2[i]] vol_therah += vol_tehrahedron(tehrahedron) tehrahedron = [c_point, poly1[num-1], poly2[num-1], poly2[0]] vol_therah += vol_tehrahedron(tehrahedron) tehrahedron = [c_point, poly1[num-1], poly1[0], poly2[0]] vol_therah += vol_tehrahedron(tehrahedron) return vol_therah
python
def vol_zone(poly1, poly2): """"volume of a zone defined by two polygon bases """ c_point = central_p(poly1, poly2) c_point = (c_point[0], c_point[1], c_point[2]) vol_therah = 0 num = len(poly1) for i in range(num-2): # the upper part tehrahedron = [c_point, poly1[0], poly1[i+1], poly1[i+2]] vol_therah += vol_tehrahedron(tehrahedron) # the bottom part tehrahedron = [c_point, poly2[0], poly2[i+1], poly2[i+2]] vol_therah += vol_tehrahedron(tehrahedron) # the middle part for i in range(num-1): tehrahedron = [c_point, poly1[i], poly2[i], poly2[i+1]] vol_therah += vol_tehrahedron(tehrahedron) tehrahedron = [c_point, poly1[i], poly1[i+1], poly2[i]] vol_therah += vol_tehrahedron(tehrahedron) tehrahedron = [c_point, poly1[num-1], poly2[num-1], poly2[0]] vol_therah += vol_tehrahedron(tehrahedron) tehrahedron = [c_point, poly1[num-1], poly1[0], poly2[0]] vol_therah += vol_tehrahedron(tehrahedron) return vol_therah
[ "def", "vol_zone", "(", "poly1", ",", "poly2", ")", ":", "c_point", "=", "central_p", "(", "poly1", ",", "poly2", ")", "c_point", "=", "(", "c_point", "[", "0", "]", ",", "c_point", "[", "1", "]", ",", "c_point", "[", "2", "]", ")", "vol_therah", "=", "0", "num", "=", "len", "(", "poly1", ")", "for", "i", "in", "range", "(", "num", "-", "2", ")", ":", "# the upper part", "tehrahedron", "=", "[", "c_point", ",", "poly1", "[", "0", "]", ",", "poly1", "[", "i", "+", "1", "]", ",", "poly1", "[", "i", "+", "2", "]", "]", "vol_therah", "+=", "vol_tehrahedron", "(", "tehrahedron", ")", "# the bottom part", "tehrahedron", "=", "[", "c_point", ",", "poly2", "[", "0", "]", ",", "poly2", "[", "i", "+", "1", "]", ",", "poly2", "[", "i", "+", "2", "]", "]", "vol_therah", "+=", "vol_tehrahedron", "(", "tehrahedron", ")", "# the middle part", "for", "i", "in", "range", "(", "num", "-", "1", ")", ":", "tehrahedron", "=", "[", "c_point", ",", "poly1", "[", "i", "]", ",", "poly2", "[", "i", "]", ",", "poly2", "[", "i", "+", "1", "]", "]", "vol_therah", "+=", "vol_tehrahedron", "(", "tehrahedron", ")", "tehrahedron", "=", "[", "c_point", ",", "poly1", "[", "i", "]", ",", "poly1", "[", "i", "+", "1", "]", ",", "poly2", "[", "i", "]", "]", "vol_therah", "+=", "vol_tehrahedron", "(", "tehrahedron", ")", "tehrahedron", "=", "[", "c_point", ",", "poly1", "[", "num", "-", "1", "]", ",", "poly2", "[", "num", "-", "1", "]", ",", "poly2", "[", "0", "]", "]", "vol_therah", "+=", "vol_tehrahedron", "(", "tehrahedron", ")", "tehrahedron", "=", "[", "c_point", ",", "poly1", "[", "num", "-", "1", "]", ",", "poly1", "[", "0", "]", ",", "poly2", "[", "0", "]", "]", "vol_therah", "+=", "vol_tehrahedron", "(", "tehrahedron", ")", "return", "vol_therah" ]
volume of a zone defined by two polygon bases
[ "volume", "of", "a", "zone", "defined", "by", "two", "polygon", "bases" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/geometry/int2lines.py#L43-L66
santoshphilip/eppy
eppy/__init__.py
newidf
def newidf(version=None):
    """open a new idf file

    easy way to open a new idf file for particular version. Works only id
    Energyplus of that version is installed.

    Parameters
    ----------
    version: string
        version of the new file you want to create. Will work only if this
        version of Energyplus has been installed.

    Returns
    -------
    idf
        file of type eppy.modelmake.IDF
    """  # noqa: E501
    if not version:
        version = "8.9"
    import eppy.easyopen as easyopen
    idfstring = " Version,{};".format(str(version))
    fhandle = StringIO(idfstring)
    return easyopen.easyopen(fhandle)
python
def newidf(version=None):
    """open a new idf file

    easy way to open a new idf file for particular version. Works only id
    Energyplus of that version is installed.

    Parameters
    ----------
    version: string
        version of the new file you want to create. Will work only if this
        version of Energyplus has been installed.

    Returns
    -------
    idf
        file of type eppy.modelmake.IDF
    """  # noqa: E501
    if not version:
        version = "8.9"
    import eppy.easyopen as easyopen
    idfstring = " Version,{};".format(str(version))
    fhandle = StringIO(idfstring)
    return easyopen.easyopen(fhandle)
[ "def", "newidf", "(", "version", "=", "None", ")", ":", "# noqa: E501", "if", "not", "version", ":", "version", "=", "\"8.9\"", "import", "eppy", ".", "easyopen", "as", "easyopen", "idfstring", "=", "\" Version,{};\"", ".", "format", "(", "str", "(", "version", ")", ")", "fhandle", "=", "StringIO", "(", "idfstring", ")", "return", "easyopen", ".", "easyopen", "(", "fhandle", ")" ]
open a new idf file

easy way to open a new idf file for particular version. Works only id
Energyplus of that version is installed.

Parameters
----------
version: string
    version of the new file you want to create. Will work only if this
    version of Energyplus has been installed.

Returns
-------
idf
    file of type eppy.modelmake.IDF
[ "open", "a", "new", "idf", "file", "easy", "way", "to", "open", "a", "new", "idf", "file", "for", "particular", "version", ".", "Works", "only", "id", "Energyplus", "of", "that", "version", "is", "installed", ".", "Parameters", "----------", "version", ":", "string", "version", "of", "the", "new", "file", "you", "want", "to", "create", ".", "Will", "work", "only", "if", "this", "version", "of", "Energyplus", "has", "been", "installed", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/__init__.py#L25-L45
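Usage sketch for newidf above; the version string must match an EnergyPlus release that is actually installed, so '9.1' here is only an example:

idf1 = newidf()       # falls back to version 8.9 as in the code above
idf2 = newidf('9.1')  # works only if EnergyPlus 9.1 is installed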
santoshphilip/eppy
eppy/__init__.py
openidf
def openidf(fname, idd=None, epw=None): """automatically set idd and open idf file. Uses version from idf to set correct idd It will work under the following circumstances: - the IDF file should have the VERSION object. - Needs the version of EnergyPlus installed that matches the IDF version. - Energyplus should be installed in the default location. Parameters ---------- fname : str, StringIO or IOBase Filepath IDF file, File handle of IDF file open to read StringIO with IDF contents within idd : str, StringIO or IOBase This is an optional argument. easyopen will find the IDD without this arg Filepath IDD file, File handle of IDD file open to read StringIO with IDD contents within epw : str path name to the weather file. This arg is needed to run EneryPlus from eppy. """ import eppy.easyopen as easyopen return easyopen.easyopen(fname, idd=idd, epw=epw)
python
def openidf(fname, idd=None, epw=None): """automatically set idd and open idf file. Uses version from idf to set correct idd It will work under the following circumstances: - the IDF file should have the VERSION object. - Needs the version of EnergyPlus installed that matches the IDF version. - Energyplus should be installed in the default location. Parameters ---------- fname : str, StringIO or IOBase Filepath IDF file, File handle of IDF file open to read StringIO with IDF contents within idd : str, StringIO or IOBase This is an optional argument. easyopen will find the IDD without this arg Filepath IDD file, File handle of IDD file open to read StringIO with IDD contents within epw : str path name to the weather file. This arg is needed to run EneryPlus from eppy. """ import eppy.easyopen as easyopen return easyopen.easyopen(fname, idd=idd, epw=epw)
[ "def", "openidf", "(", "fname", ",", "idd", "=", "None", ",", "epw", "=", "None", ")", ":", "import", "eppy", ".", "easyopen", "as", "easyopen", "return", "easyopen", ".", "easyopen", "(", "fname", ",", "idd", "=", "idd", ",", "epw", "=", "epw", ")" ]
automatically set idd and open idf file. Uses version from idf to set correct idd It will work under the following circumstances: - the IDF file should have the VERSION object. - Needs the version of EnergyPlus installed that matches the IDF version. - Energyplus should be installed in the default location. Parameters ---------- fname : str, StringIO or IOBase Filepath IDF file, File handle of IDF file open to read StringIO with IDF contents within idd : str, StringIO or IOBase This is an optional argument. easyopen will find the IDD without this arg Filepath IDD file, File handle of IDD file open to read StringIO with IDD contents within epw : str path name to the weather file. This arg is needed to run EneryPlus from eppy.
[ "automatically", "set", "idd", "and", "open", "idf", "file", ".", "Uses", "version", "from", "idf", "to", "set", "correct", "idd", "It", "will", "work", "under", "the", "following", "circumstances", ":", "-", "the", "IDF", "file", "should", "have", "the", "VERSION", "object", ".", "-", "Needs", "the", "version", "of", "EnergyPlus", "installed", "that", "matches", "the", "IDF", "version", ".", "-", "Energyplus", "should", "be", "installed", "in", "the", "default", "location", "." ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/__init__.py#L47-L70
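A minimal call of openidf above; the file names are placeholders, and the IDF must carry a VERSION object so that the matching IDD can be located automatically:

idf1 = openidf('my_model.idf', epw='my_weather.epw')  # idd is optional and found from the version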
santoshphilip/eppy
eppy/simplesurface.py
wallinterzone
def wallinterzone(idf, bsdobject, deletebsd=True, setto000=False): """return an wall:interzone object if the bsd (buildingsurface:detailed) is an interaone wall""" # ('WALL:INTERZONE', Wall, Surface OR Zone OR OtherSideCoefficients) # test if it is an exterior wall if bsdobject.Surface_Type.upper() == 'WALL': # Surface_Type == wall if bsdobject.Outside_Boundary_Condition.upper() in ('SURFACE', 'ZONE', 'OtherSideCoefficients'.upper()): simpleobject = idf.newidfobject('WALL:INTERZONE') simpleobject.Name = bsdobject.Name simpleobject.Construction_Name = bsdobject.Construction_Name simpleobject.Zone_Name = bsdobject.Zone_Name obco = 'Outside_Boundary_Condition_Object' simpleobject[obco] = bsdobject[obco] simpleobject.Azimuth_Angle = bsdobject.azimuth simpleobject.Tilt_Angle = bsdobject.tilt surforigin = bsdorigin(bsdobject, setto000=setto000) simpleobject.Starting_X_Coordinate = surforigin[0] simpleobject.Starting_Y_Coordinate = surforigin[1] simpleobject.Starting_Z_Coordinate = surforigin[2] simpleobject.Length = bsdobject.width simpleobject.Height = bsdobject.height if deletebsd: idf.removeidfobject(bsdobject) return simpleobject return None
python
def wallinterzone(idf, bsdobject, deletebsd=True, setto000=False): """return an wall:interzone object if the bsd (buildingsurface:detailed) is an interaone wall""" # ('WALL:INTERZONE', Wall, Surface OR Zone OR OtherSideCoefficients) # test if it is an exterior wall if bsdobject.Surface_Type.upper() == 'WALL': # Surface_Type == wall if bsdobject.Outside_Boundary_Condition.upper() in ('SURFACE', 'ZONE', 'OtherSideCoefficients'.upper()): simpleobject = idf.newidfobject('WALL:INTERZONE') simpleobject.Name = bsdobject.Name simpleobject.Construction_Name = bsdobject.Construction_Name simpleobject.Zone_Name = bsdobject.Zone_Name obco = 'Outside_Boundary_Condition_Object' simpleobject[obco] = bsdobject[obco] simpleobject.Azimuth_Angle = bsdobject.azimuth simpleobject.Tilt_Angle = bsdobject.tilt surforigin = bsdorigin(bsdobject, setto000=setto000) simpleobject.Starting_X_Coordinate = surforigin[0] simpleobject.Starting_Y_Coordinate = surforigin[1] simpleobject.Starting_Z_Coordinate = surforigin[2] simpleobject.Length = bsdobject.width simpleobject.Height = bsdobject.height if deletebsd: idf.removeidfobject(bsdobject) return simpleobject return None
[ "def", "wallinterzone", "(", "idf", ",", "bsdobject", ",", "deletebsd", "=", "True", ",", "setto000", "=", "False", ")", ":", "# ('WALL:INTERZONE', Wall, Surface OR Zone OR OtherSideCoefficients)", "# test if it is an exterior wall", "if", "bsdobject", ".", "Surface_Type", ".", "upper", "(", ")", "==", "'WALL'", ":", "# Surface_Type == wall", "if", "bsdobject", ".", "Outside_Boundary_Condition", ".", "upper", "(", ")", "in", "(", "'SURFACE'", ",", "'ZONE'", ",", "'OtherSideCoefficients'", ".", "upper", "(", ")", ")", ":", "simpleobject", "=", "idf", ".", "newidfobject", "(", "'WALL:INTERZONE'", ")", "simpleobject", ".", "Name", "=", "bsdobject", ".", "Name", "simpleobject", ".", "Construction_Name", "=", "bsdobject", ".", "Construction_Name", "simpleobject", ".", "Zone_Name", "=", "bsdobject", ".", "Zone_Name", "obco", "=", "'Outside_Boundary_Condition_Object'", "simpleobject", "[", "obco", "]", "=", "bsdobject", "[", "obco", "]", "simpleobject", ".", "Azimuth_Angle", "=", "bsdobject", ".", "azimuth", "simpleobject", ".", "Tilt_Angle", "=", "bsdobject", ".", "tilt", "surforigin", "=", "bsdorigin", "(", "bsdobject", ",", "setto000", "=", "setto000", ")", "simpleobject", ".", "Starting_X_Coordinate", "=", "surforigin", "[", "0", "]", "simpleobject", ".", "Starting_Y_Coordinate", "=", "surforigin", "[", "1", "]", "simpleobject", ".", "Starting_Z_Coordinate", "=", "surforigin", "[", "2", "]", "simpleobject", ".", "Length", "=", "bsdobject", ".", "width", "simpleobject", ".", "Height", "=", "bsdobject", ".", "height", "if", "deletebsd", ":", "idf", ".", "removeidfobject", "(", "bsdobject", ")", "return", "simpleobject", "return", "None" ]
return a wall:interzone object if the bsd (buildingsurface:detailed) is an interzone wall
[ "return", "an", "wall", ":", "interzone", "object", "if", "the", "bsd", "(", "buildingsurface", ":", "detailed", ")", "is", "an", "interaone", "wall" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/simplesurface.py#L168-L192
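A minimal usage sketch for wallinterzone, assuming eppy's IDF class and placeholder IDD/IDF file paths (the paths and output name below are illustrative, not taken from the source):

# Convert interzone BuildingSurface:Detailed walls into WALL:INTERZONE objects.
# The IDD and IDF paths are placeholders; only the eppy calls shown are assumed to exist.
from eppy.modeleditor import IDF
from eppy.simplesurface import wallinterzone

IDF.setiddname("Energy+.idd")      # placeholder path to the EnergyPlus IDD
idf = IDF("building.idf")          # placeholder path to a model with detailed surfaces

# iterate over a copy, because deletebsd=True removes converted objects from the model
for bsd in list(idf.idfobjects["BUILDINGSURFACE:DETAILED"]):
    new_wall = wallinterzone(idf, bsd, deletebsd=True)  # None if bsd is not an interzone wall

idf.saveas("building_simplified.idf")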
santoshphilip/eppy
eppy/simplesurface.py
door
def door(idf, fsdobject, deletebsd=True, setto000=False):
    """return a door object if the fsd (fenestrationsurface:detailed) is a door"""
    # ('DOOR', Door, None)
    # test if it is a door
    if fsdobject.Surface_Type.upper() == 'DOOR':  # Surface_Type == door
        simpleobject = idf.newidfobject('DOOR')
        simpleobject.Name = fsdobject.Name
        simpleobject.Construction_Name = fsdobject.Construction_Name
        simpleobject.Building_Surface_Name = fsdobject.Building_Surface_Name
        simpleobject.Multiplier = fsdobject.Multiplier
        surforigin = fsdorigin(fsdobject, setto000=setto000)
        simpleobject.Starting_X_Coordinate = surforigin[0]
        simpleobject.Starting_Z_Coordinate = surforigin[1]
        simpleobject.Length = fsdobject.width
        simpleobject.Height = fsdobject.height
        if deletebsd:
            idf.removeidfobject(fsdobject)
        return simpleobject
    return None
python
def door(idf, fsdobject, deletebsd=True, setto000=False):
    """return a door object if the fsd (fenestrationsurface:detailed) is a door"""
    # ('DOOR', Door, None)
    # test if it is a door
    if fsdobject.Surface_Type.upper() == 'DOOR':  # Surface_Type == door
        simpleobject = idf.newidfobject('DOOR')
        simpleobject.Name = fsdobject.Name
        simpleobject.Construction_Name = fsdobject.Construction_Name
        simpleobject.Building_Surface_Name = fsdobject.Building_Surface_Name
        simpleobject.Multiplier = fsdobject.Multiplier
        surforigin = fsdorigin(fsdobject, setto000=setto000)
        simpleobject.Starting_X_Coordinate = surforigin[0]
        simpleobject.Starting_Z_Coordinate = surforigin[1]
        simpleobject.Length = fsdobject.width
        simpleobject.Height = fsdobject.height
        if deletebsd:
            idf.removeidfobject(fsdobject)
        return simpleobject
    return None
[ "def", "door", "(", "idf", ",", "fsdobject", ",", "deletebsd", "=", "True", ",", "setto000", "=", "False", ")", ":", "# ('DOOR', Door, None)", "# test if it is aroof", "if", "fsdobject", ".", "Surface_Type", ".", "upper", "(", ")", "==", "'DOOR'", ":", "# Surface_Type == w", "simpleobject", "=", "idf", ".", "newidfobject", "(", "'DOOR'", ")", "simpleobject", ".", "Name", "=", "fsdobject", ".", "Name", "simpleobject", ".", "Construction_Name", "=", "fsdobject", ".", "Construction_Name", "simpleobject", ".", "Building_Surface_Name", "=", "fsdobject", ".", "Building_Surface_Name", "simpleobject", ".", "Multiplier", "=", "fsdobject", ".", "Multiplier", "surforigin", "=", "fsdorigin", "(", "fsdobject", ",", "setto000", "=", "setto000", ")", "simpleobject", ".", "Starting_X_Coordinate", "=", "surforigin", "[", "0", "]", "simpleobject", ".", "Starting_Z_Coordinate", "=", "surforigin", "[", "1", "]", "simpleobject", ".", "Length", "=", "fsdobject", ".", "width", "simpleobject", ".", "Height", "=", "fsdobject", ".", "height", "if", "deletebsd", ":", "idf", ".", "removeidfobject", "(", "fsdobject", ")", "return", "simpleobject", "return", "None" ]
return a door object if the fsd (fenestrationsurface:detailed) is a door
[ "return", "an", "door", "object", "if", "the", "fsd", "(", "fenestrationsurface", ":", "detailed", ")", "is", "a", "door" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/simplesurface.py#L365-L384
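A companion sketch for door, reusing the idf object set up in the wallinterzone sketch above; unlike the wall converters, door operates on FenestrationSurface:Detailed objects:

from eppy.simplesurface import door

# reuses the idf object from the previous sketch; iterate over a copy because
# deletebsd=True removes converted objects from the model
for fsd in list(idf.idfobjects["FENESTRATIONSURFACE:DETAILED"]):
    door(idf, fsd, deletebsd=True)  # returns a DOOR object, or None if fsd is not a door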
santoshphilip/eppy
eppy/simplesurface.py
simplesurface
def simplesurface(idf, bsd, deletebsd=True, setto000=False):
    """convert a bsd (buildingsurface:detailed) into a simple surface"""
    funcs = (wallexterior,
             walladiabatic,
             wallunderground,
             wallinterzone,
             roof,
             ceilingadiabatic,
             ceilinginterzone,
             floorgroundcontact,
             flooradiabatic,
             floorinterzone,)
    for func in funcs:
        surface = func(idf, bsd, deletebsd=deletebsd, setto000=setto000)
        if surface:
            return surface
    return None
python
def simplesurface(idf, bsd, deletebsd=True, setto000=False):
    """convert a bsd (buildingsurface:detailed) into a simple surface"""
    funcs = (wallexterior,
             walladiabatic,
             wallunderground,
             wallinterzone,
             roof,
             ceilingadiabatic,
             ceilinginterzone,
             floorgroundcontact,
             flooradiabatic,
             floorinterzone,)
    for func in funcs:
        surface = func(idf, bsd, deletebsd=deletebsd, setto000=setto000)
        if surface:
            return surface
    return None
[ "def", "simplesurface", "(", "idf", ",", "bsd", ",", "deletebsd", "=", "True", ",", "setto000", "=", "False", ")", ":", "funcs", "=", "(", "wallexterior", ",", "walladiabatic", ",", "wallunderground", ",", "wallinterzone", ",", "roof", ",", "ceilingadiabatic", ",", "ceilinginterzone", ",", "floorgroundcontact", ",", "flooradiabatic", ",", "floorinterzone", ",", ")", "for", "func", "in", "funcs", ":", "surface", "=", "func", "(", "idf", ",", "bsd", ",", "deletebsd", "=", "deletebsd", ",", "setto000", "=", "setto000", ")", "if", "surface", ":", "return", "surface", "return", "None" ]
convert a bsd (buildingsurface:detailed) into a simple surface
[ "convert", "a", "bsd", "(", "buildingsurface", ":", "detailed", ")", "into", "a", "simple", "surface" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/simplesurface.py#L409-L425
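A sketch of the typical entry point: simplesurface tries each wall, roof, ceiling, and floor converter in turn, so an entire model can be simplified in one loop. File paths are placeholders, as before:

from eppy.modeleditor import IDF
from eppy.simplesurface import simplesurface

IDF.setiddname("Energy+.idd")      # placeholder IDD path
idf = IDF("building.idf")          # placeholder model path

converted = []
for bsd in list(idf.idfobjects["BUILDINGSURFACE:DETAILED"]):
    surface = simplesurface(idf, bsd, deletebsd=True, setto000=False)
    if surface is not None:
        converted.append(surface)

print("converted %d detailed surfaces" % len(converted))
idf.saveas("building_simplified.idf")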