Dataset columns (name, type, value-length range):
  repository_name             string, length 7 to 55
  func_path_in_repository     string, length 4 to 223
  func_name                   string, length 1 to 134
  whole_func_string           string, length 75 to 104k
  language                    string, 1 class
  func_code_string            string, length 75 to 104k
  func_code_tokens            list, length 19 to 28.4k
  func_documentation_string   string, length 1 to 46.9k
  func_documentation_tokens   list, length 1 to 1.97k
  split_name                  string, 1 class
  func_code_url               string, length 87 to 315
santoshphilip/eppy
eppy/idf_helpers.py
getobject_use_prevfield
def getobject_use_prevfield(idf, idfobject, fieldname):
    """field=object_name, prev_field=object_type. Return the object"""
    if not fieldname.endswith("Name"):
        return None
    # test if prevfieldname ends with "Object_Type"
    fdnames = idfobject.fieldnames
    ifieldname = fdnames.index(fieldname)
    prevfdname = fdnames[ifieldname - 1]
    if not prevfdname.endswith("Object_Type"):
        return None
    objkey = idfobject[prevfdname].upper()
    objname = idfobject[fieldname]
    try:
        foundobj = idf.getobject(objkey, objname)
    except KeyError:
        return None
    return foundobj
python
[ "def", "getobject_use_prevfield", "(", "idf", ",", "idfobject", ",", "fieldname", ")", ":", "if", "not", "fieldname", ".", "endswith", "(", "\"Name\"", ")", ":", "return", "None", "# test if prevfieldname ends with \"Object_Type\"", "fdnames", "=", "idfobject", ".", "fieldnames", "ifieldname", "=", "fdnames", ".", "index", "(", "fieldname", ")", "prevfdname", "=", "fdnames", "[", "ifieldname", "-", "1", "]", "if", "not", "prevfdname", ".", "endswith", "(", "\"Object_Type\"", ")", ":", "return", "None", "objkey", "=", "idfobject", "[", "prevfdname", "]", ".", "upper", "(", ")", "objname", "=", "idfobject", "[", "fieldname", "]", "try", ":", "foundobj", "=", "idf", ".", "getobject", "(", "objkey", ",", "objname", ")", "except", "KeyError", "as", "e", ":", "return", "None", "return", "foundobj" ]
field=object_name, prev_field=object_type. Return the object
[ "field", "=", "object_name", "prev_field", "=", "object_type", ".", "Return", "the", "object" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/idf_helpers.py#L44-L60
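A minimal usage sketch for the function above; the IDD/IDF paths and the BRANCH field name are placeholders, not from the source:

# Hedged sketch: resolve the object named in Component_1_Name via the
# preceding Component_1_Object_Type field. Paths are placeholders.
from eppy.modeleditor import IDF

IDF.setiddname("Energy+.idd")    # placeholder IDD path
idf = IDF("in.idf")              # placeholder IDF path
branch = idf.idfobjects["BRANCH"][0]
found = getobject_use_prevfield(idf, branch, "Component_1_Name")
print(found)                     # the referenced component object, or None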
santoshphilip/eppy
eppy/idf_helpers.py
getidfkeyswithnodes
def getidfkeyswithnodes():
    """return a list of keys of idfobjects that have 'Node_Name' fields"""
    idf = IDF(StringIO(""))
    keys = idfobjectkeys(idf)
    keysfieldnames = ((key, idf.newidfobject(key.upper()).fieldnames)
                      for key in keys)
    keysnodefdnames = ((key, (name for name in fdnames
                              if name.endswith('Node_Name')))
                       for key, fdnames in keysfieldnames)
    nodekeys = [key for key, fdnames in keysnodefdnames
                if list(fdnames)]
    return nodekeys
python
[ "def", "getidfkeyswithnodes", "(", ")", ":", "idf", "=", "IDF", "(", "StringIO", "(", "\"\"", ")", ")", "keys", "=", "idfobjectkeys", "(", "idf", ")", "keysfieldnames", "=", "(", "(", "key", ",", "idf", ".", "newidfobject", "(", "key", ".", "upper", "(", ")", ")", ".", "fieldnames", ")", "for", "key", "in", "keys", ")", "keysnodefdnames", "=", "(", "(", "key", ",", "(", "name", "for", "name", "in", "fdnames", "if", "(", "name", ".", "endswith", "(", "'Node_Name'", ")", ")", ")", ")", "for", "key", ",", "fdnames", "in", "keysfieldnames", ")", "nodekeys", "=", "[", "key", "for", "key", ",", "fdnames", "in", "keysnodefdnames", "if", "list", "(", "fdnames", ")", "]", "return", "nodekeys" ]
return a list of keys of idfobjects that have 'Node_Name' fields
[ "return", "a", "list", "of", "keys", "of", "idfobjects", "that", "hve", "None", "Name", "fields" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/idf_helpers.py#L62-L72
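A quick sketch of calling it; it assumes IDF.setiddname has already been pointed at an IDD file, since the function builds an empty IDF internally:

# Hedged sketch: list object keys whose IDD definition has at least
# one field ending in 'Node_Name'.
nodekeys = getidfkeyswithnodes()
print(len(nodekeys), nodekeys[:3])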
santoshphilip/eppy
eppy/idf_helpers.py
getobjectswithnode
def getobjectswithnode(idf, nodekeys, nodename):
    """return all objects that mention this node name"""
    keys = nodekeys
    # TODO getidfkeyswithnodes needs to be done only once. take out of here
    listofidfobjects = (idf.idfobjects[key.upper()] for key in keys
                        if idf.idfobjects[key.upper()])
    idfobjects = [idfobj for idfobjs in listofidfobjects
                  for idfobj in idfobjs]
    objwithnodes = []
    for obj in idfobjects:
        values = obj.fieldvalues
        fdnames = obj.fieldnames
        for value, fdname in zip(values, fdnames):
            if fdname.endswith('Node_Name'):
                if value == nodename:
                    objwithnodes.append(obj)
                    break
    return objwithnodes
python
[ "def", "getobjectswithnode", "(", "idf", ",", "nodekeys", ",", "nodename", ")", ":", "keys", "=", "nodekeys", "# TODO getidfkeyswithnodes needs to be done only once. take out of here", "listofidfobjects", "=", "(", "idf", ".", "idfobjects", "[", "key", ".", "upper", "(", ")", "]", "for", "key", "in", "keys", "if", "idf", ".", "idfobjects", "[", "key", ".", "upper", "(", ")", "]", ")", "idfobjects", "=", "[", "idfobj", "for", "idfobjs", "in", "listofidfobjects", "for", "idfobj", "in", "idfobjs", "]", "objwithnodes", "=", "[", "]", "for", "obj", "in", "idfobjects", ":", "values", "=", "obj", ".", "fieldvalues", "fdnames", "=", "obj", ".", "fieldnames", "for", "value", ",", "fdname", "in", "zip", "(", "values", ",", "fdnames", ")", ":", "if", "fdname", ".", "endswith", "(", "'Node_Name'", ")", ":", "if", "value", "==", "nodename", ":", "objwithnodes", ".", "append", "(", "obj", ")", "break", "return", "objwithnodes" ]
return all objects that mention this node name
[ "return", "all", "objects", "that", "mention", "this", "node", "name" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/idf_helpers.py#L74-L92
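A usage sketch, continuing from the earlier one; the node name is a placeholder:

# Hedged sketch: collect every object that references the given node.
nodekeys = getidfkeyswithnodes()   # done once, per the TODO above
objs = getobjectswithnode(idf, nodekeys, "Supply Side Inlet Node")
for obj in objs:
    print(obj.key, obj.Name)       # assumes the objects have a Name field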
santoshphilip/eppy
eppy/idf_helpers.py
name2idfobject
def name2idfobject(idf, groupnamess=None, objkeys=None, **kwargs):
    """return the object, if the Name or some other field is known.
    send fields in **kwargs as Name='a name', Roughness='smooth'
    Returns the first find (field search is unordered)
    objkeys -> if objkeys=['ZONE', 'Material'], search only those
    groupnames -> not yet coded"""
    # TODO : this is a very slow search. revisit to speed it up.
    if not objkeys:
        objkeys = idfobjectkeys(idf)
    for objkey in objkeys:
        idfobjs = idf.idfobjects[objkey.upper()]
        for idfobj in idfobjs:
            for key, val in kwargs.items():
                try:
                    if idfobj[key] == val:
                        return idfobj
                except BadEPFieldError:
                    continue
python
[ "def", "name2idfobject", "(", "idf", ",", "groupnamess", "=", "None", ",", "objkeys", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# TODO : this is a very slow search. revist to speed it up.", "if", "not", "objkeys", ":", "objkeys", "=", "idfobjectkeys", "(", "idf", ")", "for", "objkey", "in", "objkeys", ":", "idfobjs", "=", "idf", ".", "idfobjects", "[", "objkey", ".", "upper", "(", ")", "]", "for", "idfobj", "in", "idfobjs", ":", "for", "key", ",", "val", "in", "kwargs", ".", "items", "(", ")", ":", "try", ":", "if", "idfobj", "[", "key", "]", "==", "val", ":", "return", "idfobj", "except", "BadEPFieldError", "as", "e", ":", "continue" ]
return the object, if the Name or some other field is known. send fields in **kwargs as Name='a name', Roughness='smooth'. Returns the first find (field search is unordered). objkeys -> if objkeys=['ZONE', 'Material'], search only those. groupnames -> not yet coded
[ "return", "the", "object", "if", "the", "Name", "or", "some", "other", "field", "is", "known", ".", "send", "filed", "in", "**", "kwargs", "as", "Name", "=", "a", "name", "Roughness", "=", "smooth", "Returns", "the", "first", "find", "(", "field", "search", "is", "unordered", ")", "objkeys", "-", ">", "if", "objkeys", "=", "[", "ZONE", "Material", "]", "search", "only", "those", "groupnames", "-", ">", "not", "yet", "coded" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/idf_helpers.py#L94-L111
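A sketch of the two search modes; the values are placeholders:

# Hedged sketch: search restricted to ZONE objects by Name, then an
# unrestricted (and, per the TODO, slow) search by any field value.
zone = name2idfobject(idf, objkeys=["ZONE"], Name="Main Zone")
smooth = name2idfobject(idf, Roughness="Smooth")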
santoshphilip/eppy
eppy/idf_helpers.py
getidfobjectlist
def getidfobjectlist(idf):
    """return a list of all idfobjects in idf"""
    idfobjects = idf.idfobjects
    # idfobjlst = [idfobjects[key] for key in idfobjects if idfobjects[key]]
    idfobjlst = [idfobjects[key] for key in idf.model.dtls
                 if idfobjects[key]]
    # `for key in idf.model.dtls` maintains the order
    # `for key in idfobjects` does not have order
    idfobjlst = itertools.chain.from_iterable(idfobjlst)
    idfobjlst = list(idfobjlst)
    return idfobjlst
python
[ "def", "getidfobjectlist", "(", "idf", ")", ":", "idfobjects", "=", "idf", ".", "idfobjects", "# idfobjlst = [idfobjects[key] for key in idfobjects if idfobjects[key]]", "idfobjlst", "=", "[", "idfobjects", "[", "key", "]", "for", "key", "in", "idf", ".", "model", ".", "dtls", "if", "idfobjects", "[", "key", "]", "]", "# `for key in idf.model.dtls` maintains the order", "# `for key in idfobjects` does not have order", "idfobjlst", "=", "itertools", ".", "chain", ".", "from_iterable", "(", "idfobjlst", ")", "idfobjlst", "=", "list", "(", "idfobjlst", ")", "return", "idfobjlst" ]
return a list of all idfobjects in idf
[ "return", "a", "list", "of", "all", "idfobjects", "in", "idf" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/idf_helpers.py#L113-L122
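For example, assuming idf is an open model as in the earlier sketches:

# Hedged sketch: iterate every object in the model, in IDD order.
for obj in getidfobjectlist(idf):
    print(obj.key)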
santoshphilip/eppy
eppy/idf_helpers.py
copyidfintoidf
def copyidfintoidf(toidf, fromidf):
    """copy fromidf completely into toidf"""
    idfobjlst = getidfobjectlist(fromidf)
    for idfobj in idfobjlst:
        toidf.copyidfobject(idfobj)
python
[ "def", "copyidfintoidf", "(", "toidf", ",", "fromidf", ")", ":", "idfobjlst", "=", "getidfobjectlist", "(", "fromidf", ")", "for", "idfobj", "in", "idfobjlst", ":", "toidf", ".", "copyidfobject", "(", "idfobj", ")" ]
copy fromidf completely into toidf
[ "copy", "fromidf", "completely", "into", "toidf" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/idf_helpers.py#L124-L128
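A merge sketch; paths are placeholders, and note the helper copies objects verbatim without deduplicating:

# Hedged sketch: append every object of one model into another.
main = IDF("building.idf")     # placeholder path
extra = IDF("hvac.idf")        # placeholder path
copyidfintoidf(main, extra)
main.saveas("merged.idf")      # placeholder path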
santoshphilip/eppy
eppy/useful_scripts/idfdiff_missing.py
idfdiffs
def idfdiffs(idf1, idf2):
    """return the diffs between the two idfs"""
    thediffs = {}
    keys = idf1.model.dtls  # undocumented variable
    for akey in keys:
        idfobjs1 = idf1.idfobjects[akey]
        idfobjs2 = idf2.idfobjects[akey]
        names = set([getobjname(i) for i in idfobjs1] +
                    [getobjname(i) for i in idfobjs2])
        names = sorted(names)
        idfobjs1 = sorted(idfobjs1, key=lambda idfobj: idfobj['obj'])
        idfobjs2 = sorted(idfobjs2, key=lambda idfobj: idfobj['obj'])
        for name in names:
            n_idfobjs1 = [item for item in idfobjs1
                          if getobjname(item) == name]
            n_idfobjs2 = [item for item in idfobjs2
                          if getobjname(item) == name]
            for idfobj1, idfobj2 in itertools.zip_longest(n_idfobjs1,
                                                          n_idfobjs2):
                if idfobj1 is None:
                    thediffs[(idfobj2.key.upper(),
                              getobjname(idfobj2))] = (None, idf1.idfname)  # (idf1.idfname, None)
                    break
                if idfobj2 is None:
                    thediffs[(idfobj1.key.upper(),
                              getobjname(idfobj1))] = (idf2.idfname, None)  # (None, idf2.idfname)
                    break
                # for i, (f1, f2) in enumerate(zip(idfobj1.obj, idfobj2.obj)):
                #     if i == 0:
                #         f1, f2 = f1.upper(), f2.upper()
                #     if f1 != f2:
                #         thediffs[(akey,
                #                   getobjname(idfobj1),
                #                   idfobj1.objidd[i]['field'][0])] = (f1, f2)
    return thediffs
python
[ "def", "idfdiffs", "(", "idf1", ",", "idf2", ")", ":", "thediffs", "=", "{", "}", "keys", "=", "idf1", ".", "model", ".", "dtls", "# undocumented variable", "for", "akey", "in", "keys", ":", "idfobjs1", "=", "idf1", ".", "idfobjects", "[", "akey", "]", "idfobjs2", "=", "idf2", ".", "idfobjects", "[", "akey", "]", "names", "=", "set", "(", "[", "getobjname", "(", "i", ")", "for", "i", "in", "idfobjs1", "]", "+", "[", "getobjname", "(", "i", ")", "for", "i", "in", "idfobjs2", "]", ")", "names", "=", "sorted", "(", "names", ")", "idfobjs1", "=", "sorted", "(", "idfobjs1", ",", "key", "=", "lambda", "idfobj", ":", "idfobj", "[", "'obj'", "]", ")", "idfobjs2", "=", "sorted", "(", "idfobjs2", ",", "key", "=", "lambda", "idfobj", ":", "idfobj", "[", "'obj'", "]", ")", "for", "name", "in", "names", ":", "n_idfobjs1", "=", "[", "item", "for", "item", "in", "idfobjs1", "if", "getobjname", "(", "item", ")", "==", "name", "]", "n_idfobjs2", "=", "[", "item", "for", "item", "in", "idfobjs2", "if", "getobjname", "(", "item", ")", "==", "name", "]", "for", "idfobj1", ",", "idfobj2", "in", "itertools", ".", "zip_longest", "(", "n_idfobjs1", ",", "n_idfobjs2", ")", ":", "if", "idfobj1", "==", "None", ":", "thediffs", "[", "(", "idfobj2", ".", "key", ".", "upper", "(", ")", ",", "getobjname", "(", "idfobj2", ")", ")", "]", "=", "(", "None", ",", "idf1", ".", "idfname", ")", "#(idf1.idfname, None)", "break", "if", "idfobj2", "==", "None", ":", "thediffs", "[", "(", "idfobj1", ".", "key", ".", "upper", "(", ")", ",", "getobjname", "(", "idfobj1", ")", ")", "]", "=", "(", "idf2", ".", "idfname", ",", "None", ")", "# (None, idf2.idfname)", "break", "# for i, (f1, f2) in enumerate(zip(idfobj1.obj, idfobj2.obj)):", "# if i == 0:", "# f1, f2 = f1.upper(), f2.upper()", "# if f1 != f2:", "# thediffs[(akey,", "# getobjname(idfobj1),", "# idfobj1.objidd[i]['field'][0])] = (f1, f2)", "return", "thediffs" ]
return the diffs between the two idfs
[ "return", "the", "diffs", "between", "the", "two", "idfs" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/useful_scripts/idfdiff_missing.py#L92-L127
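A sketch of reading the result; getobjname is defined elsewhere in the same script, and the paths are placeholders:

# Hedged sketch: each diff key is (OBJKEY, objectname); the value
# records which file the object is missing from.
diffs = idfdiffs(IDF("a.idf"), IDF("b.idf"))
for (objkey, objname), where in diffs.items():
    print(objkey, objname, where)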
santoshphilip/eppy
eppy/useful_scripts/autosize.py
autosize_fieldname
def autosize_fieldname(idfobject):
    """return autosizable field names in idfobject"""
    # undocumented stuff in this code
    return [fname for (fname, dct) in zip(idfobject.objls,
                                          idfobject['objidd'])
            if 'autosizable' in dct]
python
[ "def", "autosize_fieldname", "(", "idfobject", ")", ":", "# undocumented stuff in this code", "return", "[", "fname", "for", "(", "fname", ",", "dct", ")", "in", "zip", "(", "idfobject", ".", "objls", ",", "idfobject", "[", "'objidd'", "]", ")", "if", "'autosizable'", "in", "dct", "]" ]
return autosizable field names in idfobject
[ "return", "autsizeable", "field", "names", "in", "idfobject" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/useful_scripts/autosize.py#L36-L41
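A sketch; the object key is a placeholder, and any object with sizable fields works:

# Hedged sketch: list the fields that accept the 'autosize' value.
fan = idf.idfobjects["FAN:CONSTANTVOLUME"][0]
print(autosize_fieldname(fan))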
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/iddgroups.py
idd2group
def idd2group(fhandle):
    """wrapper for iddtxt2groups"""
    try:
        txt = fhandle.read()
        return iddtxt2groups(txt)
    except AttributeError:
        # fhandle is a path, not an open file
        with open(fhandle, 'r') as fp:
            txt = fp.read()
        return iddtxt2groups(txt)
python
[ "def", "idd2group", "(", "fhandle", ")", ":", "try", ":", "txt", "=", "fhandle", ".", "read", "(", ")", "return", "iddtxt2groups", "(", "txt", ")", "except", "AttributeError", "as", "e", ":", "txt", "=", "open", "(", "fhandle", ",", "'r'", ")", ".", "read", "(", ")", "return", "iddtxt2groups", "(", "txt", ")" ]
wrapper for iddtxt2groups
[ "wrapper", "for", "iddtxt2groups" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/iddgroups.py#L27-L34
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/iddgroups.py
idd2grouplist
def idd2grouplist(fhandle):
    """wrapper for iddtxt2grouplist"""
    try:
        txt = fhandle.read()
        return iddtxt2grouplist(txt)
    except AttributeError:
        # fhandle is a path, not an open file
        with open(fhandle, 'r') as fp:
            txt = fp.read()
        return iddtxt2grouplist(txt)
python
[ "def", "idd2grouplist", "(", "fhandle", ")", ":", "try", ":", "txt", "=", "fhandle", ".", "read", "(", ")", "return", "iddtxt2grouplist", "(", "txt", ")", "except", "AttributeError", "as", "e", ":", "txt", "=", "open", "(", "fhandle", ",", "'r'", ")", ".", "read", "(", ")", "return", "iddtxt2grouplist", "(", "txt", ")" ]
wrapper for iddtxt2grouplist
[ "wrapper", "for", "iddtxt2grouplist" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/iddgroups.py#L36-L43
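Both wrappers accept either an open file object or a path; the AttributeError fallback fires when the argument has no .read method. A sketch, with a placeholder path:

# Hedged sketch: the same call works with a path or an open handle.
gdict = idd2group("Energy+.idd")
with open("Energy+.idd") as fhandle:
    glist = idd2grouplist(fhandle)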
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/iddgroups.py
iddtxt2groups
def iddtxt2groups(txt):
    """extract the groups from the idd file"""
    try:
        txt = txt.decode('ISO-8859-2')
    except AttributeError:
        pass  # for python 3
    txt = nocomment(txt, '!')
    txt = txt.replace("\\group", "!-group")  # retains group in next line
    txt = nocomment(txt, '\\')  # remove all other idd info
    lines = txt.splitlines()
    lines = [line.strip() for line in lines]  # cleanup
    lines = [line for line in lines if line != '']  # cleanup
    txt = '\n'.join(lines)
    gsplits = txt.split('!')  # split into groups, since we have !-group
    gsplits = [gsplit.splitlines() for gsplit in gsplits]  # split group
    gsplits[0].insert(0, None)
    # Put None for the first group that does not have a group name
    gdict = {}
    for gsplit in gsplits:
        gdict.update({gsplit[0]: gsplit[1:]})
        # makes dict {groupname:[k1, k2], groupname2:[k3, k4]}
    gdict = {k: '\n'.join(v) for k, v in gdict.items()}  # joins lines back
    gdict = {k: v.split(';') for k, v in gdict.items()}  # splits into idfobjects
    gdict = {k: [i.strip() for i in v] for k, v in gdict.items()}  # cleanup
    gdict = {k: [i.splitlines() for i in v] for k, v in gdict.items()}
    # splits idfobjects into lines
    gdict = {k: [i for i in v if len(i) > 0] for k, v in gdict.items()}
    # cleanup - removes blank lines
    gdict = {k: [i[0] for i in v] for k, v in gdict.items()}  # use first line
    gdict = {k: [i.split(',')[0] for i in v] for k, v in gdict.items()}  # remove ','
    nvalue = gdict.pop(None)  # remove group with no name
    gdict = {k[len('-group '):]: v for k, v in gdict.items()}  # get group name
    gdict.update({None: nvalue})  # put back group with no name
    return gdict
python
[ "def", "iddtxt2groups", "(", "txt", ")", ":", "try", ":", "txt", "=", "txt", ".", "decode", "(", "'ISO-8859-2'", ")", "except", "AttributeError", "as", "e", ":", "pass", "# for python 3", "txt", "=", "nocomment", "(", "txt", ",", "'!'", ")", "txt", "=", "txt", ".", "replace", "(", "\"\\\\group\"", ",", "\"!-group\"", ")", "# retains group in next line", "txt", "=", "nocomment", "(", "txt", ",", "'\\\\'", ")", "# remove all other idd info", "lines", "=", "txt", ".", "splitlines", "(", ")", "lines", "=", "[", "line", ".", "strip", "(", ")", "for", "line", "in", "lines", "]", "# cleanup", "lines", "=", "[", "line", "for", "line", "in", "lines", "if", "line", "!=", "''", "]", "# cleanup", "txt", "=", "'\\n'", ".", "join", "(", "lines", ")", "gsplits", "=", "txt", ".", "split", "(", "'!'", ")", "# split into groups, since we have !-group", "gsplits", "=", "[", "gsplit", ".", "splitlines", "(", ")", "for", "gsplit", "in", "gsplits", "]", "# split group", "gsplits", "[", "0", "]", ".", "insert", "(", "0", ",", "None", ")", "# Put None for the first group that does nothave a group name", "gdict", "=", "{", "}", "for", "gsplit", "in", "gsplits", ":", "gdict", ".", "update", "(", "{", "gsplit", "[", "0", "]", ":", "gsplit", "[", "1", ":", "]", "}", ")", "# makes dict {groupname:[k1, k2], groupname2:[k3, k4]}", "gdict", "=", "{", "k", ":", "'\\n'", ".", "join", "(", "v", ")", "for", "k", ",", "v", "in", "gdict", ".", "items", "(", ")", "}", "# joins lines back", "gdict", "=", "{", "k", ":", "v", ".", "split", "(", "';'", ")", "for", "k", ",", "v", "in", "gdict", ".", "items", "(", ")", "}", "# splits into idfobjects", "gdict", "=", "{", "k", ":", "[", "i", ".", "strip", "(", ")", "for", "i", "in", "v", "]", "for", "k", ",", "v", "in", "gdict", ".", "items", "(", ")", "}", "# cleanup", "gdict", "=", "{", "k", ":", "[", "i", ".", "splitlines", "(", ")", "for", "i", "in", "v", "]", "for", "k", ",", "v", "in", "gdict", ".", "items", "(", ")", "}", "# splits idfobjects into lines", "gdict", "=", "{", "k", ":", "[", "i", "for", "i", "in", "v", "if", "len", "(", "i", ")", ">", "0", "]", "for", "k", ",", "v", "in", "gdict", ".", "items", "(", ")", "}", "# cleanup - removes blank lines", "gdict", "=", "{", "k", ":", "[", "i", "[", "0", "]", "for", "i", "in", "v", "]", "for", "k", ",", "v", "in", "gdict", ".", "items", "(", ")", "}", "# use first line", "gdict", "=", "{", "k", ":", "[", "i", ".", "split", "(", "','", ")", "[", "0", "]", "for", "i", "in", "v", "]", "for", "k", ",", "v", "in", "gdict", ".", "items", "(", ")", "}", "# remove ','", "nvalue", "=", "gdict", ".", "pop", "(", "None", ")", "# remove group with no name", "gdict", "=", "{", "k", "[", "len", "(", "'-group '", ")", ":", "]", ":", "v", "for", "k", ",", "v", "in", "gdict", ".", "items", "(", ")", "}", "# get group name", "gdict", ".", "update", "(", "{", "None", ":", "nvalue", "}", ")", "# put back group with no name", "return", "gdict" ]
extract the groups from the idd file
[ "extract", "the", "groups", "from", "the", "idd", "file" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/iddgroups.py#L46-L82
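A sketch of the returned shape; the path and group name are placeholders, and object keys that appear before the first \group directive land under the None key:

# Hedged sketch: gdict maps group name -> list of object keys.
with open("Energy+.idd") as fhandle:
    gdict = iddtxt2groups(fhandle.read())
print(gdict.get("Simulation Parameters", [])[:3])
print(gdict[None])   # keys that precede the first \group line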
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/iddgroups.py
iddtxt2grouplist
def iddtxt2grouplist(txt):
    """return a list of group names

    the list is in the same order as the idf objects in the idd file
    """
    def makenone(astr):
        if astr == 'None':
            return None
        else:
            return astr

    txt = nocomment(txt, '!')
    txt = txt.replace("\\group", "!-group")  # retains group in next line
    txt = nocomment(txt, '\\')  # remove all other idd info
    lines = txt.splitlines()
    lines = [line.strip() for line in lines]  # cleanup
    lines = [line for line in lines if line != '']  # cleanup
    txt = '\n'.join(lines)
    gsplits = txt.split('!')  # split into groups, since we have !-group
    gsplits = [gsplit.splitlines() for gsplit in gsplits]  # split group
    gsplits[0].insert(0, u'-group None')
    # Put None for the first group that does not have a group name
    glist = []
    for gsplit in gsplits:
        glist.append((gsplit[0], gsplit[1:]))
        # makes a list of (groupname, [k1, k2]) pairs
    glist = [(k, '\n'.join(v)) for k, v in glist]  # joins lines back
    glist = [(k, v.split(';')) for k, v in glist]  # splits into idfobjects
    glist = [(k, [i.strip() for i in v]) for k, v in glist]  # cleanup
    glist = [(k, [i.splitlines() for i in v]) for k, v in glist]
    # splits idfobjects into lines
    glist = [(k, [i for i in v if len(i) > 0]) for k, v in glist]
    # cleanup - removes blank lines
    glist = [(k, [i[0] for i in v]) for k, v in glist]  # use first line
    fglist = []
    for gnamelist in glist:
        gname = gnamelist[0]
        thelist = gnamelist[-1]
        for item in thelist:
            fglist.append((gname, item))
    glist = [(gname[len("-group "):], obj) for gname, obj in fglist]  # remove "-group "
    glist = [(makenone(gname), obj) for gname, obj in glist]  # make str None into None
    glist = [(gname, obj.split(',')[0]) for gname, obj in glist]  # remove comma
    return glist
python
[ "def", "iddtxt2grouplist", "(", "txt", ")", ":", "def", "makenone", "(", "astr", ")", ":", "if", "astr", "==", "'None'", ":", "return", "None", "else", ":", "return", "astr", "txt", "=", "nocomment", "(", "txt", ",", "'!'", ")", "txt", "=", "txt", ".", "replace", "(", "\"\\\\group\"", ",", "\"!-group\"", ")", "# retains group in next line", "txt", "=", "nocomment", "(", "txt", ",", "'\\\\'", ")", "# remove all other idd info", "lines", "=", "txt", ".", "splitlines", "(", ")", "lines", "=", "[", "line", ".", "strip", "(", ")", "for", "line", "in", "lines", "]", "# cleanup", "lines", "=", "[", "line", "for", "line", "in", "lines", "if", "line", "!=", "''", "]", "# cleanup", "txt", "=", "'\\n'", ".", "join", "(", "lines", ")", "gsplits", "=", "txt", ".", "split", "(", "'!'", ")", "# split into groups, since we have !-group", "gsplits", "=", "[", "gsplit", ".", "splitlines", "(", ")", "for", "gsplit", "in", "gsplits", "]", "# split group", "gsplits", "[", "0", "]", ".", "insert", "(", "0", ",", "u'-group None'", ")", "# Put None for the first group that does nothave a group name", "glist", "=", "[", "]", "for", "gsplit", "in", "gsplits", ":", "glist", ".", "append", "(", "(", "gsplit", "[", "0", "]", ",", "gsplit", "[", "1", ":", "]", ")", ")", "# makes dict {groupname:[k1, k2], groupname2:[k3, k4]}", "glist", "=", "[", "(", "k", ",", "'\\n'", ".", "join", "(", "v", ")", ")", "for", "k", ",", "v", "in", "glist", "]", "# joins lines back", "glist", "=", "[", "(", "k", ",", "v", ".", "split", "(", "';'", ")", ")", "for", "k", ",", "v", "in", "glist", "]", "# splits into idfobjects", "glist", "=", "[", "(", "k", ",", "[", "i", ".", "strip", "(", ")", "for", "i", "in", "v", "]", ")", "for", "k", ",", "v", "in", "glist", "]", "# cleanup", "glist", "=", "[", "(", "k", ",", "[", "i", ".", "splitlines", "(", ")", "for", "i", "in", "v", "]", ")", "for", "k", ",", "v", "in", "glist", "]", "# splits idfobjects into lines", "glist", "=", "[", "(", "k", ",", "[", "i", "for", "i", "in", "v", "if", "len", "(", "i", ")", ">", "0", "]", ")", "for", "k", ",", "v", "in", "glist", "]", "# cleanup - removes blank lines", "glist", "=", "[", "(", "k", ",", "[", "i", "[", "0", "]", "for", "i", "in", "v", "]", ")", "for", "k", ",", "v", "in", "glist", "]", "# use first line", "fglist", "=", "[", "]", "for", "gnamelist", "in", "glist", ":", "gname", "=", "gnamelist", "[", "0", "]", "thelist", "=", "gnamelist", "[", "-", "1", "]", "for", "item", "in", "thelist", ":", "fglist", ".", "append", "(", "(", "gname", ",", "item", ")", ")", "glist", "=", "[", "(", "gname", "[", "len", "(", "\"-group \"", ")", ":", "]", ",", "obj", ")", "for", "gname", ",", "obj", "in", "fglist", "]", "# remove \"-group \"", "glist", "=", "[", "(", "makenone", "(", "gname", ")", ",", "obj", ")", "for", "gname", ",", "obj", "in", "glist", "]", "# make str None into None", "glist", "=", "[", "(", "gname", ",", "obj", ".", "split", "(", "','", ")", "[", "0", "]", ")", "for", "gname", ",", "obj", "in", "glist", "]", "# remove comma", "return", "glist" ]
return a list of group names; the list is in the same order as the idf objects in the idd file
[ "return", "a", "list", "of", "group", "names", "the", "list", "in", "the", "same", "order", "as", "the", "idf", "objects", "in", "idd", "file" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/iddgroups.py#L84-L129
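Unlike the dict version, this keeps IDD order. A sketch, path again a placeholder:

# Hedged sketch: ordered (groupname, objectkey) pairs; the nameless
# leading group comes back as None.
with open("Energy+.idd") as fhandle:
    glist = iddtxt2grouplist(fhandle.read())
print(glist[:3])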
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/iddgroups.py
group2commlst
def group2commlst(commlst, glist):
    """add group info to commlst"""
    for (gname, objname), commitem in zip(glist, commlst):
        newitem1 = "group %s" % (gname,)
        newitem2 = "idfobj %s" % (objname,)
        commitem[0].insert(0, newitem1)
        commitem[0].insert(1, newitem2)
    return commlst
python
[ "def", "group2commlst", "(", "commlst", ",", "glist", ")", ":", "for", "(", "gname", ",", "objname", ")", ",", "commitem", "in", "zip", "(", "glist", ",", "commlst", ")", ":", "newitem1", "=", "\"group %s\"", "%", "(", "gname", ",", ")", "newitem2", "=", "\"idfobj %s\"", "%", "(", "objname", ",", ")", "commitem", "[", "0", "]", ".", "insert", "(", "0", ",", "newitem1", ")", "commitem", "[", "0", "]", ".", "insert", "(", "1", ",", "newitem2", ")", "return", "commlst" ]
add group info to commlst
[ "add", "group", "info", "to", "commlst" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/iddgroups.py#L131-L138
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/iddgroups.py
group2commdct
def group2commdct(commdct, glist):
    """add group info to commdct"""
    for (gname, objname), commitem in zip(glist, commdct):
        commitem[0]['group'] = gname
        commitem[0]['idfobj'] = objname
    return commdct
python
[ "def", "group2commdct", "(", "commdct", ",", "glist", ")", ":", "for", "(", "gname", ",", "objname", ")", ",", "commitem", "in", "zip", "(", "glist", ",", "commdct", ")", ":", "commitem", "[", "0", "]", "[", "'group'", "]", "=", "gname", "commitem", "[", "0", "]", "[", "'idfobj'", "]", "=", "objname", "return", "commdct" ]
add group info to commdct
[ "add", "group", "info", "tocomdct" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/iddgroups.py#L140-L145
santoshphilip/eppy
eppy/EPlusInterfaceFunctions/iddgroups.py
commdct2grouplist
def commdct2grouplist(gcommdct):
    """extract embedded group data from commdct.
    return gdict -> {g1:[obj1, obj2, obj3], g2:[obj4, ..]}"""
    gdict = {}
    for objidd in gcommdct:
        group = objidd[0]['group']
        objname = objidd[0]['idfobj']
        if group in gdict:
            gdict[group].append(objname)
        else:
            gdict[group] = [objname, ]
    return gdict
python
[ "def", "commdct2grouplist", "(", "gcommdct", ")", ":", "gdict", "=", "{", "}", "for", "objidd", "in", "gcommdct", ":", "group", "=", "objidd", "[", "0", "]", "[", "'group'", "]", "objname", "=", "objidd", "[", "0", "]", "[", "'idfobj'", "]", "if", "group", "in", "gdict", ":", "gdict", "[", "group", "]", ".", "append", "(", "objname", ")", "else", ":", "gdict", "[", "group", "]", "=", "[", "objname", ",", "]", "return", "gdict" ]
extract embedded group data from commdct. return gdict -> {g1:[obj1, obj2, obj3], g2:[obj4, ..]}
[ "extract", "embedded", "group", "data", "from", "commdct", ".", "return", "gdict", "-", ">", "{", "g1", ":", "[", "obj1", "obj2", "obj3", "]", "g2", ":", "[", "obj4", "..", "]", "}" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/EPlusInterfaceFunctions/iddgroups.py#L147-L158
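The three functions above round-trip group information through eppy's comment structures. A sketch, assuming commdct and glist come from eppy's IDD parsing step:

# Hedged sketch: tag commdct with group data, then recover the
# {group: [objkeys]} mapping from it.
commdct = group2commdct(commdct, glist)
gdict = commdct2grouplist(commdct)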
santoshphilip/eppy
eppy/hvacbuilder.py
flattencopy
def flattencopy(lst):
    """flatten and return a copy of the list

    inefficient on large lists"""
    # modified from
    # http://stackoverflow.com/questions/2158395/flatten-an-irregular-list-of-lists-in-python
    thelist = copy.deepcopy(lst)
    list_is_nested = True
    while list_is_nested:  # outer loop
        keepchecking = False
        atemp = []
        for element in thelist:  # inner loop
            if isinstance(element, list):
                atemp.extend(element)
                keepchecking = True
            else:
                atemp.append(element)
        list_is_nested = keepchecking  # determine if outer loop exits
        thelist = atemp[:]
    return thelist
python
[ "def", "flattencopy", "(", "lst", ")", ":", "# modified from", "# http://stackoverflow.com/questions/2158395/flatten-an-irregular-list-of-lists-in-python", "thelist", "=", "copy", ".", "deepcopy", "(", "lst", ")", "list_is_nested", "=", "True", "while", "list_is_nested", ":", "# outer loop", "keepchecking", "=", "False", "atemp", "=", "[", "]", "for", "element", "in", "thelist", ":", "# inner loop", "if", "isinstance", "(", "element", ",", "list", ")", ":", "atemp", ".", "extend", "(", "element", ")", "keepchecking", "=", "True", "else", ":", "atemp", ".", "append", "(", "element", ")", "list_is_nested", "=", "keepchecking", "# determine if outer loop exits", "thelist", "=", "atemp", "[", ":", "]", "return", "thelist" ]
flatten and return a copy of the list; inefficient on large lists
[ "flatten", "and", "return", "a", "copy", "of", "the", "list", "indefficient", "on", "large", "lists" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L52-L70
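A self-contained sketch; this one runs without eppy:

# Sketch: flattens arbitrarily nested lists without mutating the input.
nested = [1, [2, [3, 4]], 5]
print(flattencopy(nested))   # [1, 2, 3, 4, 5]
print(nested)                # unchanged: [1, [2, [3, 4]], 5]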
santoshphilip/eppy
eppy/hvacbuilder.py
makepipecomponent
def makepipecomponent(idf, pname):
    """make a pipe component

    generate inlet outlet names"""
    apipe = idf.newidfobject("Pipe:Adiabatic".upper(), Name=pname)
    apipe.Inlet_Node_Name = "%s_inlet" % (pname,)
    apipe.Outlet_Node_Name = "%s_outlet" % (pname,)
    return apipe
python
[ "def", "makepipecomponent", "(", "idf", ",", "pname", ")", ":", "apipe", "=", "idf", ".", "newidfobject", "(", "\"Pipe:Adiabatic\"", ".", "upper", "(", ")", ",", "Name", "=", "pname", ")", "apipe", ".", "Inlet_Node_Name", "=", "\"%s_inlet\"", "%", "(", "pname", ",", ")", "apipe", ".", "Outlet_Node_Name", "=", "\"%s_outlet\"", "%", "(", "pname", ",", ")", "return", "apipe" ]
make a pipe component; generate inlet outlet names
[ "make", "a", "pipe", "component", "generate", "inlet", "outlet", "names" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L72-L78
santoshphilip/eppy
eppy/hvacbuilder.py
makeductcomponent
def makeductcomponent(idf, dname):
    """make a duct component

    generate inlet outlet names"""
    aduct = idf.newidfobject("duct".upper(), Name=dname)
    aduct.Inlet_Node_Name = "%s_inlet" % (dname,)
    aduct.Outlet_Node_Name = "%s_outlet" % (dname,)
    return aduct
python
[ "def", "makeductcomponent", "(", "idf", ",", "dname", ")", ":", "aduct", "=", "idf", ".", "newidfobject", "(", "\"duct\"", ".", "upper", "(", ")", ",", "Name", "=", "dname", ")", "aduct", ".", "Inlet_Node_Name", "=", "\"%s_inlet\"", "%", "(", "dname", ",", ")", "aduct", ".", "Outlet_Node_Name", "=", "\"%s_outlet\"", "%", "(", "dname", ",", ")", "return", "aduct" ]
make a duct component; generate inlet outlet names
[ "make", "a", "duct", "component", "generate", "inlet", "outlet", "names" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L80-L86
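A sketch of the naming convention both component makers follow; component names are placeholders:

# Hedged sketch: node names are derived from the component name.
apipe = makepipecomponent(idf, "p1")
print(apipe.Inlet_Node_Name, apipe.Outlet_Node_Name)  # p1_inlet p1_outlet
aduct = makeductcomponent(idf, "d1")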
santoshphilip/eppy
eppy/hvacbuilder.py
makepipebranch
def makepipebranch(idf, bname):
    """make a branch with a pipe

    use standard inlet outlet names"""
    # make the pipe component first
    pname = "%s_pipe" % (bname,)
    apipe = makepipecomponent(idf, pname)
    # now make the branch with the pipe in it
    abranch = idf.newidfobject("BRANCH", Name=bname)
    abranch.Component_1_Object_Type = 'Pipe:Adiabatic'
    abranch.Component_1_Name = pname
    abranch.Component_1_Inlet_Node_Name = apipe.Inlet_Node_Name
    abranch.Component_1_Outlet_Node_Name = apipe.Outlet_Node_Name
    abranch.Component_1_Branch_Control_Type = "Bypass"
    return abranch
python
[ "def", "makepipebranch", "(", "idf", ",", "bname", ")", ":", "# make the pipe component first", "pname", "=", "\"%s_pipe\"", "%", "(", "bname", ",", ")", "apipe", "=", "makepipecomponent", "(", "idf", ",", "pname", ")", "# now make the branch with the pipe in it", "abranch", "=", "idf", ".", "newidfobject", "(", "\"BRANCH\"", ",", "Name", "=", "bname", ")", "abranch", ".", "Component_1_Object_Type", "=", "'Pipe:Adiabatic'", "abranch", ".", "Component_1_Name", "=", "pname", "abranch", ".", "Component_1_Inlet_Node_Name", "=", "apipe", ".", "Inlet_Node_Name", "abranch", ".", "Component_1_Outlet_Node_Name", "=", "apipe", ".", "Outlet_Node_Name", "abranch", ".", "Component_1_Branch_Control_Type", "=", "\"Bypass\"", "return", "abranch" ]
make a branch with a pipe; use standard inlet outlet names
[ "make", "a", "branch", "with", "a", "pipe", "use", "standard", "inlet", "outlet", "names" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L88-L101
santoshphilip/eppy
eppy/hvacbuilder.py
makeductbranch
def makeductbranch(idf, bname):
    """make a branch with a duct

    use standard inlet outlet names"""
    # make the duct component first
    pname = "%s_duct" % (bname,)
    aduct = makeductcomponent(idf, pname)
    # now make the branch with the duct in it
    abranch = idf.newidfobject("BRANCH", Name=bname)
    abranch.Component_1_Object_Type = 'duct'
    abranch.Component_1_Name = pname
    abranch.Component_1_Inlet_Node_Name = aduct.Inlet_Node_Name
    abranch.Component_1_Outlet_Node_Name = aduct.Outlet_Node_Name
    abranch.Component_1_Branch_Control_Type = "Bypass"
    return abranch
python
[ "def", "makeductbranch", "(", "idf", ",", "bname", ")", ":", "# make the duct component first", "pname", "=", "\"%s_duct\"", "%", "(", "bname", ",", ")", "aduct", "=", "makeductcomponent", "(", "idf", ",", "pname", ")", "# now make the branch with the duct in it", "abranch", "=", "idf", ".", "newidfobject", "(", "\"BRANCH\"", ",", "Name", "=", "bname", ")", "abranch", ".", "Component_1_Object_Type", "=", "'duct'", "abranch", ".", "Component_1_Name", "=", "pname", "abranch", ".", "Component_1_Inlet_Node_Name", "=", "aduct", ".", "Inlet_Node_Name", "abranch", ".", "Component_1_Outlet_Node_Name", "=", "aduct", ".", "Outlet_Node_Name", "abranch", ".", "Component_1_Branch_Control_Type", "=", "\"Bypass\"", "return", "abranch" ]
make a branch with a duct; use standard inlet outlet names
[ "make", "a", "branch", "with", "a", "duct", "use", "standard", "inlet", "outlet", "names" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L103-L116
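A sketch of the resulting one-component branch; the branch name is a placeholder:

# Hedged sketch: the branch wraps a single pipe whose node names
# follow the <branchname>_pipe_inlet/_outlet convention.
abranch = makepipebranch(idf, "b1")
print(abranch.Component_1_Name)             # b1_pipe
print(abranch.Component_1_Inlet_Node_Name)  # b1_pipe_inlet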
santoshphilip/eppy
eppy/hvacbuilder.py
getbranchcomponents
def getbranchcomponents(idf, branch, utest=False):
    """get the components of the branch"""
    fobjtype = 'Component_%s_Object_Type'
    fobjname = 'Component_%s_Name'
    complist = []
    for i in range(1, 100000):
        try:
            objtype = branch[fobjtype % (i,)]
            if objtype.strip() == '':
                break
            objname = branch[fobjname % (i,)]
            complist.append((objtype, objname))
        except bunch_subclass.BadEPFieldError:
            break
    if utest:
        return complist
    else:
        return [idf.getobject(ot, on) for ot, on in complist]
python
[ "def", "getbranchcomponents", "(", "idf", ",", "branch", ",", "utest", "=", "False", ")", ":", "fobjtype", "=", "'Component_%s_Object_Type'", "fobjname", "=", "'Component_%s_Name'", "complist", "=", "[", "]", "for", "i", "in", "range", "(", "1", ",", "100000", ")", ":", "try", ":", "objtype", "=", "branch", "[", "fobjtype", "%", "(", "i", ",", ")", "]", "if", "objtype", ".", "strip", "(", ")", "==", "''", ":", "break", "objname", "=", "branch", "[", "fobjname", "%", "(", "i", ",", ")", "]", "complist", ".", "append", "(", "(", "objtype", ",", "objname", ")", ")", "except", "bunch_subclass", ".", "BadEPFieldError", ":", "break", "if", "utest", ":", "return", "complist", "else", ":", "return", "[", "idf", ".", "getobject", "(", "ot", ",", "on", ")", "for", "ot", ",", "on", "in", "complist", "]" ]
get the components of the branch
[ "get", "the", "components", "of", "the", "branch" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L118-L135
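A sketch, continuing with the branch made above:

# Hedged sketch: utest=True returns (objtype, objname) pairs; the
# default resolves them to the actual idf objects.
print(getbranchcomponents(idf, abranch, utest=True))
# e.g. [('Pipe:Adiabatic', 'b1_pipe')]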
santoshphilip/eppy
eppy/hvacbuilder.py
renamenodes
def renamenodes(idf, fieldtype):
    """rename all the changed nodes"""
    renameds = []
    for key in idf.model.dtls:
        for idfobject in idf.idfobjects[key]:
            for fieldvalue in idfobject.obj:
                if type(fieldvalue) is list:
                    if fieldvalue not in renameds:
                        cpvalue = copy.copy(fieldvalue)
                        renameds.append(cpvalue)
    # do the renaming
    for key in idf.model.dtls:
        for idfobject in idf.idfobjects[key]:
            for i, fieldvalue in enumerate(idfobject.obj):
                itsidd = idfobject.objidd[i]
                if 'type' in itsidd:
                    if itsidd['type'][0] == fieldtype:
                        tempdct = dict(renameds)
                        if type(fieldvalue) is list:
                            fieldvalue = fieldvalue[-1]
                            idfobject.obj[i] = fieldvalue
                        else:
                            if fieldvalue in tempdct:
                                fieldvalue = tempdct[fieldvalue]
                                idfobject.obj[i] = fieldvalue
python
[ "def", "renamenodes", "(", "idf", ",", "fieldtype", ")", ":", "renameds", "=", "[", "]", "for", "key", "in", "idf", ".", "model", ".", "dtls", ":", "for", "idfobject", "in", "idf", ".", "idfobjects", "[", "key", "]", ":", "for", "fieldvalue", "in", "idfobject", ".", "obj", ":", "if", "type", "(", "fieldvalue", ")", "is", "list", ":", "if", "fieldvalue", "not", "in", "renameds", ":", "cpvalue", "=", "copy", ".", "copy", "(", "fieldvalue", ")", "renameds", ".", "append", "(", "cpvalue", ")", "# do the renaming", "for", "key", "in", "idf", ".", "model", ".", "dtls", ":", "for", "idfobject", "in", "idf", ".", "idfobjects", "[", "key", "]", ":", "for", "i", ",", "fieldvalue", "in", "enumerate", "(", "idfobject", ".", "obj", ")", ":", "itsidd", "=", "idfobject", ".", "objidd", "[", "i", "]", "if", "'type'", "in", "itsidd", ":", "if", "itsidd", "[", "'type'", "]", "[", "0", "]", "==", "fieldtype", ":", "tempdct", "=", "dict", "(", "renameds", ")", "if", "type", "(", "fieldvalue", ")", "is", "list", ":", "fieldvalue", "=", "fieldvalue", "[", "-", "1", "]", "idfobject", ".", "obj", "[", "i", "]", "=", "fieldvalue", "else", ":", "if", "fieldvalue", "in", "tempdct", ":", "fieldvalue", "=", "tempdct", "[", "fieldvalue", "]", "idfobject", ".", "obj", "[", "i", "]", "=", "fieldvalue" ]
rename all the changed nodes
[ "rename", "all", "the", "changed", "nodes" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L137-L162
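A sketch of the renaming protocol implied by the code: a field holding an [old, new] list marks a pending rename, and 'node' is the IDD field type. Continuing the pipe sketch above:

# Hedged sketch: mark a rename, then propagate it to every field of
# IDD type 'node'.
apipe.Outlet_Node_Name = ["p1_outlet", "p1_to_p2_node"]
renamenodes(idf, "node")
print(apipe.Outlet_Node_Name)   # p1_to_p2_node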
santoshphilip/eppy
eppy/hvacbuilder.py
getfieldnamesendswith
def getfieldnamesendswith(idfobject, endswith):
    """get the fieldnames for the idfobject based on endswith"""
    objls = idfobject.objls
    tmp = [name for name in objls if name.endswith(endswith)]
    if tmp == []:
        pass
    return [name for name in objls if name.endswith(endswith)]
python
[ "def", "getfieldnamesendswith", "(", "idfobject", ",", "endswith", ")", ":", "objls", "=", "idfobject", ".", "objls", "tmp", "=", "[", "name", "for", "name", "in", "objls", "if", "name", ".", "endswith", "(", "endswith", ")", "]", "if", "tmp", "==", "[", "]", ":", "pass", "return", "[", "name", "for", "name", "in", "objls", "if", "name", ".", "endswith", "(", "endswith", ")", "]" ]
get the fieldnames for the idfobject based on endswith
[ "get", "the", "filednames", "for", "the", "idfobject", "based", "on", "endswith" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L164-L170
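For example, with the branch from the earlier sketch:

# Hedged sketch: filter an object's field names by suffix.
print(getfieldnamesendswith(abranch, "Inlet_Node_Name"))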
santoshphilip/eppy
eppy/hvacbuilder.py
getnodefieldname
def getnodefieldname(idfobject, endswith, fluid=None, startswith=None):
    """return the field name of the node

    fluid is only needed if there are air and water nodes
    fluid is Air or Water or ''.
    if the fluid is Steam, use Water"""
    if startswith is None:
        startswith = ''
    if fluid is None:
        fluid = ''
    nodenames = getfieldnamesendswith(idfobject, endswith)
    nodenames = [name for name in nodenames if name.startswith(startswith)]
    fnodenames = [nd for nd in nodenames if nd.find(fluid) != -1]
    fnodenames = [name for name in fnodenames if name.startswith(startswith)]
    if len(fnodenames) == 0:
        nodename = nodenames[0]
    else:
        nodename = fnodenames[0]
    return nodename
python
[ "def", "getnodefieldname", "(", "idfobject", ",", "endswith", ",", "fluid", "=", "None", ",", "startswith", "=", "None", ")", ":", "if", "startswith", "is", "None", ":", "startswith", "=", "''", "if", "fluid", "is", "None", ":", "fluid", "=", "''", "nodenames", "=", "getfieldnamesendswith", "(", "idfobject", ",", "endswith", ")", "nodenames", "=", "[", "name", "for", "name", "in", "nodenames", "if", "name", ".", "startswith", "(", "startswith", ")", "]", "fnodenames", "=", "[", "nd", "for", "nd", "in", "nodenames", "if", "nd", ".", "find", "(", "fluid", ")", "!=", "-", "1", "]", "fnodenames", "=", "[", "name", "for", "name", "in", "fnodenames", "if", "name", ".", "startswith", "(", "startswith", ")", "]", "if", "len", "(", "fnodenames", ")", "==", "0", ":", "nodename", "=", "nodenames", "[", "0", "]", "else", ":", "nodename", "=", "fnodenames", "[", "0", "]", "return", "nodename" ]
return the field name of the node; fluid is only needed if there are air and water nodes; fluid is Air or Water or ''; if the fluid is Steam, use Water
[ "return", "the", "field", "name", "of", "the", "node", "fluid", "is", "only", "needed", "if", "there", "are", "air", "and", "water", "nodes", "fluid", "is", "Air", "or", "Water", "or", ".", "if", "the", "fluid", "is", "Steam", "use", "Water" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L172-L189
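A sketch: on an object with both air and water nodes the fluid filter picks the matching field, and when nothing matches it falls back to the first node field. Continuing the pipe sketch:

# Hedged sketch: pick a node field name, preferring fields whose name
# contains the fluid substring.
name = getnodefieldname(apipe, "Inlet_Node_Name", fluid="Water")
print(name)   # 'Inlet_Node_Name' via the fallback; a pipe has no
              # fluid-specific node fields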
santoshphilip/eppy
eppy/hvacbuilder.py
connectcomponents
def connectcomponents(idf, components, fluid=None):
    """rename nodes so that the components get connected

    fluid is only needed if there are air and water nodes
    fluid is Air or Water or ''.
    if the fluid is Steam, use Water"""
    if fluid is None:
        fluid = ''
    if len(components) == 1:
        thiscomp, thiscompnode = components[0]
        initinletoutlet(idf, thiscomp, thiscompnode, force=False)
        outletnodename = getnodefieldname(thiscomp, "Outlet_Node_Name",
                                          fluid=fluid,
                                          startswith=thiscompnode)
        thiscomp[outletnodename] = [thiscomp[outletnodename],
                                    thiscomp[outletnodename]]
        # inletnodename = getnodefieldname(nextcomp, "Inlet_Node_Name", fluid)
        # nextcomp[inletnodename] = [nextcomp[inletnodename], betweennodename]
        return components
    for i in range(len(components) - 1):
        thiscomp, thiscompnode = components[i]
        nextcomp, nextcompnode = components[i + 1]
        initinletoutlet(idf, thiscomp, thiscompnode, force=False)
        initinletoutlet(idf, nextcomp, nextcompnode, force=False)
        betweennodename = "%s_%s_node" % (thiscomp.Name, nextcomp.Name)
        outletnodename = getnodefieldname(thiscomp, "Outlet_Node_Name",
                                          fluid=fluid,
                                          startswith=thiscompnode)
        thiscomp[outletnodename] = [thiscomp[outletnodename], betweennodename]
        inletnodename = getnodefieldname(nextcomp, "Inlet_Node_Name", fluid)
        nextcomp[inletnodename] = [nextcomp[inletnodename], betweennodename]
    return components
python
[ "def", "connectcomponents", "(", "idf", ",", "components", ",", "fluid", "=", "None", ")", ":", "if", "fluid", "is", "None", ":", "fluid", "=", "''", "if", "len", "(", "components", ")", "==", "1", ":", "thiscomp", ",", "thiscompnode", "=", "components", "[", "0", "]", "initinletoutlet", "(", "idf", ",", "thiscomp", ",", "thiscompnode", ",", "force", "=", "False", ")", "outletnodename", "=", "getnodefieldname", "(", "thiscomp", ",", "\"Outlet_Node_Name\"", ",", "fluid", "=", "fluid", ",", "startswith", "=", "thiscompnode", ")", "thiscomp", "[", "outletnodename", "]", "=", "[", "thiscomp", "[", "outletnodename", "]", ",", "thiscomp", "[", "outletnodename", "]", "]", "# inletnodename = getnodefieldname(nextcomp, \"Inlet_Node_Name\", fluid)", "# nextcomp[inletnodename] = [nextcomp[inletnodename], betweennodename]", "return", "components", "for", "i", "in", "range", "(", "len", "(", "components", ")", "-", "1", ")", ":", "thiscomp", ",", "thiscompnode", "=", "components", "[", "i", "]", "nextcomp", ",", "nextcompnode", "=", "components", "[", "i", "+", "1", "]", "initinletoutlet", "(", "idf", ",", "thiscomp", ",", "thiscompnode", ",", "force", "=", "False", ")", "initinletoutlet", "(", "idf", ",", "nextcomp", ",", "nextcompnode", ",", "force", "=", "False", ")", "betweennodename", "=", "\"%s_%s_node\"", "%", "(", "thiscomp", ".", "Name", ",", "nextcomp", ".", "Name", ")", "outletnodename", "=", "getnodefieldname", "(", "thiscomp", ",", "\"Outlet_Node_Name\"", ",", "fluid", "=", "fluid", ",", "startswith", "=", "thiscompnode", ")", "thiscomp", "[", "outletnodename", "]", "=", "[", "thiscomp", "[", "outletnodename", "]", ",", "betweennodename", "]", "inletnodename", "=", "getnodefieldname", "(", "nextcomp", ",", "\"Inlet_Node_Name\"", ",", "fluid", ")", "nextcomp", "[", "inletnodename", "]", "=", "[", "nextcomp", "[", "inletnodename", "]", ",", "betweennodename", "]", "return", "components" ]
rename nodes so that the components get connected
fluid is only needed if there are air and water nodes
fluid is Air or Water or ''.
if the fluid is Steam, use Water
[ "rename", "nodes", "so", "that", "the", "components", "get", "connected", "fluid", "is", "only", "needed", "if", "there", "are", "air", "and", "water", "nodes", "fluid", "is", "Air", "or", "Water", "or", ".", "if", "the", "fluid", "is", "Steam", "use", "Water" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L192-L220
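A hedged sketch of calling connectcomponents, reusing the illustrative in-memory idf from the sketch above (object types and names are assumptions):

from eppy.hvacbuilder import connectcomponents

pipe1 = idf.newidfobject("PIPE:ADIABATIC", Name="p1")
boiler = idf.newidfobject("BOILER:HOTWATER", Name="b1")
pipe2 = idf.newidfobject("PIPE:ADIABATIC", Name="p2")
# each entry is (component, node_prefix); None when the component's
# inlet/outlet fields are unambiguous
components = [(pipe1, None), (boiler, None), (pipe2, None)]
connectcomponents(idf, components, fluid="Water")
# adjacent components now record a shared connection node such as
# "p1_b1_node" (stored alongside the old node name)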
santoshphilip/eppy
eppy/hvacbuilder.py
initinletoutlet
def initinletoutlet(idf, idfobject, thisnode, force=False):
    """initialize values for all the inlet outlet nodes for the object.
    if force == False, it will init only if field = ''"""
    def blankfield(fieldvalue):
        """test for blank field"""
        try:
            if fieldvalue.strip() == '':
                return True
            else:
                return False
        except AttributeError: # field may be a list
            return False
    def trimfields(fields, thisnode):
        if len(fields) > 1:
            if thisnode is not None:
                fields = [field for field in fields
                          if field.startswith(thisnode)]
                return fields
            else:
                print("Where should this loop connect ?")
                print("%s - %s" % (idfobject.key, idfobject.Name))
                print([field.split("Inlet_Node_Name")[0]
                       for field in inletfields])
                raise WhichLoopError
        else:
            return fields
    inletfields = getfieldnamesendswith(idfobject, "Inlet_Node_Name")
    inletfields = trimfields(inletfields, thisnode) # or warn with exception
    for inletfield in inletfields:
        if blankfield(idfobject[inletfield]) == True or force == True:
            idfobject[inletfield] = "%s_%s" % (idfobject.Name, inletfield)
    outletfields = getfieldnamesendswith(idfobject, "Outlet_Node_Name")
    outletfields = trimfields(outletfields, thisnode) # or warn with exception
    for outletfield in outletfields:
        if blankfield(idfobject[outletfield]) == True or force == True:
            idfobject[outletfield] = "%s_%s" % (idfobject.Name, outletfield)
    return idfobject
python
def initinletoutlet(idf, idfobject, thisnode, force=False):
    """initialize values for all the inlet outlet nodes for the object.
    if force == False, it will init only if field = ''"""
    def blankfield(fieldvalue):
        """test for blank field"""
        try:
            if fieldvalue.strip() == '':
                return True
            else:
                return False
        except AttributeError: # field may be a list
            return False
    def trimfields(fields, thisnode):
        if len(fields) > 1:
            if thisnode is not None:
                fields = [field for field in fields
                          if field.startswith(thisnode)]
                return fields
            else:
                print("Where should this loop connect ?")
                print("%s - %s" % (idfobject.key, idfobject.Name))
                print([field.split("Inlet_Node_Name")[0]
                       for field in inletfields])
                raise WhichLoopError
        else:
            return fields
    inletfields = getfieldnamesendswith(idfobject, "Inlet_Node_Name")
    inletfields = trimfields(inletfields, thisnode) # or warn with exception
    for inletfield in inletfields:
        if blankfield(idfobject[inletfield]) == True or force == True:
            idfobject[inletfield] = "%s_%s" % (idfobject.Name, inletfield)
    outletfields = getfieldnamesendswith(idfobject, "Outlet_Node_Name")
    outletfields = trimfields(outletfields, thisnode) # or warn with exception
    for outletfield in outletfields:
        if blankfield(idfobject[outletfield]) == True or force == True:
            idfobject[outletfield] = "%s_%s" % (idfobject.Name, outletfield)
    return idfobject
[ "def", "initinletoutlet", "(", "idf", ",", "idfobject", ",", "thisnode", ",", "force", "=", "False", ")", ":", "def", "blankfield", "(", "fieldvalue", ")", ":", "\"\"\"test for blank field\"\"\"", "try", ":", "if", "fieldvalue", ".", "strip", "(", ")", "==", "''", ":", "return", "True", "else", ":", "return", "False", "except", "AttributeError", ":", "# field may be a list", "return", "False", "def", "trimfields", "(", "fields", ",", "thisnode", ")", ":", "if", "len", "(", "fields", ")", ">", "1", ":", "if", "thisnode", "is", "not", "None", ":", "fields", "=", "[", "field", "for", "field", "in", "fields", "if", "field", ".", "startswith", "(", "thisnode", ")", "]", "return", "fields", "else", ":", "print", "(", "\"Where should this loop connect ?\"", ")", "print", "(", "\"%s - %s\"", "%", "(", "idfobject", ".", "key", ",", "idfobject", ".", "Name", ")", ")", "print", "(", "[", "field", ".", "split", "(", "\"Inlet_Node_Name\"", ")", "[", "0", "]", "for", "field", "in", "inletfields", "]", ")", "raise", "WhichLoopError", "else", ":", "return", "fields", "inletfields", "=", "getfieldnamesendswith", "(", "idfobject", ",", "\"Inlet_Node_Name\"", ")", "inletfields", "=", "trimfields", "(", "inletfields", ",", "thisnode", ")", "# or warn with exception", "for", "inletfield", "in", "inletfields", ":", "if", "blankfield", "(", "idfobject", "[", "inletfield", "]", ")", "==", "True", "or", "force", "==", "True", ":", "idfobject", "[", "inletfield", "]", "=", "\"%s_%s\"", "%", "(", "idfobject", ".", "Name", ",", "inletfield", ")", "outletfields", "=", "getfieldnamesendswith", "(", "idfobject", ",", "\"Outlet_Node_Name\"", ")", "outletfields", "=", "trimfields", "(", "outletfields", ",", "thisnode", ")", "# or warn with exception", "for", "outletfield", "in", "outletfields", ":", "if", "blankfield", "(", "idfobject", "[", "outletfield", "]", ")", "==", "True", "or", "force", "==", "True", ":", "idfobject", "[", "outletfield", "]", "=", "\"%s_%s\"", "%", "(", "idfobject", ".", "Name", ",", "outletfield", ")", "return", "idfobject" ]
initialize values for all the inlet outlet nodes for the object.
if force == False, it will init only if field = ''
[ "initialize", "values", "for", "all", "the", "inlet", "outlet", "nodes", "for", "the", "object", ".", "if", "force", "==", "False", "it", "will", "init", "only", "if", "field", "=" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L223-L260
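A small sketch of initinletoutlet on a freshly created component (continuing the same illustrative idf):

from eppy.hvacbuilder import initinletoutlet

pipe3 = idf.newidfobject("PIPE:ADIABATIC", Name="p3")
initinletoutlet(idf, pipe3, None)   # thisnode=None: fields are unambiguous
print(pipe3.Inlet_Node_Name)        # -> "p3_Inlet_Node_Name"
print(pipe3.Outlet_Node_Name)       # -> "p3_Outlet_Node_Name"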
santoshphilip/eppy
eppy/hvacbuilder.py
componentsintobranch
def componentsintobranch(idf, branch, listofcomponents, fluid=None):
    """insert a list of components into a branch
    fluid is only needed if there are air and water nodes in same object
    fluid is Air or Water or ''.
    if the fluid is Steam, use Water"""
    if fluid is None:
        fluid = ''
    componentlist = [item[0] for item in listofcomponents]
    # assumes that the nodes of the component connect to each other
    # empty branch if it has existing components
    thebranchname = branch.Name
    thebranch = idf.removeextensibles('BRANCH', thebranchname) # empty the branch
    # fill in the new components with the node names into this branch
    # find the first extensible field and fill in the data in obj.
    e_index = idf.getextensibleindex('BRANCH', thebranchname)
    theobj = thebranch.obj
    modeleditor.extendlist(theobj, e_index) # just being careful here
    for comp, compnode in listofcomponents:
        theobj.append(comp.key)
        theobj.append(comp.Name)
        inletnodename = getnodefieldname(comp, "Inlet_Node_Name",
                                         fluid=fluid, startswith=compnode)
        theobj.append(comp[inletnodename])
        outletnodename = getnodefieldname(comp, "Outlet_Node_Name",
                                          fluid=fluid, startswith=compnode)
        theobj.append(comp[outletnodename])
        theobj.append('')
    return thebranch
python
def componentsintobranch(idf, branch, listofcomponents, fluid=None):
    """insert a list of components into a branch
    fluid is only needed if there are air and water nodes in same object
    fluid is Air or Water or ''.
    if the fluid is Steam, use Water"""
    if fluid is None:
        fluid = ''
    componentlist = [item[0] for item in listofcomponents]
    # assumes that the nodes of the component connect to each other
    # empty branch if it has existing components
    thebranchname = branch.Name
    thebranch = idf.removeextensibles('BRANCH', thebranchname) # empty the branch
    # fill in the new components with the node names into this branch
    # find the first extensible field and fill in the data in obj.
    e_index = idf.getextensibleindex('BRANCH', thebranchname)
    theobj = thebranch.obj
    modeleditor.extendlist(theobj, e_index) # just being careful here
    for comp, compnode in listofcomponents:
        theobj.append(comp.key)
        theobj.append(comp.Name)
        inletnodename = getnodefieldname(comp, "Inlet_Node_Name",
                                         fluid=fluid, startswith=compnode)
        theobj.append(comp[inletnodename])
        outletnodename = getnodefieldname(comp, "Outlet_Node_Name",
                                          fluid=fluid, startswith=compnode)
        theobj.append(comp[outletnodename])
        theobj.append('')
    return thebranch
[ "def", "componentsintobranch", "(", "idf", ",", "branch", ",", "listofcomponents", ",", "fluid", "=", "None", ")", ":", "if", "fluid", "is", "None", ":", "fluid", "=", "''", "componentlist", "=", "[", "item", "[", "0", "]", "for", "item", "in", "listofcomponents", "]", "# assumes that the nodes of the component connect to each other", "# empty branch if it has existing components", "thebranchname", "=", "branch", ".", "Name", "thebranch", "=", "idf", ".", "removeextensibles", "(", "'BRANCH'", ",", "thebranchname", ")", "# empty the branch", "# fill in the new components with the node names into this branch", "# find the first extensible field and fill in the data in obj.", "e_index", "=", "idf", ".", "getextensibleindex", "(", "'BRANCH'", ",", "thebranchname", ")", "theobj", "=", "thebranch", ".", "obj", "modeleditor", ".", "extendlist", "(", "theobj", ",", "e_index", ")", "# just being careful here", "for", "comp", ",", "compnode", "in", "listofcomponents", ":", "theobj", ".", "append", "(", "comp", ".", "key", ")", "theobj", ".", "append", "(", "comp", ".", "Name", ")", "inletnodename", "=", "getnodefieldname", "(", "comp", ",", "\"Inlet_Node_Name\"", ",", "fluid", "=", "fluid", ",", "startswith", "=", "compnode", ")", "theobj", ".", "append", "(", "comp", "[", "inletnodename", "]", ")", "outletnodename", "=", "getnodefieldname", "(", "comp", ",", "\"Outlet_Node_Name\"", ",", "fluid", "=", "fluid", ",", "startswith", "=", "compnode", ")", "theobj", ".", "append", "(", "comp", "[", "outletnodename", "]", ")", "theobj", ".", "append", "(", "''", ")", "return", "thebranch" ]
insert a list of components into a branch
fluid is only needed if there are air and water nodes in same object
fluid is Air or Water or ''.
if the fluid is Steam, use Water
[ "insert", "a", "list", "of", "components", "into", "a", "branch", "fluid", "is", "only", "needed", "if", "there", "are", "air", "and", "water", "nodes", "in", "same", "object", "fluid", "is", "Air", "or", "Water", "or", ".", "if", "the", "fluid", "is", "Steam", "use", "Water" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L262-L290
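A hedged sketch of componentsintobranch (the branch name is illustrative; the component must already carry node names, e.g. via initinletoutlet):

from eppy.hvacbuilder import componentsintobranch, initinletoutlet

branch = idf.newidfobject("BRANCH", Name="sb1")
pipe4 = idf.newidfobject("PIPE:ADIABATIC", Name="p4")
initinletoutlet(idf, pipe4, None)
# writes key, name, inlet node and outlet node for each component
# into the branch's extensible fields
componentsintobranch(idf, branch, [(pipe4, None)], fluid="Water")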
santoshphilip/eppy
eppy/hvacbuilder.py
makeairloop
def makeairloop(idf, loopname, sloop, dloop, testing=None):
    """make an airloop"""
    # -------- testing ---------
    testn = 0
    # -------- testing ---------
    newairloop = idf.newidfobject("AirLoopHVAC".upper(), Name=loopname)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    fields = SomeFields.a_fields # for use in bunch
    flnames = [field.replace(' ', '_') for field in fields] # simplify naming
    fields1 = ['Branches', 'Connectors', 'Supply Inlet',
               'Demand Outlet', 'Demand Inlet', 'Supply Outlet']
    # old TODO : pop connectors if no parallel branches
    # make fieldnames in the air loop
    fieldnames = ['%s %s' % (loopname, field) for field in fields1]
    for fieldname, thefield in zip(fieldnames, flnames):
        newairloop[thefield] = fieldname
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # make the branch lists for this air loop
    sbranchlist = idf.newidfobject("BRANCHLIST", Name=newairloop[flnames[0]])
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # add branch names to the branchlist
    sbranchnames = flattencopy(sloop)
    # sbranchnames = sloop[1]
    for branchname in sbranchnames:
        sbranchlist.obj.append(branchname)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # supply side
    sbranchs = []
    for bname in sbranchnames:
        branch = makeductbranch(idf, bname)
        sbranchs.append(branch)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # rename inlet outlet of endpoints of loop
    anode = "Component_1_Inlet_Node_Name"
    sameinnode = "Supply_Side_Inlet_Node_Name" # TODO : change ?
    sbranchs[0][anode] = newairloop[sameinnode]
    anode = "Component_1_Outlet_Node_Name"
    sameoutnode = "Supply_Side_Outlet_Node_Names" # TODO : change ?
    sbranchs[-1][anode] = newairloop[sameoutnode]
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # rename inlet outlet of endpoints of loop - rename in pipe
    dname = sbranchs[0]['Component_1_Name'] # get the duct name
    aduct = idf.getobject('duct'.upper(), dname) # get duct
    aduct.Inlet_Node_Name = newairloop[sameinnode]
    dname = sbranchs[-1]['Component_1_Name'] # get the duct name
    aduct = idf.getobject('duct'.upper(), dname) # get duct
    aduct.Outlet_Node_Name = newairloop[sameoutnode]
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    #
    # # TODO : test if there are parallel branches
    # make the connectorlist and fill fields
    sconnlist = idf.newidfobject("CONNECTORLIST",
                                 Name=newairloop.Connector_List_Name)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    sconnlist.Connector_1_Object_Type = "Connector:Splitter"
    sconnlist.Connector_1_Name = "%s_supply_splitter" % (loopname,)
    sconnlist.Connector_2_Object_Type = "Connector:Mixer"
    sconnlist.Connector_2_Name = "%s_supply_mixer" % (loopname,)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # make splitters and mixers
    s_splitter = idf.newidfobject("CONNECTOR:SPLITTER",
                                  Name=sconnlist.Connector_1_Name)
    s_splitter.obj.extend([sloop[0]] + sloop[1])
    s_mixer = idf.newidfobject("CONNECTOR:MIXER",
                               Name=sconnlist.Connector_2_Name)
    s_mixer.obj.extend([sloop[-1]] + sloop[1])
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # demand side loop for airloop is made below
    # ZoneHVAC:EquipmentConnections
    for zone in dloop:
        equipconn = idf.newidfobject("ZoneHVAC:EquipmentConnections".upper())
        equipconn.Zone_Name = zone
        fldname = "Zone_Conditioning_Equipment_List_Name"
        equipconn[fldname] = "%s equip list" % (zone,)
        fldname = "Zone_Air_Inlet_Node_or_NodeList_Name"
        equipconn[fldname] = "%s Inlet Node" % (zone,)
        fldname = "Zone_Air_Node_Name"
        equipconn[fldname] = "%s Node" % (zone,)
        fldname = "Zone_Return_Air_Node_Name"
        equipconn[fldname] = "%s Outlet Node" % (zone,)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # make ZoneHVAC:EquipmentList
    for zone in dloop:
        z_equiplst = idf.newidfobject("ZoneHVAC:EquipmentList".upper())
        z_equipconn = modeleditor.getobjects(
            idf.idfobjects, idf.model, idf.idd_info,
            "ZoneHVAC:EquipmentConnections".upper(), # places=7,
            **dict(Zone_Name=zone))[0]
        z_equiplst.Name = z_equipconn.Zone_Conditioning_Equipment_List_Name
        fld = "Zone_Equipment_1_Object_Type"
        z_equiplst[fld] = "AirTerminal:SingleDuct:Uncontrolled"
        z_equiplst.Zone_Equipment_1_Name = "%sDirectAir" % (zone,)
        z_equiplst.Zone_Equipment_1_Cooling_Sequence = 1
        z_equiplst.Zone_Equipment_1_Heating_or_NoLoad_Sequence = 1
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # make AirTerminal:SingleDuct:Uncontrolled
    for zone in dloop:
        z_equipconn = modeleditor.getobjects(
            idf.idfobjects, idf.model, idf.idd_info,
            "ZoneHVAC:EquipmentConnections".upper(), # places=7,
            **dict(Zone_Name=zone))[0]
        key = "AirTerminal:SingleDuct:Uncontrolled".upper()
        z_airterm = idf.newidfobject(key)
        z_airterm.Name = "%sDirectAir" % (zone,)
        fld1 = "Zone_Supply_Air_Node_Name"
        fld2 = "Zone_Air_Inlet_Node_or_NodeList_Name"
        z_airterm[fld1] = z_equipconn[fld2]
        z_airterm.Maximum_Air_Flow_Rate = 'autosize'
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # MAKE AirLoopHVAC:ZoneSplitter
    # zone = dloop[0]
    key = "AirLoopHVAC:ZoneSplitter".upper()
    z_splitter = idf.newidfobject(key)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    z_splitter.Name = "%s Demand Side Splitter" % (loopname,)
    z_splitter.Inlet_Node_Name = newairloop.Demand_Side_Inlet_Node_Names
    for i, zone in enumerate(dloop):
        z_equipconn = modeleditor.getobjects(
            idf.idfobjects, idf.model, idf.idd_info,
            "ZoneHVAC:EquipmentConnections".upper(), # places=7,
            **dict(Zone_Name=zone))[0]
        fld = "Outlet_%s_Node_Name" % (i + 1,)
        z_splitter[fld] = z_equipconn.Zone_Air_Inlet_Node_or_NodeList_Name
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # make AirLoopHVAC:SupplyPath
    key = "AirLoopHVAC:SupplyPath".upper()
    z_supplypth = idf.newidfobject(key)
    z_supplypth.Name = "%sSupplyPath" % (loopname,)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    fld1 = "Supply_Air_Path_Inlet_Node_Name"
    fld2 = "Demand_Side_Inlet_Node_Names"
    z_supplypth[fld1] = newairloop[fld2]
    z_supplypth.Component_1_Object_Type = "AirLoopHVAC:ZoneSplitter"
    z_supplypth.Component_1_Name = z_splitter.Name
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # make AirLoopHVAC:ZoneMixer
    key = "AirLoopHVAC:ZoneMixer".upper()
    z_mixer = idf.newidfobject(key)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    z_mixer.Name = "%s Demand Side Mixer" % (loopname,)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    z_mixer.Outlet_Node_Name = newairloop.Demand_Side_Outlet_Node_Name
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    for i, zone in enumerate(dloop):
        z_equipconn = modeleditor.getobjects(
            idf.idfobjects, idf.model, idf.idd_info,
            "ZoneHVAC:EquipmentConnections".upper(), # places=7,
            **dict(Zone_Name=zone))[0]
        fld = "Inlet_%s_Node_Name" % (i + 1,)
        z_mixer[fld] = z_equipconn.Zone_Return_Air_Node_Name
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # make AirLoopHVAC:ReturnPath
    key = "AirLoopHVAC:ReturnPath".upper()
    z_returnpth = idf.newidfobject(key)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    z_returnpth.Name = "%sReturnPath" % (loopname,)
    z_returnpth.Return_Air_Path_Outlet_Node_Name = newairloop.Demand_Side_Outlet_Node_Name
    z_returnpth.Component_1_Object_Type = "AirLoopHVAC:ZoneMixer"
    z_returnpth.Component_1_Name = z_mixer.Name
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    return newairloop
python
def makeairloop(idf, loopname, sloop, dloop, testing=None):
    """make an airloop"""
    # -------- testing ---------
    testn = 0
    # -------- testing ---------
    newairloop = idf.newidfobject("AirLoopHVAC".upper(), Name=loopname)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    fields = SomeFields.a_fields # for use in bunch
    flnames = [field.replace(' ', '_') for field in fields] # simplify naming
    fields1 = ['Branches', 'Connectors', 'Supply Inlet',
               'Demand Outlet', 'Demand Inlet', 'Supply Outlet']
    # old TODO : pop connectors if no parallel branches
    # make fieldnames in the air loop
    fieldnames = ['%s %s' % (loopname, field) for field in fields1]
    for fieldname, thefield in zip(fieldnames, flnames):
        newairloop[thefield] = fieldname
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # make the branch lists for this air loop
    sbranchlist = idf.newidfobject("BRANCHLIST", Name=newairloop[flnames[0]])
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # add branch names to the branchlist
    sbranchnames = flattencopy(sloop)
    # sbranchnames = sloop[1]
    for branchname in sbranchnames:
        sbranchlist.obj.append(branchname)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # supply side
    sbranchs = []
    for bname in sbranchnames:
        branch = makeductbranch(idf, bname)
        sbranchs.append(branch)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # rename inlet outlet of endpoints of loop
    anode = "Component_1_Inlet_Node_Name"
    sameinnode = "Supply_Side_Inlet_Node_Name" # TODO : change ?
    sbranchs[0][anode] = newairloop[sameinnode]
    anode = "Component_1_Outlet_Node_Name"
    sameoutnode = "Supply_Side_Outlet_Node_Names" # TODO : change ?
    sbranchs[-1][anode] = newairloop[sameoutnode]
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # rename inlet outlet of endpoints of loop - rename in pipe
    dname = sbranchs[0]['Component_1_Name'] # get the duct name
    aduct = idf.getobject('duct'.upper(), dname) # get duct
    aduct.Inlet_Node_Name = newairloop[sameinnode]
    dname = sbranchs[-1]['Component_1_Name'] # get the duct name
    aduct = idf.getobject('duct'.upper(), dname) # get duct
    aduct.Outlet_Node_Name = newairloop[sameoutnode]
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    #
    # # TODO : test if there are parallel branches
    # make the connectorlist and fill fields
    sconnlist = idf.newidfobject("CONNECTORLIST",
                                 Name=newairloop.Connector_List_Name)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    sconnlist.Connector_1_Object_Type = "Connector:Splitter"
    sconnlist.Connector_1_Name = "%s_supply_splitter" % (loopname,)
    sconnlist.Connector_2_Object_Type = "Connector:Mixer"
    sconnlist.Connector_2_Name = "%s_supply_mixer" % (loopname,)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # make splitters and mixers
    s_splitter = idf.newidfobject("CONNECTOR:SPLITTER",
                                  Name=sconnlist.Connector_1_Name)
    s_splitter.obj.extend([sloop[0]] + sloop[1])
    s_mixer = idf.newidfobject("CONNECTOR:MIXER",
                               Name=sconnlist.Connector_2_Name)
    s_mixer.obj.extend([sloop[-1]] + sloop[1])
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # demand side loop for airloop is made below
    # ZoneHVAC:EquipmentConnections
    for zone in dloop:
        equipconn = idf.newidfobject("ZoneHVAC:EquipmentConnections".upper())
        equipconn.Zone_Name = zone
        fldname = "Zone_Conditioning_Equipment_List_Name"
        equipconn[fldname] = "%s equip list" % (zone,)
        fldname = "Zone_Air_Inlet_Node_or_NodeList_Name"
        equipconn[fldname] = "%s Inlet Node" % (zone,)
        fldname = "Zone_Air_Node_Name"
        equipconn[fldname] = "%s Node" % (zone,)
        fldname = "Zone_Return_Air_Node_Name"
        equipconn[fldname] = "%s Outlet Node" % (zone,)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # make ZoneHVAC:EquipmentList
    for zone in dloop:
        z_equiplst = idf.newidfobject("ZoneHVAC:EquipmentList".upper())
        z_equipconn = modeleditor.getobjects(
            idf.idfobjects, idf.model, idf.idd_info,
            "ZoneHVAC:EquipmentConnections".upper(), # places=7,
            **dict(Zone_Name=zone))[0]
        z_equiplst.Name = z_equipconn.Zone_Conditioning_Equipment_List_Name
        fld = "Zone_Equipment_1_Object_Type"
        z_equiplst[fld] = "AirTerminal:SingleDuct:Uncontrolled"
        z_equiplst.Zone_Equipment_1_Name = "%sDirectAir" % (zone,)
        z_equiplst.Zone_Equipment_1_Cooling_Sequence = 1
        z_equiplst.Zone_Equipment_1_Heating_or_NoLoad_Sequence = 1
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # make AirTerminal:SingleDuct:Uncontrolled
    for zone in dloop:
        z_equipconn = modeleditor.getobjects(
            idf.idfobjects, idf.model, idf.idd_info,
            "ZoneHVAC:EquipmentConnections".upper(), # places=7,
            **dict(Zone_Name=zone))[0]
        key = "AirTerminal:SingleDuct:Uncontrolled".upper()
        z_airterm = idf.newidfobject(key)
        z_airterm.Name = "%sDirectAir" % (zone,)
        fld1 = "Zone_Supply_Air_Node_Name"
        fld2 = "Zone_Air_Inlet_Node_or_NodeList_Name"
        z_airterm[fld1] = z_equipconn[fld2]
        z_airterm.Maximum_Air_Flow_Rate = 'autosize'
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # MAKE AirLoopHVAC:ZoneSplitter
    # zone = dloop[0]
    key = "AirLoopHVAC:ZoneSplitter".upper()
    z_splitter = idf.newidfobject(key)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    z_splitter.Name = "%s Demand Side Splitter" % (loopname,)
    z_splitter.Inlet_Node_Name = newairloop.Demand_Side_Inlet_Node_Names
    for i, zone in enumerate(dloop):
        z_equipconn = modeleditor.getobjects(
            idf.idfobjects, idf.model, idf.idd_info,
            "ZoneHVAC:EquipmentConnections".upper(), # places=7,
            **dict(Zone_Name=zone))[0]
        fld = "Outlet_%s_Node_Name" % (i + 1,)
        z_splitter[fld] = z_equipconn.Zone_Air_Inlet_Node_or_NodeList_Name
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # make AirLoopHVAC:SupplyPath
    key = "AirLoopHVAC:SupplyPath".upper()
    z_supplypth = idf.newidfobject(key)
    z_supplypth.Name = "%sSupplyPath" % (loopname,)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    fld1 = "Supply_Air_Path_Inlet_Node_Name"
    fld2 = "Demand_Side_Inlet_Node_Names"
    z_supplypth[fld1] = newairloop[fld2]
    z_supplypth.Component_1_Object_Type = "AirLoopHVAC:ZoneSplitter"
    z_supplypth.Component_1_Name = z_splitter.Name
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # make AirLoopHVAC:ZoneMixer
    key = "AirLoopHVAC:ZoneMixer".upper()
    z_mixer = idf.newidfobject(key)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    z_mixer.Name = "%s Demand Side Mixer" % (loopname,)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    z_mixer.Outlet_Node_Name = newairloop.Demand_Side_Outlet_Node_Name
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    for i, zone in enumerate(dloop):
        z_equipconn = modeleditor.getobjects(
            idf.idfobjects, idf.model, idf.idd_info,
            "ZoneHVAC:EquipmentConnections".upper(), # places=7,
            **dict(Zone_Name=zone))[0]
        fld = "Inlet_%s_Node_Name" % (i + 1,)
        z_mixer[fld] = z_equipconn.Zone_Return_Air_Node_Name
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # make AirLoopHVAC:ReturnPath
    key = "AirLoopHVAC:ReturnPath".upper()
    z_returnpth = idf.newidfobject(key)
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    z_returnpth.Name = "%sReturnPath" % (loopname,)
    z_returnpth.Return_Air_Path_Outlet_Node_Name = newairloop.Demand_Side_Outlet_Node_Name
    z_returnpth.Component_1_Object_Type = "AirLoopHVAC:ZoneMixer"
    z_returnpth.Component_1_Name = z_mixer.Name
    # -------- testing ---------
    testn = doingtesting(testing, testn, newairloop)
    if testn == None:
        returnnone()
    # -------- testing ---------
    return newairloop
[ "def", "makeairloop", "(", "idf", ",", "loopname", ",", "sloop", ",", "dloop", ",", "testing", "=", "None", ")", ":", "# -------- testing ---------", "testn", "=", "0", "# -------- testing ---------", "newairloop", "=", "idf", ".", "newidfobject", "(", "\"AirLoopHVAC\"", ".", "upper", "(", ")", ",", "Name", "=", "loopname", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "fields", "=", "SomeFields", ".", "a_fields", "# for use in bunch", "flnames", "=", "[", "field", ".", "replace", "(", "' '", ",", "'_'", ")", "for", "field", "in", "fields", "]", "# simplify naming", "fields1", "=", "[", "'Branches'", ",", "'Connectors'", ",", "'Supply Inlet'", ",", "'Demand Outlet'", ",", "'Demand Inlet'", ",", "'Supply Outlet'", "]", "# old TODO : pop connectors if no parallel branches", "# make fieldnames in the air loop", "fieldnames", "=", "[", "'%s %s'", "%", "(", "loopname", ",", "field", ")", "for", "field", "in", "fields1", "]", "for", "fieldname", ",", "thefield", "in", "zip", "(", "fieldnames", ",", "flnames", ")", ":", "newairloop", "[", "thefield", "]", "=", "fieldname", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "# make the branch lists for this air loop", "sbranchlist", "=", "idf", ".", "newidfobject", "(", "\"BRANCHLIST\"", ",", "Name", "=", "newairloop", "[", "flnames", "[", "0", "]", "]", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "# add branch names to the branchlist", "sbranchnames", "=", "flattencopy", "(", "sloop", ")", "# sbranchnames = sloop[1]", "for", "branchname", "in", "sbranchnames", ":", "sbranchlist", ".", "obj", ".", "append", "(", "branchname", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "# supply side", "sbranchs", "=", "[", "]", "for", "bname", "in", "sbranchnames", ":", "branch", "=", "makeductbranch", "(", "idf", ",", "bname", ")", "sbranchs", ".", "append", "(", "branch", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "# rename inlet outlet of endpoints of loop", "anode", "=", "\"Component_1_Inlet_Node_Name\"", "sameinnode", "=", "\"Supply_Side_Inlet_Node_Name\"", "# TODO : change ?", "sbranchs", "[", "0", "]", "[", "anode", "]", "=", "newairloop", "[", "sameinnode", "]", "anode", "=", "\"Component_1_Outlet_Node_Name\"", "sameoutnode", "=", "\"Supply_Side_Outlet_Node_Names\"", "# TODO : change ?", "sbranchs", "[", "-", "1", "]", "[", "anode", "]", "=", "newairloop", "[", "sameoutnode", "]", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "# rename inlet outlet of endpoints of loop - rename in pipe", "dname", "=", "sbranchs", "[", "0", "]", "[", "'Component_1_Name'", "]", "# get the duct name", "aduct", 
"=", "idf", ".", "getobject", "(", "'duct'", ".", "upper", "(", ")", ",", "dname", ")", "# get duct", "aduct", ".", "Inlet_Node_Name", "=", "newairloop", "[", "sameinnode", "]", "dname", "=", "sbranchs", "[", "-", "1", "]", "[", "'Component_1_Name'", "]", "# get the duct name", "aduct", "=", "idf", ".", "getobject", "(", "'duct'", ".", "upper", "(", ")", ",", "dname", ")", "# get duct", "aduct", ".", "Outlet_Node_Name", "=", "newairloop", "[", "sameoutnode", "]", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "#", "# # TODO : test if there are parallel branches", "# make the connectorlist an fill fields", "sconnlist", "=", "idf", ".", "newidfobject", "(", "\"CONNECTORLIST\"", ",", "Name", "=", "newairloop", ".", "Connector_List_Name", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "sconnlist", ".", "Connector_1_Object_Type", "=", "\"Connector:Splitter\"", "sconnlist", ".", "Connector_1_Name", "=", "\"%s_supply_splitter\"", "%", "(", "loopname", ",", ")", "sconnlist", ".", "Connector_2_Object_Type", "=", "\"Connector:Mixer\"", "sconnlist", ".", "Connector_2_Name", "=", "\"%s_supply_mixer\"", "%", "(", "loopname", ",", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "# make splitters and mixers", "s_splitter", "=", "idf", ".", "newidfobject", "(", "\"CONNECTOR:SPLITTER\"", ",", "Name", "=", "sconnlist", ".", "Connector_1_Name", ")", "s_splitter", ".", "obj", ".", "extend", "(", "[", "sloop", "[", "0", "]", "]", "+", "sloop", "[", "1", "]", ")", "s_mixer", "=", "idf", ".", "newidfobject", "(", "\"CONNECTOR:MIXER\"", ",", "Name", "=", "sconnlist", ".", "Connector_2_Name", ")", "s_mixer", ".", "obj", ".", "extend", "(", "[", "sloop", "[", "-", "1", "]", "]", "+", "sloop", "[", "1", "]", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "# demand side loop for airloop is made below", "# ZoneHVAC:EquipmentConnections", "for", "zone", "in", "dloop", ":", "equipconn", "=", "idf", ".", "newidfobject", "(", "\"ZoneHVAC:EquipmentConnections\"", ".", "upper", "(", ")", ")", "equipconn", ".", "Zone_Name", "=", "zone", "fldname", "=", "\"Zone_Conditioning_Equipment_List_Name\"", "equipconn", "[", "fldname", "]", "=", "\"%s equip list\"", "%", "(", "zone", ",", ")", "fldname", "=", "\"Zone_Air_Inlet_Node_or_NodeList_Name\"", "equipconn", "[", "fldname", "]", "=", "\"%s Inlet Node\"", "%", "(", "zone", ",", ")", "fldname", "=", "\"Zone_Air_Node_Name\"", "equipconn", "[", "fldname", "]", "=", "\"%s Node\"", "%", "(", "zone", ",", ")", "fldname", "=", "\"Zone_Return_Air_Node_Name\"", "equipconn", "[", "fldname", "]", "=", "\"%s Outlet Node\"", "%", "(", "zone", ",", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "# make ZoneHVAC:EquipmentList", "for", "zone", "in", "dloop", ":", "z_equiplst", "=", "idf", ".", 
"newidfobject", "(", "\"ZoneHVAC:EquipmentList\"", ".", "upper", "(", ")", ")", "z_equipconn", "=", "modeleditor", ".", "getobjects", "(", "idf", ".", "idfobjects", ",", "idf", ".", "model", ",", "idf", ".", "idd_info", ",", "\"ZoneHVAC:EquipmentConnections\"", ".", "upper", "(", ")", ",", "# places=7,", "*", "*", "dict", "(", "Zone_Name", "=", "zone", ")", ")", "[", "0", "]", "z_equiplst", ".", "Name", "=", "z_equipconn", ".", "Zone_Conditioning_Equipment_List_Name", "fld", "=", "\"Zone_Equipment_1_Object_Type\"", "z_equiplst", "[", "fld", "]", "=", "\"AirTerminal:SingleDuct:Uncontrolled\"", "z_equiplst", ".", "Zone_Equipment_1_Name", "=", "\"%sDirectAir\"", "%", "(", "zone", ",", ")", "z_equiplst", ".", "Zone_Equipment_1_Cooling_Sequence", "=", "1", "z_equiplst", ".", "Zone_Equipment_1_Heating_or_NoLoad_Sequence", "=", "1", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "# make AirTerminal:SingleDuct:Uncontrolled", "for", "zone", "in", "dloop", ":", "z_equipconn", "=", "modeleditor", ".", "getobjects", "(", "idf", ".", "idfobjects", ",", "idf", ".", "model", ",", "idf", ".", "idd_info", ",", "\"ZoneHVAC:EquipmentConnections\"", ".", "upper", "(", ")", ",", "# places=7,", "*", "*", "dict", "(", "Zone_Name", "=", "zone", ")", ")", "[", "0", "]", "key", "=", "\"AirTerminal:SingleDuct:Uncontrolled\"", ".", "upper", "(", ")", "z_airterm", "=", "idf", ".", "newidfobject", "(", "key", ")", "z_airterm", ".", "Name", "=", "\"%sDirectAir\"", "%", "(", "zone", ",", ")", "fld1", "=", "\"Zone_Supply_Air_Node_Name\"", "fld2", "=", "\"Zone_Air_Inlet_Node_or_NodeList_Name\"", "z_airterm", "[", "fld1", "]", "=", "z_equipconn", "[", "fld2", "]", "z_airterm", ".", "Maximum_Air_Flow_Rate", "=", "'autosize'", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "# MAKE AirLoopHVAC:ZoneSplitter", "# zone = dloop[0]", "key", "=", "\"AirLoopHVAC:ZoneSplitter\"", ".", "upper", "(", ")", "z_splitter", "=", "idf", ".", "newidfobject", "(", "key", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "z_splitter", ".", "Name", "=", "\"%s Demand Side Splitter\"", "%", "(", "loopname", ",", ")", "z_splitter", ".", "Inlet_Node_Name", "=", "newairloop", ".", "Demand_Side_Inlet_Node_Names", "for", "i", ",", "zone", "in", "enumerate", "(", "dloop", ")", ":", "z_equipconn", "=", "modeleditor", ".", "getobjects", "(", "idf", ".", "idfobjects", ",", "idf", ".", "model", ",", "idf", ".", "idd_info", ",", "\"ZoneHVAC:EquipmentConnections\"", ".", "upper", "(", ")", ",", "# places=7,", "*", "*", "dict", "(", "Zone_Name", "=", "zone", ")", ")", "[", "0", "]", "fld", "=", "\"Outlet_%s_Node_Name\"", "%", "(", "i", "+", "1", ",", ")", "z_splitter", "[", "fld", "]", "=", "z_equipconn", ".", "Zone_Air_Inlet_Node_or_NodeList_Name", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "# make AirLoopHVAC:SupplyPath", "key", "=", "\"AirLoopHVAC:SupplyPath\"", ".", "upper", "(", ")", "z_supplypth", "=", "idf", ".", 
"newidfobject", "(", "key", ")", "z_supplypth", ".", "Name", "=", "\"%sSupplyPath\"", "%", "(", "loopname", ",", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "fld1", "=", "\"Supply_Air_Path_Inlet_Node_Name\"", "fld2", "=", "\"Demand_Side_Inlet_Node_Names\"", "z_supplypth", "[", "fld1", "]", "=", "newairloop", "[", "fld2", "]", "z_supplypth", ".", "Component_1_Object_Type", "=", "\"AirLoopHVAC:ZoneSplitter\"", "z_supplypth", ".", "Component_1_Name", "=", "z_splitter", ".", "Name", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "# make AirLoopHVAC:ZoneMixer", "key", "=", "\"AirLoopHVAC:ZoneMixer\"", ".", "upper", "(", ")", "z_mixer", "=", "idf", ".", "newidfobject", "(", "key", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "z_mixer", ".", "Name", "=", "\"%s Demand Side Mixer\"", "%", "(", "loopname", ",", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "z_mixer", ".", "Outlet_Node_Name", "=", "newairloop", ".", "Demand_Side_Outlet_Node_Name", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "for", "i", ",", "zone", "in", "enumerate", "(", "dloop", ")", ":", "z_equipconn", "=", "modeleditor", ".", "getobjects", "(", "idf", ".", "idfobjects", ",", "idf", ".", "model", ",", "idf", ".", "idd_info", ",", "\"ZoneHVAC:EquipmentConnections\"", ".", "upper", "(", ")", ",", "# places=7,", "*", "*", "dict", "(", "Zone_Name", "=", "zone", ")", ")", "[", "0", "]", "fld", "=", "\"Inlet_%s_Node_Name\"", "%", "(", "i", "+", "1", ",", ")", "z_mixer", "[", "fld", "]", "=", "z_equipconn", ".", "Zone_Return_Air_Node_Name", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "# make AirLoopHVAC:ReturnPath", "key", "=", "\"AirLoopHVAC:ReturnPath\"", ".", "upper", "(", ")", "z_returnpth", "=", "idf", ".", "newidfobject", "(", "key", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "z_returnpth", ".", "Name", "=", "\"%sReturnPath\"", "%", "(", "loopname", ",", ")", "z_returnpth", ".", "Return_Air_Path_Outlet_Node_Name", "=", "newairloop", ".", "Demand_Side_Outlet_Node_Name", "z_returnpth", ".", "Component_1_Object_Type", "=", "\"AirLoopHVAC:ZoneMixer\"", "z_returnpth", ".", "Component_1_Name", "=", "z_mixer", ".", "Name", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newairloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "return", "newairloop" ]
make an airloop
[ "make", "an", "airloop" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L305-L573
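A sketch of the call shape for makeairloop, inferred from the function body and reusing the illustrative idf from the earlier sketches (branch and zone names are assumptions):

from eppy.hvacbuilder import makeairloop

sloop = ['sb0', ['sb1', 'sb2'], 'sb3']   # supply: inlet, parallel branches, outlet
dloop = ['zone1', 'zone2']               # demand: zone names served by the loop
airloop = makeairloop(idf, "a_loop", sloop, dloop)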
santoshphilip/eppy
eppy/hvacbuilder.py
makeplantloop
def makeplantloop(idf, loopname, sloop, dloop, testing=None):
    """make plant loop with pipe components"""
    # -------- <testing ---------
    testn = 0
    # -------- testing> ---------
    newplantloop = idf.newidfobject("PLANTLOOP", Name=loopname)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    fields = SomeFields.p_fields # for use in bunch
    flnames = [field.replace(' ', '_') for field in fields] # simplify naming
    fields1 = [field.replace('Plant Side', 'Supply') for field in fields]
    fields1 = [field.replace('Demand Side', 'Demand') for field in fields1]
    fields1 = [field[:field.find('Name') - 1] for field in fields1]
    fields1 = [field.replace(' Node', '') for field in fields1]
    fields1 = [field.replace(' List', 's') for field in fields1]
    # TODO : pop connectors if no parallel branches
    # make fieldnames in the plant loop
    fieldnames = ['%s %s' % (loopname, field) for field in fields1]
    for fieldname, thefield in zip(fieldnames, flnames):
        newplantloop[thefield] = fieldname
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # make the branch lists for this plant loop
    sbranchlist = idf.newidfobject(
        "BRANCHLIST", Name=newplantloop.Plant_Side_Branch_List_Name)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    dbranchlist = idf.newidfobject(
        "BRANCHLIST", Name=newplantloop.Demand_Side_Branch_List_Name)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # add branch names to the branchlist
    sbranchnames = flattencopy(sloop)
    # sbranchnames = sloop[1]
    for branchname in sbranchnames:
        sbranchlist.obj.append(branchname)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    dbranchnames = flattencopy(dloop)
    # dbranchnames = dloop[1]
    for branchname in dbranchnames:
        dbranchlist.obj.append(branchname)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # make a pipe branch for all branches in the loop
    # supply side
    sbranchs = []
    for bname in sbranchnames:
        branch = makepipebranch(idf, bname)
        sbranchs.append(branch)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop
    anode = "Component_1_Inlet_Node_Name"
    sameinnode = "Plant_Side_Inlet_Node_Name"
    sbranchs[0][anode] = newplantloop[sameinnode]
    anode = "Component_1_Outlet_Node_Name"
    sameoutnode = "Plant_Side_Outlet_Node_Name"
    sbranchs[-1][anode] = newplantloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop - rename in pipe
    pname = sbranchs[0]['Component_1_Name'] # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname) # get pipe
    apipe.Inlet_Node_Name = newplantloop[sameinnode]
    pname = sbranchs[-1]['Component_1_Name'] # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname) # get pipe
    apipe.Outlet_Node_Name = newplantloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # demand side
    dbranchs = []
    for bname in dbranchnames:
        branch = makepipebranch(idf, bname)
        dbranchs.append(branch)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop - rename in branch
    anode = "Component_1_Inlet_Node_Name"
    sameinnode = "Demand_Side_Inlet_Node_Name"
    dbranchs[0][anode] = newplantloop[sameinnode]
    anode = "Component_1_Outlet_Node_Name"
    sameoutnode = "Demand_Side_Outlet_Node_Name"
    dbranchs[-1][anode] = newplantloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop - rename in pipe
    pname = dbranchs[0]['Component_1_Name'] # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname) # get pipe
    apipe.Inlet_Node_Name = newplantloop[sameinnode]
    pname = dbranchs[-1]['Component_1_Name'] # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname) # get pipe
    apipe.Outlet_Node_Name = newplantloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # TODO : test if there are parallel branches
    # make the connectorlist and fill fields
    sconnlist = idf.newidfobject(
        "CONNECTORLIST", Name=newplantloop.Plant_Side_Connector_List_Name)
    sconnlist.Connector_1_Object_Type = "Connector:Splitter"
    sconnlist.Connector_1_Name = "%s_supply_splitter" % (loopname,)
    sconnlist.Connector_2_Object_Type = "Connector:Mixer"
    sconnlist.Connector_2_Name = "%s_supply_mixer" % (loopname,)
    dconnlist = idf.newidfobject(
        "CONNECTORLIST", Name=newplantloop.Demand_Side_Connector_List_Name)
    dconnlist.Connector_1_Object_Type = "Connector:Splitter"
    dconnlist.Connector_1_Name = "%s_demand_splitter" % (loopname,)
    dconnlist.Connector_2_Object_Type = "Connector:Mixer"
    dconnlist.Connector_2_Name = "%s_demand_mixer" % (loopname,)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # make splitters and mixers
    s_splitter = idf.newidfobject(
        "CONNECTOR:SPLITTER", Name=sconnlist.Connector_1_Name)
    s_splitter.obj.extend([sloop[0]] + sloop[1])
    s_mixer = idf.newidfobject(
        "CONNECTOR:MIXER", Name=sconnlist.Connector_2_Name)
    s_mixer.obj.extend([sloop[-1]] + sloop[1])
    # -
    d_splitter = idf.newidfobject(
        "CONNECTOR:SPLITTER", Name=dconnlist.Connector_1_Name)
    d_splitter.obj.extend([dloop[0]] + dloop[1])
    d_mixer = idf.newidfobject(
        "CONNECTOR:MIXER", Name=dconnlist.Connector_2_Name)
    d_mixer.obj.extend([dloop[-1]] + dloop[1])
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    return newplantloop
python
def makeplantloop(idf, loopname, sloop, dloop, testing=None):
    """make plant loop with pipe components"""
    # -------- <testing ---------
    testn = 0
    # -------- testing> ---------
    newplantloop = idf.newidfobject("PLANTLOOP", Name=loopname)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    fields = SomeFields.p_fields # for use in bunch
    flnames = [field.replace(' ', '_') for field in fields] # simplify naming
    fields1 = [field.replace('Plant Side', 'Supply') for field in fields]
    fields1 = [field.replace('Demand Side', 'Demand') for field in fields1]
    fields1 = [field[:field.find('Name') - 1] for field in fields1]
    fields1 = [field.replace(' Node', '') for field in fields1]
    fields1 = [field.replace(' List', 's') for field in fields1]
    # TODO : pop connectors if no parallel branches
    # make fieldnames in the plant loop
    fieldnames = ['%s %s' % (loopname, field) for field in fields1]
    for fieldname, thefield in zip(fieldnames, flnames):
        newplantloop[thefield] = fieldname
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # make the branch lists for this plant loop
    sbranchlist = idf.newidfobject(
        "BRANCHLIST", Name=newplantloop.Plant_Side_Branch_List_Name)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    dbranchlist = idf.newidfobject(
        "BRANCHLIST", Name=newplantloop.Demand_Side_Branch_List_Name)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # add branch names to the branchlist
    sbranchnames = flattencopy(sloop)
    # sbranchnames = sloop[1]
    for branchname in sbranchnames:
        sbranchlist.obj.append(branchname)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    dbranchnames = flattencopy(dloop)
    # dbranchnames = dloop[1]
    for branchname in dbranchnames:
        dbranchlist.obj.append(branchname)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # make a pipe branch for all branches in the loop
    # supply side
    sbranchs = []
    for bname in sbranchnames:
        branch = makepipebranch(idf, bname)
        sbranchs.append(branch)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop
    anode = "Component_1_Inlet_Node_Name"
    sameinnode = "Plant_Side_Inlet_Node_Name"
    sbranchs[0][anode] = newplantloop[sameinnode]
    anode = "Component_1_Outlet_Node_Name"
    sameoutnode = "Plant_Side_Outlet_Node_Name"
    sbranchs[-1][anode] = newplantloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop - rename in pipe
    pname = sbranchs[0]['Component_1_Name'] # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname) # get pipe
    apipe.Inlet_Node_Name = newplantloop[sameinnode]
    pname = sbranchs[-1]['Component_1_Name'] # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname) # get pipe
    apipe.Outlet_Node_Name = newplantloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # demand side
    dbranchs = []
    for bname in dbranchnames:
        branch = makepipebranch(idf, bname)
        dbranchs.append(branch)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop - rename in branch
    anode = "Component_1_Inlet_Node_Name"
    sameinnode = "Demand_Side_Inlet_Node_Name"
    dbranchs[0][anode] = newplantloop[sameinnode]
    anode = "Component_1_Outlet_Node_Name"
    sameoutnode = "Demand_Side_Outlet_Node_Name"
    dbranchs[-1][anode] = newplantloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop - rename in pipe
    pname = dbranchs[0]['Component_1_Name'] # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname) # get pipe
    apipe.Inlet_Node_Name = newplantloop[sameinnode]
    pname = dbranchs[-1]['Component_1_Name'] # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname) # get pipe
    apipe.Outlet_Node_Name = newplantloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # TODO : test if there are parallel branches
    # make the connectorlist and fill fields
    sconnlist = idf.newidfobject(
        "CONNECTORLIST", Name=newplantloop.Plant_Side_Connector_List_Name)
    sconnlist.Connector_1_Object_Type = "Connector:Splitter"
    sconnlist.Connector_1_Name = "%s_supply_splitter" % (loopname,)
    sconnlist.Connector_2_Object_Type = "Connector:Mixer"
    sconnlist.Connector_2_Name = "%s_supply_mixer" % (loopname,)
    dconnlist = idf.newidfobject(
        "CONNECTORLIST", Name=newplantloop.Demand_Side_Connector_List_Name)
    dconnlist.Connector_1_Object_Type = "Connector:Splitter"
    dconnlist.Connector_1_Name = "%s_demand_splitter" % (loopname,)
    dconnlist.Connector_2_Object_Type = "Connector:Mixer"
    dconnlist.Connector_2_Name = "%s_demand_mixer" % (loopname,)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # make splitters and mixers
    s_splitter = idf.newidfobject(
        "CONNECTOR:SPLITTER", Name=sconnlist.Connector_1_Name)
    s_splitter.obj.extend([sloop[0]] + sloop[1])
    s_mixer = idf.newidfobject(
        "CONNECTOR:MIXER", Name=sconnlist.Connector_2_Name)
    s_mixer.obj.extend([sloop[-1]] + sloop[1])
    # -
    d_splitter = idf.newidfobject(
        "CONNECTOR:SPLITTER", Name=dconnlist.Connector_1_Name)
    d_splitter.obj.extend([dloop[0]] + dloop[1])
    d_mixer = idf.newidfobject(
        "CONNECTOR:MIXER", Name=dconnlist.Connector_2_Name)
    d_mixer.obj.extend([dloop[-1]] + dloop[1])
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newplantloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    return newplantloop
[ "def", "makeplantloop", "(", "idf", ",", "loopname", ",", "sloop", ",", "dloop", ",", "testing", "=", "None", ")", ":", "# -------- <testing ---------", "testn", "=", "0", "# -------- testing> ---------", "newplantloop", "=", "idf", ".", "newidfobject", "(", "\"PLANTLOOP\"", ",", "Name", "=", "loopname", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newplantloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "fields", "=", "SomeFields", ".", "p_fields", "# for use in bunch", "flnames", "=", "[", "field", ".", "replace", "(", "' '", ",", "'_'", ")", "for", "field", "in", "fields", "]", "# simplify naming", "fields1", "=", "[", "field", ".", "replace", "(", "'Plant Side'", ",", "'Supply'", ")", "for", "field", "in", "fields", "]", "fields1", "=", "[", "field", ".", "replace", "(", "'Demand Side'", ",", "'Demand'", ")", "for", "field", "in", "fields1", "]", "fields1", "=", "[", "field", "[", ":", "field", ".", "find", "(", "'Name'", ")", "-", "1", "]", "for", "field", "in", "fields1", "]", "fields1", "=", "[", "field", ".", "replace", "(", "' Node'", ",", "''", ")", "for", "field", "in", "fields1", "]", "fields1", "=", "[", "field", ".", "replace", "(", "' List'", ",", "'s'", ")", "for", "field", "in", "fields1", "]", "# TODO : pop connectors if no parallel branches", "# make fieldnames in the plant loop", "fieldnames", "=", "[", "'%s %s'", "%", "(", "loopname", ",", "field", ")", "for", "field", "in", "fields1", "]", "for", "fieldname", ",", "thefield", "in", "zip", "(", "fieldnames", ",", "flnames", ")", ":", "newplantloop", "[", "thefield", "]", "=", "fieldname", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newplantloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# make the branch lists for this plant loop", "sbranchlist", "=", "idf", ".", "newidfobject", "(", "\"BRANCHLIST\"", ",", "Name", "=", "newplantloop", ".", "Plant_Side_Branch_List_Name", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newplantloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "dbranchlist", "=", "idf", ".", "newidfobject", "(", "\"BRANCHLIST\"", ",", "Name", "=", "newplantloop", ".", "Demand_Side_Branch_List_Name", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newplantloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# add branch names to the branchlist", "sbranchnames", "=", "flattencopy", "(", "sloop", ")", "# sbranchnames = sloop[1]", "for", "branchname", "in", "sbranchnames", ":", "sbranchlist", ".", "obj", ".", "append", "(", "branchname", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newplantloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "dbranchnames", "=", "flattencopy", "(", "dloop", ")", "# dbranchnames = dloop[1]", "for", "branchname", "in", "dbranchnames", ":", "dbranchlist", ".", "obj", ".", "append", "(", "branchname", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newplantloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# make a pipe branch for 
all branches in the loop", "# supply side", "sbranchs", "=", "[", "]", "for", "bname", "in", "sbranchnames", ":", "branch", "=", "makepipebranch", "(", "idf", ",", "bname", ")", "sbranchs", ".", "append", "(", "branch", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newplantloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# rename inlet outlet of endpoints of loop", "anode", "=", "\"Component_1_Inlet_Node_Name\"", "sameinnode", "=", "\"Plant_Side_Inlet_Node_Name\"", "sbranchs", "[", "0", "]", "[", "anode", "]", "=", "newplantloop", "[", "sameinnode", "]", "anode", "=", "\"Component_1_Outlet_Node_Name\"", "sameoutnode", "=", "\"Plant_Side_Outlet_Node_Name\"", "sbranchs", "[", "-", "1", "]", "[", "anode", "]", "=", "newplantloop", "[", "sameoutnode", "]", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newplantloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# rename inlet outlet of endpoints of loop - rename in pipe", "pname", "=", "sbranchs", "[", "0", "]", "[", "'Component_1_Name'", "]", "# get the pipe name", "apipe", "=", "idf", ".", "getobject", "(", "'Pipe:Adiabatic'", ".", "upper", "(", ")", ",", "pname", ")", "# get pipe", "apipe", ".", "Inlet_Node_Name", "=", "newplantloop", "[", "sameinnode", "]", "pname", "=", "sbranchs", "[", "-", "1", "]", "[", "'Component_1_Name'", "]", "# get the pipe name", "apipe", "=", "idf", ".", "getobject", "(", "'Pipe:Adiabatic'", ".", "upper", "(", ")", ",", "pname", ")", "# get pipe", "apipe", ".", "Outlet_Node_Name", "=", "newplantloop", "[", "sameoutnode", "]", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newplantloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# demand side", "dbranchs", "=", "[", "]", "for", "bname", "in", "dbranchnames", ":", "branch", "=", "makepipebranch", "(", "idf", ",", "bname", ")", "dbranchs", ".", "append", "(", "branch", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newplantloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# rename inlet outlet of endpoints of loop - rename in branch", "anode", "=", "\"Component_1_Inlet_Node_Name\"", "sameinnode", "=", "\"Demand_Side_Inlet_Node_Name\"", "dbranchs", "[", "0", "]", "[", "anode", "]", "=", "newplantloop", "[", "sameinnode", "]", "anode", "=", "\"Component_1_Outlet_Node_Name\"", "sameoutnode", "=", "\"Demand_Side_Outlet_Node_Name\"", "dbranchs", "[", "-", "1", "]", "[", "anode", "]", "=", "newplantloop", "[", "sameoutnode", "]", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newplantloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# rename inlet outlet of endpoints of loop - rename in pipe", "pname", "=", "dbranchs", "[", "0", "]", "[", "'Component_1_Name'", "]", "# get the pipe name", "apipe", "=", "idf", ".", "getobject", "(", "'Pipe:Adiabatic'", ".", "upper", "(", ")", ",", "pname", ")", "# get pipe", "apipe", ".", "Inlet_Node_Name", "=", "newplantloop", "[", "sameinnode", "]", "pname", "=", "dbranchs", "[", "-", "1", "]", "[", "'Component_1_Name'", "]", "# get the pipe name", "apipe", "=", "idf", ".", "getobject", "(", 
"'Pipe:Adiabatic'", ".", "upper", "(", ")", ",", "pname", ")", "# get pipe", "apipe", ".", "Outlet_Node_Name", "=", "newplantloop", "[", "sameoutnode", "]", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newplantloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# TODO : test if there are parallel branches", "# make the connectorlist an fill fields", "sconnlist", "=", "idf", ".", "newidfobject", "(", "\"CONNECTORLIST\"", ",", "Name", "=", "newplantloop", ".", "Plant_Side_Connector_List_Name", ")", "sconnlist", ".", "Connector_1_Object_Type", "=", "\"Connector:Splitter\"", "sconnlist", ".", "Connector_1_Name", "=", "\"%s_supply_splitter\"", "%", "(", "loopname", ",", ")", "sconnlist", ".", "Connector_2_Object_Type", "=", "\"Connector:Mixer\"", "sconnlist", ".", "Connector_2_Name", "=", "\"%s_supply_mixer\"", "%", "(", "loopname", ",", ")", "dconnlist", "=", "idf", ".", "newidfobject", "(", "\"CONNECTORLIST\"", ",", "Name", "=", "newplantloop", ".", "Demand_Side_Connector_List_Name", ")", "dconnlist", ".", "Connector_1_Object_Type", "=", "\"Connector:Splitter\"", "dconnlist", ".", "Connector_1_Name", "=", "\"%s_demand_splitter\"", "%", "(", "loopname", ",", ")", "dconnlist", ".", "Connector_2_Object_Type", "=", "\"Connector:Mixer\"", "dconnlist", ".", "Connector_2_Name", "=", "\"%s_demand_mixer\"", "%", "(", "loopname", ",", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newplantloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# make splitters and mixers", "s_splitter", "=", "idf", ".", "newidfobject", "(", "\"CONNECTOR:SPLITTER\"", ",", "Name", "=", "sconnlist", ".", "Connector_1_Name", ")", "s_splitter", ".", "obj", ".", "extend", "(", "[", "sloop", "[", "0", "]", "]", "+", "sloop", "[", "1", "]", ")", "s_mixer", "=", "idf", ".", "newidfobject", "(", "\"CONNECTOR:MIXER\"", ",", "Name", "=", "sconnlist", ".", "Connector_2_Name", ")", "s_mixer", ".", "obj", ".", "extend", "(", "[", "sloop", "[", "-", "1", "]", "]", "+", "sloop", "[", "1", "]", ")", "# -", "d_splitter", "=", "idf", ".", "newidfobject", "(", "\"CONNECTOR:SPLITTER\"", ",", "Name", "=", "dconnlist", ".", "Connector_1_Name", ")", "d_splitter", ".", "obj", ".", "extend", "(", "[", "dloop", "[", "0", "]", "]", "+", "dloop", "[", "1", "]", ")", "d_mixer", "=", "idf", ".", "newidfobject", "(", "\"CONNECTOR:MIXER\"", ",", "Name", "=", "dconnlist", ".", "Connector_2_Name", ")", "d_mixer", ".", "obj", ".", "extend", "(", "[", "dloop", "[", "-", "1", "]", "]", "+", "dloop", "[", "1", "]", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newplantloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "return", "newplantloop" ]
make plant loop with pipe components
[ "make", "plant", "loop", "with", "pip", "components" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L575-L765
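A minimal usage sketch for makeplantloop, assuming eppy's bundled v7 IDD and an empty in-memory IDF (the same setup the module's main routine below uses); the loop and branch names are illustrative:

from six import StringIO
import eppy.iddv7 as iddv7
from eppy.modeleditor import IDF
from eppy.hvacbuilder import makeplantloop

IDF.setiddname(StringIO(iddv7.iddtxt))  # register the IDD once per session
idf = IDF(StringIO(''))  # start from an empty model
# each side is [entry branch, [parallel branches], exit branch]
sloop = ['sb0', ['sb1', 'sb2', 'sb3'], 'sb4']
dloop = ['db0', ['db1', 'db2', 'db3'], 'db4']
p_loop = makeplantloop(idf, 'p_loop', sloop, dloop)
print(p_loop.Name)  # -> p_loop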
santoshphilip/eppy
eppy/hvacbuilder.py
makecondenserloop
def makecondenserloop(idf, loopname, sloop, dloop, testing=None):
    """make condenser loop with pipe components"""
    # -------- <testing ---------
    testn = 0
    # -------- testing> ---------
    newcondenserloop = idf.newidfobject(
        "CondenserLoop".upper(), Name=loopname)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    fields = SomeFields.c_fields
    # for use in bunch
    flnames = [field.replace(' ', '_') for field in fields]
    # simplify naming
    fields1 = [field.replace(
        'Condenser Side', 'Cond_Supply') for field in fields]
    fields1 = [field.replace('Demand Side', 'Demand') for field in fields1]
    fields1 = [field[:field.find('Name') - 1] for field in fields1]
    fields1 = [field.replace(' Node', '') for field in fields1]
    fields1 = [field.replace(' List', 's') for field in fields1]
    # old TODO : pop connectors if no parallel branches
    # make fieldnames in the condenser loop
    fieldnames = ['%s %s' % (loopname, field) for field in fields1]
    for fieldname, thefield in zip(fieldnames, flnames):
        newcondenserloop[thefield] = fieldname
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # make the branch lists for this condenser loop
    sbranchlist = idf.newidfobject(
        "BRANCHLIST",
        Name=newcondenserloop.Condenser_Side_Branch_List_Name)
    dbranchlist = idf.newidfobject(
        "BRANCHLIST",
        Name=newcondenserloop.Condenser_Demand_Side_Branch_List_Name)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # add branch names to the branchlist
    sbranchnames = flattencopy(sloop)
    # sbranchnames = sloop[1]
    for branchname in sbranchnames:
        sbranchlist.obj.append(branchname)
    dbranchnames = flattencopy(dloop)
    # dbranchnames = dloop[1]
    for branchname in dbranchnames:
        dbranchlist.obj.append(branchname)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # make a pipe branch for all branches in the loop
    # supply side
    sbranchs = []
    for bname in sbranchnames:
        branch = makepipebranch(idf, bname)
        sbranchs.append(branch)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop
    anode = "Component_1_Inlet_Node_Name"
    sameinnode = "Condenser_Side_Inlet_Node_Name"  # TODO : change ?
    sbranchs[0][anode] = newcondenserloop[sameinnode]
    anode = "Component_1_Outlet_Node_Name"
    sameoutnode = "Condenser_Side_Outlet_Node_Name"  # TODO : change ?
    sbranchs[-1][anode] = newcondenserloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop - rename in pipe
    pname = sbranchs[0]['Component_1_Name']  # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname)  # get pipe
    apipe.Inlet_Node_Name = newcondenserloop[sameinnode]
    pname = sbranchs[-1]['Component_1_Name']  # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname)  # get pipe
    apipe.Outlet_Node_Name = newcondenserloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # demand side
    dbranchs = []
    for bname in dbranchnames:
        branch = makepipebranch(idf, bname)
        dbranchs.append(branch)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop - rename in branch
    anode = "Component_1_Inlet_Node_Name"
    sameinnode = "Demand_Side_Inlet_Node_Name"  # TODO : change ?
    dbranchs[0][anode] = newcondenserloop[sameinnode]
    anode = "Component_1_Outlet_Node_Name"
    sameoutnode = "Demand_Side_Outlet_Node_Name"  # TODO : change ?
    dbranchs[-1][anode] = newcondenserloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop - rename in pipe
    pname = dbranchs[0]['Component_1_Name']  # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname)  # get pipe
    apipe.Inlet_Node_Name = newcondenserloop[sameinnode]
    pname = dbranchs[-1]['Component_1_Name']  # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname)  # get pipe
    apipe.Outlet_Node_Name = newcondenserloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # TODO : test if there are parallel branches
    # make the connectorlist and fill fields
    sconnlist = idf.newidfobject(
        "CONNECTORLIST",
        Name=newcondenserloop.Condenser_Side_Connector_List_Name)
    sconnlist.Connector_1_Object_Type = "Connector:Splitter"
    sconnlist.Connector_1_Name = "%s_supply_splitter" % (loopname,)
    sconnlist.Connector_2_Object_Type = "Connector:Mixer"
    sconnlist.Connector_2_Name = "%s_supply_mixer" % (loopname,)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    dconnlist = idf.newidfobject(
        "CONNECTORLIST",
        Name=newcondenserloop.Condenser_Demand_Side_Connector_List_Name)
    dconnlist.Connector_1_Object_Type = "Connector:Splitter"
    dconnlist.Connector_1_Name = "%s_demand_splitter" % (loopname,)
    dconnlist.Connector_2_Object_Type = "Connector:Mixer"
    dconnlist.Connector_2_Name = "%s_demand_mixer" % (loopname,)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # make splitters and mixers
    s_splitter = idf.newidfobject(
        "CONNECTOR:SPLITTER",
        Name=sconnlist.Connector_1_Name)
    s_splitter.obj.extend([sloop[0]] + sloop[1])
    s_mixer = idf.newidfobject(
        "CONNECTOR:MIXER",
        Name=sconnlist.Connector_2_Name)
    s_mixer.obj.extend([sloop[-1]] + sloop[1])
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # -
    d_splitter = idf.newidfobject(
        "CONNECTOR:SPLITTER",
        Name=dconnlist.Connector_1_Name)
    d_splitter.obj.extend([dloop[0]] + dloop[1])
    d_mixer = idf.newidfobject(
        "CONNECTOR:MIXER",
        Name=dconnlist.Connector_2_Name)
    d_mixer.obj.extend([dloop[-1]] + dloop[1])
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    return newcondenserloop
python
def makecondenserloop(idf, loopname, sloop, dloop, testing=None):
    """make condenser loop with pipe components"""
    # -------- <testing ---------
    testn = 0
    # -------- testing> ---------
    newcondenserloop = idf.newidfobject(
        "CondenserLoop".upper(), Name=loopname)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    fields = SomeFields.c_fields
    # for use in bunch
    flnames = [field.replace(' ', '_') for field in fields]
    # simplify naming
    fields1 = [field.replace(
        'Condenser Side', 'Cond_Supply') for field in fields]
    fields1 = [field.replace('Demand Side', 'Demand') for field in fields1]
    fields1 = [field[:field.find('Name') - 1] for field in fields1]
    fields1 = [field.replace(' Node', '') for field in fields1]
    fields1 = [field.replace(' List', 's') for field in fields1]
    # old TODO : pop connectors if no parallel branches
    # make fieldnames in the condenser loop
    fieldnames = ['%s %s' % (loopname, field) for field in fields1]
    for fieldname, thefield in zip(fieldnames, flnames):
        newcondenserloop[thefield] = fieldname
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # make the branch lists for this condenser loop
    sbranchlist = idf.newidfobject(
        "BRANCHLIST",
        Name=newcondenserloop.Condenser_Side_Branch_List_Name)
    dbranchlist = idf.newidfobject(
        "BRANCHLIST",
        Name=newcondenserloop.Condenser_Demand_Side_Branch_List_Name)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # add branch names to the branchlist
    sbranchnames = flattencopy(sloop)
    # sbranchnames = sloop[1]
    for branchname in sbranchnames:
        sbranchlist.obj.append(branchname)
    dbranchnames = flattencopy(dloop)
    # dbranchnames = dloop[1]
    for branchname in dbranchnames:
        dbranchlist.obj.append(branchname)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # make a pipe branch for all branches in the loop
    # supply side
    sbranchs = []
    for bname in sbranchnames:
        branch = makepipebranch(idf, bname)
        sbranchs.append(branch)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop
    anode = "Component_1_Inlet_Node_Name"
    sameinnode = "Condenser_Side_Inlet_Node_Name"  # TODO : change ?
    sbranchs[0][anode] = newcondenserloop[sameinnode]
    anode = "Component_1_Outlet_Node_Name"
    sameoutnode = "Condenser_Side_Outlet_Node_Name"  # TODO : change ?
    sbranchs[-1][anode] = newcondenserloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop - rename in pipe
    pname = sbranchs[0]['Component_1_Name']  # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname)  # get pipe
    apipe.Inlet_Node_Name = newcondenserloop[sameinnode]
    pname = sbranchs[-1]['Component_1_Name']  # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname)  # get pipe
    apipe.Outlet_Node_Name = newcondenserloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # demand side
    dbranchs = []
    for bname in dbranchnames:
        branch = makepipebranch(idf, bname)
        dbranchs.append(branch)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop - rename in branch
    anode = "Component_1_Inlet_Node_Name"
    sameinnode = "Demand_Side_Inlet_Node_Name"  # TODO : change ?
    dbranchs[0][anode] = newcondenserloop[sameinnode]
    anode = "Component_1_Outlet_Node_Name"
    sameoutnode = "Demand_Side_Outlet_Node_Name"  # TODO : change ?
    dbranchs[-1][anode] = newcondenserloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # rename inlet outlet of endpoints of loop - rename in pipe
    pname = dbranchs[0]['Component_1_Name']  # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname)  # get pipe
    apipe.Inlet_Node_Name = newcondenserloop[sameinnode]
    pname = dbranchs[-1]['Component_1_Name']  # get the pipe name
    apipe = idf.getobject('Pipe:Adiabatic'.upper(), pname)  # get pipe
    apipe.Outlet_Node_Name = newcondenserloop[sameoutnode]
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # TODO : test if there are parallel branches
    # make the connectorlist and fill fields
    sconnlist = idf.newidfobject(
        "CONNECTORLIST",
        Name=newcondenserloop.Condenser_Side_Connector_List_Name)
    sconnlist.Connector_1_Object_Type = "Connector:Splitter"
    sconnlist.Connector_1_Name = "%s_supply_splitter" % (loopname,)
    sconnlist.Connector_2_Object_Type = "Connector:Mixer"
    sconnlist.Connector_2_Name = "%s_supply_mixer" % (loopname,)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    dconnlist = idf.newidfobject(
        "CONNECTORLIST",
        Name=newcondenserloop.Condenser_Demand_Side_Connector_List_Name)
    dconnlist.Connector_1_Object_Type = "Connector:Splitter"
    dconnlist.Connector_1_Name = "%s_demand_splitter" % (loopname,)
    dconnlist.Connector_2_Object_Type = "Connector:Mixer"
    dconnlist.Connector_2_Name = "%s_demand_mixer" % (loopname,)
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # make splitters and mixers
    s_splitter = idf.newidfobject(
        "CONNECTOR:SPLITTER",
        Name=sconnlist.Connector_1_Name)
    s_splitter.obj.extend([sloop[0]] + sloop[1])
    s_mixer = idf.newidfobject(
        "CONNECTOR:MIXER",
        Name=sconnlist.Connector_2_Name)
    s_mixer.obj.extend([sloop[-1]] + sloop[1])
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    # -
    d_splitter = idf.newidfobject(
        "CONNECTOR:SPLITTER",
        Name=dconnlist.Connector_1_Name)
    d_splitter.obj.extend([dloop[0]] + dloop[1])
    d_mixer = idf.newidfobject(
        "CONNECTOR:MIXER",
        Name=dconnlist.Connector_2_Name)
    d_mixer.obj.extend([dloop[-1]] + dloop[1])
    # -------- <testing ---------
    testn = doingtesting(testing, testn, newcondenserloop)
    if testn == None:
        returnnone()
    # -------- testing> ---------
    return newcondenserloop
[ "def", "makecondenserloop", "(", "idf", ",", "loopname", ",", "sloop", ",", "dloop", ",", "testing", "=", "None", ")", ":", "# -------- <testing ---------", "testn", "=", "0", "# -------- testing> ---------", "newcondenserloop", "=", "idf", ".", "newidfobject", "(", "\"CondenserLoop\"", ".", "upper", "(", ")", ",", "Name", "=", "loopname", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newcondenserloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "fields", "=", "SomeFields", ".", "c_fields", "# for use in bunch", "flnames", "=", "[", "field", ".", "replace", "(", "' '", ",", "'_'", ")", "for", "field", "in", "fields", "]", "# simplify naming", "fields1", "=", "[", "field", ".", "replace", "(", "'Condenser Side'", ",", "'Cond_Supply'", ")", "for", "field", "in", "fields", "]", "fields1", "=", "[", "field", ".", "replace", "(", "'Demand Side'", ",", "'Demand'", ")", "for", "field", "in", "fields1", "]", "fields1", "=", "[", "field", "[", ":", "field", ".", "find", "(", "'Name'", ")", "-", "1", "]", "for", "field", "in", "fields1", "]", "fields1", "=", "[", "field", ".", "replace", "(", "' Node'", ",", "''", ")", "for", "field", "in", "fields1", "]", "fields1", "=", "[", "field", ".", "replace", "(", "' List'", ",", "'s'", ")", "for", "field", "in", "fields1", "]", "# old TODO : pop connectors if no parallel branches", "# make fieldnames in the condenser loop", "fieldnames", "=", "[", "'%s %s'", "%", "(", "loopname", ",", "field", ")", "for", "field", "in", "fields1", "]", "for", "fieldname", ",", "thefield", "in", "zip", "(", "fieldnames", ",", "flnames", ")", ":", "newcondenserloop", "[", "thefield", "]", "=", "fieldname", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newcondenserloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# make the branch lists for this condenser loop", "sbranchlist", "=", "idf", ".", "newidfobject", "(", "\"BRANCHLIST\"", ",", "Name", "=", "newcondenserloop", ".", "Condenser_Side_Branch_List_Name", ")", "dbranchlist", "=", "idf", ".", "newidfobject", "(", "\"BRANCHLIST\"", ",", "Name", "=", "newcondenserloop", ".", "Condenser_Demand_Side_Branch_List_Name", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newcondenserloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# add branch names to the branchlist", "sbranchnames", "=", "flattencopy", "(", "sloop", ")", "# sbranchnames = sloop[1]", "for", "branchname", "in", "sbranchnames", ":", "sbranchlist", ".", "obj", ".", "append", "(", "branchname", ")", "dbranchnames", "=", "flattencopy", "(", "dloop", ")", "# dbranchnames = dloop[1]", "for", "branchname", "in", "dbranchnames", ":", "dbranchlist", ".", "obj", ".", "append", "(", "branchname", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newcondenserloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# make a pipe branch for all branches in the loop", "# supply side", "sbranchs", "=", "[", "]", "for", "bname", "in", "sbranchnames", ":", "branch", "=", "makepipebranch", "(", "idf", ",", "bname", ")", "sbranchs", ".", "append", "(", "branch", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", 
"newcondenserloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# rename inlet outlet of endpoints of loop", "anode", "=", "\"Component_1_Inlet_Node_Name\"", "sameinnode", "=", "\"Condenser_Side_Inlet_Node_Name\"", "# TODO : change ?", "sbranchs", "[", "0", "]", "[", "anode", "]", "=", "newcondenserloop", "[", "sameinnode", "]", "anode", "=", "\"Component_1_Outlet_Node_Name\"", "sameoutnode", "=", "\"Condenser_Side_Outlet_Node_Name\"", "# TODO : change ?", "sbranchs", "[", "-", "1", "]", "[", "anode", "]", "=", "newcondenserloop", "[", "sameoutnode", "]", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newcondenserloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# rename inlet outlet of endpoints of loop - rename in pipe", "pname", "=", "sbranchs", "[", "0", "]", "[", "'Component_1_Name'", "]", "# get the pipe name", "apipe", "=", "idf", ".", "getobject", "(", "'Pipe:Adiabatic'", ".", "upper", "(", ")", ",", "pname", ")", "# get pipe", "apipe", ".", "Inlet_Node_Name", "=", "newcondenserloop", "[", "sameinnode", "]", "pname", "=", "sbranchs", "[", "-", "1", "]", "[", "'Component_1_Name'", "]", "# get the pipe name", "apipe", "=", "idf", ".", "getobject", "(", "'Pipe:Adiabatic'", ".", "upper", "(", ")", ",", "pname", ")", "# get pipe", "apipe", ".", "Outlet_Node_Name", "=", "newcondenserloop", "[", "sameoutnode", "]", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newcondenserloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# demand side", "dbranchs", "=", "[", "]", "for", "bname", "in", "dbranchnames", ":", "branch", "=", "makepipebranch", "(", "idf", ",", "bname", ")", "dbranchs", ".", "append", "(", "branch", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newcondenserloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# rename inlet outlet of endpoints of loop - rename in branch", "anode", "=", "\"Component_1_Inlet_Node_Name\"", "sameinnode", "=", "\"Demand_Side_Inlet_Node_Name\"", "# TODO : change ?", "dbranchs", "[", "0", "]", "[", "anode", "]", "=", "newcondenserloop", "[", "sameinnode", "]", "anode", "=", "\"Component_1_Outlet_Node_Name\"", "sameoutnode", "=", "\"Demand_Side_Outlet_Node_Name\"", "# TODO : change ?", "dbranchs", "[", "-", "1", "]", "[", "anode", "]", "=", "newcondenserloop", "[", "sameoutnode", "]", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newcondenserloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# rename inlet outlet of endpoints of loop - rename in pipe", "pname", "=", "dbranchs", "[", "0", "]", "[", "'Component_1_Name'", "]", "# get the pipe name", "apipe", "=", "idf", ".", "getobject", "(", "'Pipe:Adiabatic'", ".", "upper", "(", ")", ",", "pname", ")", "# get pipe", "apipe", ".", "Inlet_Node_Name", "=", "newcondenserloop", "[", "sameinnode", "]", "pname", "=", "dbranchs", "[", "-", "1", "]", "[", "'Component_1_Name'", "]", "# get the pipe name", "apipe", "=", "idf", ".", "getobject", "(", "'Pipe:Adiabatic'", ".", "upper", "(", ")", ",", "pname", ")", "# get pipe", "apipe", ".", "Outlet_Node_Name", "=", "newcondenserloop", "[", "sameoutnode", "]", "# -------- <testing ---------", 
"testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newcondenserloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# TODO : test if there are parallel branches", "# make the connectorlist an fill fields", "sconnlist", "=", "idf", ".", "newidfobject", "(", "\"CONNECTORLIST\"", ",", "Name", "=", "newcondenserloop", ".", "Condenser_Side_Connector_List_Name", ")", "sconnlist", ".", "Connector_1_Object_Type", "=", "\"Connector:Splitter\"", "sconnlist", ".", "Connector_1_Name", "=", "\"%s_supply_splitter\"", "%", "(", "loopname", ",", ")", "sconnlist", ".", "Connector_2_Object_Type", "=", "\"Connector:Mixer\"", "sconnlist", ".", "Connector_2_Name", "=", "\"%s_supply_mixer\"", "%", "(", "loopname", ",", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newcondenserloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "dconnlist", "=", "idf", ".", "newidfobject", "(", "\"CONNECTORLIST\"", ",", "Name", "=", "newcondenserloop", ".", "Condenser_Demand_Side_Connector_List_Name", ")", "dconnlist", ".", "Connector_1_Object_Type", "=", "\"Connector:Splitter\"", "dconnlist", ".", "Connector_1_Name", "=", "\"%s_demand_splitter\"", "%", "(", "loopname", ",", ")", "dconnlist", ".", "Connector_2_Object_Type", "=", "\"Connector:Mixer\"", "dconnlist", ".", "Connector_2_Name", "=", "\"%s_demand_mixer\"", "%", "(", "loopname", ",", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newcondenserloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# make splitters and mixers", "s_splitter", "=", "idf", ".", "newidfobject", "(", "\"CONNECTOR:SPLITTER\"", ",", "Name", "=", "sconnlist", ".", "Connector_1_Name", ")", "s_splitter", ".", "obj", ".", "extend", "(", "[", "sloop", "[", "0", "]", "]", "+", "sloop", "[", "1", "]", ")", "s_mixer", "=", "idf", ".", "newidfobject", "(", "\"CONNECTOR:MIXER\"", ",", "Name", "=", "sconnlist", ".", "Connector_2_Name", ")", "s_mixer", ".", "obj", ".", "extend", "(", "[", "sloop", "[", "-", "1", "]", "]", "+", "sloop", "[", "1", "]", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newcondenserloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "# -", "d_splitter", "=", "idf", ".", "newidfobject", "(", "\"CONNECTOR:SPLITTER\"", ",", "Name", "=", "dconnlist", ".", "Connector_1_Name", ")", "d_splitter", ".", "obj", ".", "extend", "(", "[", "dloop", "[", "0", "]", "]", "+", "dloop", "[", "1", "]", ")", "d_mixer", "=", "idf", ".", "newidfobject", "(", "\"CONNECTOR:MIXER\"", ",", "Name", "=", "dconnlist", ".", "Connector_2_Name", ")", "d_mixer", ".", "obj", ".", "extend", "(", "[", "dloop", "[", "-", "1", "]", "]", "+", "dloop", "[", "1", "]", ")", "# -------- <testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ",", "newcondenserloop", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing> ---------", "return", "newcondenserloop" ]
make condenser loop with pipe components
[ "make", "condenser", "loop", "with", "pipe", "components" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L767-L960
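makecondenserloop takes the same arguments as makeplantloop; a sketch continuing from the setup in the earlier example, with illustrative names:

from eppy.hvacbuilder import makecondenserloop

# supply and demand sides again as [entry, [parallel branches], exit]
sloop = ['sb0', ['sb1', 'sb2', 'sb3'], 'sb4']
dloop = ['db0', ['db1', 'db2', 'db3'], 'db4']
c_loop = makecondenserloop(idf, 'c_loop', sloop, dloop)
print(c_loop.key)  # -> CONDENSERLOOP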
santoshphilip/eppy
eppy/hvacbuilder.py
_clean_listofcomponents
def _clean_listofcomponents(listofcomponents):
    """force it to be a list of tuples"""
    def totuple(item):
        """return a tuple"""
        if isinstance(item, (tuple, list)):
            return item
        else:
            return (item, None)
    return [totuple(item) for item in listofcomponents]
python
def _clean_listofcomponents(listofcomponents):
    """force it to be a list of tuples"""
    def totuple(item):
        """return a tuple"""
        if isinstance(item, (tuple, list)):
            return item
        else:
            return (item, None)
    return [totuple(item) for item in listofcomponents]
[ "def", "_clean_listofcomponents", "(", "listofcomponents", ")", ":", "def", "totuple", "(", "item", ")", ":", "\"\"\"return a tuple\"\"\"", "if", "isinstance", "(", "item", ",", "(", "tuple", ",", "list", ")", ")", ":", "return", "item", "else", ":", "return", "(", "item", ",", "None", ")", "return", "[", "totuple", "(", "item", ")", "for", "item", "in", "listofcomponents", "]" ]
force it to be a list of tuples
[ "force", "it", "to", "be", "a", "list", "of", "tuples" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L962-L970
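The normalisation is easiest to see on mixed input; in this hypothetical call, strings stand in for the idfobjects that real callers pass:

# bare items are padded with None; (component, node) pairs pass through
_clean_listofcomponents(['chiller', ('pipe', 'np1_node')])
# -> [('chiller', None), ('pipe', 'np1_node')]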
santoshphilip/eppy
eppy/hvacbuilder.py
_clean_listofcomponents_tuples
def _clean_listofcomponents_tuples(listofcomponents_tuples):
    """force 3 items in the tuple"""
    def to3tuple(item):
        """return a 3 item tuple"""
        if len(item) == 3:
            return item
        else:
            return (item[0], item[1], None)
    return [to3tuple(item) for item in listofcomponents_tuples]
python
def _clean_listofcomponents_tuples(listofcomponents_tuples):
    """force 3 items in the tuple"""
    def to3tuple(item):
        """return a 3 item tuple"""
        if len(item) == 3:
            return item
        else:
            return (item[0], item[1], None)
    return [to3tuple(item) for item in listofcomponents_tuples]
[ "def", "_clean_listofcomponents_tuples", "(", "listofcomponents_tuples", ")", ":", "def", "to3tuple", "(", "item", ")", ":", "\"\"\"return a 3 item tuple\"\"\"", "if", "len", "(", "item", ")", "==", "3", ":", "return", "item", "else", ":", "return", "(", "item", "[", "0", "]", ",", "item", "[", "1", "]", ",", "None", ")", "return", "[", "to3tuple", "(", "item", ")", "for", "item", "in", "listofcomponents_tuples", "]" ]
force 3 items in the tuple
[ "force", "3", "items", "in", "the", "tuple" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L972-L980
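Likewise for the 3-tuple variant; a hypothetical call with illustrative object keys and names:

# 2-tuples gain a trailing None; 3-tuples are returned unchanged
_clean_listofcomponents_tuples([('PIPE:ADIABATIC', 'p1'),
                                ('CHILLER:ELECTRIC', 'c1', 'Chilled_Water_')])
# -> [('PIPE:ADIABATIC', 'p1', None),
#     ('CHILLER:ELECTRIC', 'c1', 'Chilled_Water_')]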
santoshphilip/eppy
eppy/hvacbuilder.py
getmakeidfobject
def getmakeidfobject(idf, key, name):
    """get idfobject or make it if it does not exist"""
    idfobject = idf.getobject(key, name)
    if not idfobject:
        return idf.newidfobject(key, Name=name)
    else:
        return idfobject
python
def getmakeidfobject(idf, key, name):
    """get idfobject or make it if it does not exist"""
    idfobject = idf.getobject(key, name)
    if not idfobject:
        return idf.newidfobject(key, Name=name)
    else:
        return idfobject
[ "def", "getmakeidfobject", "(", "idf", ",", "key", ",", "name", ")", ":", "idfobject", "=", "idf", ".", "getobject", "(", "key", ",", "name", ")", "if", "not", "idfobject", ":", "return", "idf", ".", "newidfobject", "(", "key", ",", "Name", "=", "name", ")", "else", ":", "return", "idfobject" ]
get idfobject or make it if it does not exist
[ "get", "idfobject", "or", "make", "it", "if", "it", "does", "not", "exist" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L982-L988
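A sketch of the get-or-create behaviour, assuming the idf from the earlier examples; the branch name is illustrative:

b1 = getmakeidfobject(idf, 'BRANCH', 'sb9')  # creates BRANCH sb9
b2 = getmakeidfobject(idf, 'BRANCH', 'sb9')  # fetches the existing one
print(b1 is b2)  # expected True: the second call returns the stored idfobject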
santoshphilip/eppy
eppy/hvacbuilder.py
replacebranch1
def replacebranch1(idf, loop, branchname, listofcomponents_tuples,
                   fluid=None, debugsave=False):
    """do I even use this ? .... yup! I do"""
    if fluid is None:
        fluid = ''
    listofcomponents_tuples = _clean_listofcomponents_tuples(
        listofcomponents_tuples)
    branch = idf.getobject('BRANCH', branchname)  # args are (key, name)
    listofcomponents = []
    for comp_type, comp_name, compnode in listofcomponents_tuples:
        comp = getmakeidfobject(idf, comp_type.upper(), comp_name)
        listofcomponents.append((comp, compnode))
    newbr = replacebranch(idf, loop, branch, listofcomponents,
                          debugsave=debugsave, fluid=fluid)
    return newbr
python
def replacebranch1(idf, loop, branchname, listofcomponents_tuples,
                   fluid=None, debugsave=False):
    """do I even use this ? .... yup! I do"""
    if fluid is None:
        fluid = ''
    listofcomponents_tuples = _clean_listofcomponents_tuples(
        listofcomponents_tuples)
    branch = idf.getobject('BRANCH', branchname)  # args are (key, name)
    listofcomponents = []
    for comp_type, comp_name, compnode in listofcomponents_tuples:
        comp = getmakeidfobject(idf, comp_type.upper(), comp_name)
        listofcomponents.append((comp, compnode))
    newbr = replacebranch(idf, loop, branch, listofcomponents,
                          debugsave=debugsave, fluid=fluid)
    return newbr
[ "def", "replacebranch1", "(", "idf", ",", "loop", ",", "branchname", ",", "listofcomponents_tuples", ",", "fluid", "=", "None", ",", "debugsave", "=", "False", ")", ":", "if", "fluid", "is", "None", ":", "fluid", "=", "''", "listofcomponents_tuples", "=", "_clean_listofcomponents_tuples", "(", "listofcomponents_tuples", ")", "branch", "=", "idf", ".", "getobject", "(", "'BRANCH'", ",", "branchname", ")", "# args are (key, name)", "listofcomponents", "=", "[", "]", "for", "comp_type", ",", "comp_name", ",", "compnode", "in", "listofcomponents_tuples", ":", "comp", "=", "getmakeidfobject", "(", "idf", ",", "comp_type", ".", "upper", "(", ")", ",", "comp_name", ")", "listofcomponents", ".", "append", "(", "(", "comp", ",", "compnode", ")", ")", "newbr", "=", "replacebranch", "(", "idf", ",", "loop", ",", "branch", ",", "listofcomponents", ",", "debugsave", "=", "debugsave", ",", "fluid", "=", "fluid", ")", "return", "newbr" ]
do I even use this ? .... yup! I do
[ "do", "I", "even", "use", "this", "?", "....", "yup!", "I", "do" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L990-L1003
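A sketch of replacing one branch of the p_loop made earlier with a pipe followed by a chiller; the component names and the 'Chilled_Water_' node-name prefix are illustrative, not fixed by the API:

from eppy.hvacbuilder import replacebranch1

components = [('PIPE:ADIABATIC', 'np1', None),
              ('CHILLER:ELECTRIC', 'chiller1', 'Chilled_Water_')]
newbranch = replacebranch1(idf, p_loop, 'sb1', components, fluid='Water')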
santoshphilip/eppy
eppy/hvacbuilder.py
replacebranch
def replacebranch(idf, loop, branch, listofcomponents, fluid=None,
                  debugsave=False, testing=None):
    """It will replace the components in the branch with components in
    listofcomponents"""
    if fluid is None:
        fluid = ''
    # -------- testing ---------
    testn = 0
    # -------- testing ---------
    # join them into a branch
    # -----------------------
    # np1_inlet -> np1 -> np1_np2_node -> np2 -> np2_outlet
    # change the node names in the component
    # empty the old branch
    # fill in the new components with the node names into this branch
    listofcomponents = _clean_listofcomponents(listofcomponents)
    components = [item[0] for item in listofcomponents]
    connectcomponents(idf, listofcomponents, fluid=fluid)
    if debugsave:
        idf.savecopy("hhh3.idf")
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    fields = SomeFields.a_fields
    thebranch = branch
    componentsintobranch(idf, thebranch, listofcomponents, fluid=fluid)
    if debugsave:
        idf.savecopy("hhh4.idf")
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # # gather all renamed nodes
    # # do the renaming
    renamenodes(idf, 'node')
    if debugsave:
        idf.savecopy("hhh7.idf")
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # check for the end nodes of the loop
    if loop.key == 'AIRLOOPHVAC':
        fields = SomeFields.a_fields
    if loop.key == 'PLANTLOOP':
        fields = SomeFields.p_fields
    if loop.key == 'CONDENSERLOOP':
        fields = SomeFields.c_fields
    # for use in bunch
    flnames = [field.replace(' ', '_') for field in fields]
    if fluid.upper() == 'WATER':
        supplyconlistname = loop[flnames[3]]
        # Plant_Side_Connector_List_Name or Condenser_Side_Connector_List_Name
    elif fluid.upper() == 'AIR':
        supplyconlistname = loop[flnames[1]]  # Connector_List_Name'
    supplyconlist = idf.getobject('CONNECTORLIST', supplyconlistname)
    for i in range(1, 100000):  # large range to hit end
        try:
            fieldname = 'Connector_%s_Object_Type' % (i,)
            ctype = supplyconlist[fieldname]
        except bunch_subclass.BadEPFieldError:
            break
        if ctype.strip() == '':
            break
        fieldname = 'Connector_%s_Name' % (i,)
        cname = supplyconlist[fieldname]
        connector = idf.getobject(ctype.upper(), cname)
        if connector.key == 'CONNECTOR:SPLITTER':
            firstbranchname = connector.Inlet_Branch_Name
            cbranchname = firstbranchname
            isfirst = True
        if connector.key == 'CONNECTOR:MIXER':
            lastbranchname = connector.Outlet_Branch_Name
            cbranchname = lastbranchname
            isfirst = False
        if cbranchname == thebranch.Name:
            # rename end nodes
            comps = getbranchcomponents(idf, thebranch)
            if isfirst:
                comp = comps[0]
                inletnodename = getnodefieldname(
                    comp, "Inlet_Node_Name", fluid)
                comp[inletnodename] = [
                    comp[inletnodename],
                    loop[flnames[0]]]  # Plant_Side_Inlet_Node_Name
            else:
                comp = comps[-1]
                outletnodename = getnodefieldname(
                    comp, "Outlet_Node_Name", fluid)
                comp[outletnodename] = [
                    comp[outletnodename],
                    loop[flnames[1]]]  # .Plant_Side_Outlet_Node_Name
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    if fluid.upper() == 'WATER':
        demandconlistname = loop[flnames[7]]
        # .Demand_Side_Connector_List_Name
        demandconlist = idf.getobject('CONNECTORLIST', demandconlistname)
        for i in range(1, 100000):  # large range to hit end
            try:
                fieldname = 'Connector_%s_Object_Type' % (i,)
                ctype = demandconlist[fieldname]
            except bunch_subclass.BadEPFieldError:
                break
            if ctype.strip() == '':
                break
            fieldname = 'Connector_%s_Name' % (i,)
            cname = demandconlist[fieldname]
            connector = idf.getobject(ctype.upper(), cname)
            if connector.key == 'CONNECTOR:SPLITTER':
                firstbranchname = connector.Inlet_Branch_Name
                cbranchname = firstbranchname
                isfirst = True
            if connector.key == 'CONNECTOR:MIXER':
                lastbranchname = connector.Outlet_Branch_Name
                cbranchname = lastbranchname
                isfirst = False
            if cbranchname == thebranch.Name:
                # rename end nodes
                comps = getbranchcomponents(idf, thebranch)
                if isfirst:
                    comp = comps[0]
                    inletnodename = getnodefieldname(
                        comp, "Inlet_Node_Name", fluid)
                    comp[inletnodename] = [
                        comp[inletnodename],
                        loop[flnames[4]]]  # .Demand_Side_Inlet_Node_Name
                if not isfirst:
                    comp = comps[-1]
                    outletnodename = getnodefieldname(
                        comp, "Outlet_Node_Name", fluid)
                    comp[outletnodename] = [
                        comp[outletnodename],
                        loop[flnames[5]]]  # .Demand_Side_Outlet_Node_Name
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    if debugsave:
        idf.savecopy("hhh8.idf")
    # # gather all renamed nodes
    # # do the renaming
    renamenodes(idf, 'node')
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    if debugsave:
        idf.savecopy("hhh9.idf")
    return thebranch
python
def replacebranch(idf, loop, branch, listofcomponents, fluid=None,
                  debugsave=False, testing=None):
    """It will replace the components in the branch with components in
    listofcomponents"""
    if fluid is None:
        fluid = ''
    # -------- testing ---------
    testn = 0
    # -------- testing ---------
    # join them into a branch
    # -----------------------
    # np1_inlet -> np1 -> np1_np2_node -> np2 -> np2_outlet
    # change the node names in the component
    # empty the old branch
    # fill in the new components with the node names into this branch
    listofcomponents = _clean_listofcomponents(listofcomponents)
    components = [item[0] for item in listofcomponents]
    connectcomponents(idf, listofcomponents, fluid=fluid)
    if debugsave:
        idf.savecopy("hhh3.idf")
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    fields = SomeFields.a_fields
    thebranch = branch
    componentsintobranch(idf, thebranch, listofcomponents, fluid=fluid)
    if debugsave:
        idf.savecopy("hhh4.idf")
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # # gather all renamed nodes
    # # do the renaming
    renamenodes(idf, 'node')
    if debugsave:
        idf.savecopy("hhh7.idf")
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    # check for the end nodes of the loop
    if loop.key == 'AIRLOOPHVAC':
        fields = SomeFields.a_fields
    if loop.key == 'PLANTLOOP':
        fields = SomeFields.p_fields
    if loop.key == 'CONDENSERLOOP':
        fields = SomeFields.c_fields
    # for use in bunch
    flnames = [field.replace(' ', '_') for field in fields]
    if fluid.upper() == 'WATER':
        supplyconlistname = loop[flnames[3]]
        # Plant_Side_Connector_List_Name or Condenser_Side_Connector_List_Name
    elif fluid.upper() == 'AIR':
        supplyconlistname = loop[flnames[1]]  # Connector_List_Name'
    supplyconlist = idf.getobject('CONNECTORLIST', supplyconlistname)
    for i in range(1, 100000):  # large range to hit end
        try:
            fieldname = 'Connector_%s_Object_Type' % (i,)
            ctype = supplyconlist[fieldname]
        except bunch_subclass.BadEPFieldError:
            break
        if ctype.strip() == '':
            break
        fieldname = 'Connector_%s_Name' % (i,)
        cname = supplyconlist[fieldname]
        connector = idf.getobject(ctype.upper(), cname)
        if connector.key == 'CONNECTOR:SPLITTER':
            firstbranchname = connector.Inlet_Branch_Name
            cbranchname = firstbranchname
            isfirst = True
        if connector.key == 'CONNECTOR:MIXER':
            lastbranchname = connector.Outlet_Branch_Name
            cbranchname = lastbranchname
            isfirst = False
        if cbranchname == thebranch.Name:
            # rename end nodes
            comps = getbranchcomponents(idf, thebranch)
            if isfirst:
                comp = comps[0]
                inletnodename = getnodefieldname(
                    comp, "Inlet_Node_Name", fluid)
                comp[inletnodename] = [
                    comp[inletnodename],
                    loop[flnames[0]]]  # Plant_Side_Inlet_Node_Name
            else:
                comp = comps[-1]
                outletnodename = getnodefieldname(
                    comp, "Outlet_Node_Name", fluid)
                comp[outletnodename] = [
                    comp[outletnodename],
                    loop[flnames[1]]]  # .Plant_Side_Outlet_Node_Name
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    if fluid.upper() == 'WATER':
        demandconlistname = loop[flnames[7]]
        # .Demand_Side_Connector_List_Name
        demandconlist = idf.getobject('CONNECTORLIST', demandconlistname)
        for i in range(1, 100000):  # large range to hit end
            try:
                fieldname = 'Connector_%s_Object_Type' % (i,)
                ctype = demandconlist[fieldname]
            except bunch_subclass.BadEPFieldError:
                break
            if ctype.strip() == '':
                break
            fieldname = 'Connector_%s_Name' % (i,)
            cname = demandconlist[fieldname]
            connector = idf.getobject(ctype.upper(), cname)
            if connector.key == 'CONNECTOR:SPLITTER':
                firstbranchname = connector.Inlet_Branch_Name
                cbranchname = firstbranchname
                isfirst = True
            if connector.key == 'CONNECTOR:MIXER':
                lastbranchname = connector.Outlet_Branch_Name
                cbranchname = lastbranchname
                isfirst = False
            if cbranchname == thebranch.Name:
                # rename end nodes
                comps = getbranchcomponents(idf, thebranch)
                if isfirst:
                    comp = comps[0]
                    inletnodename = getnodefieldname(
                        comp, "Inlet_Node_Name", fluid)
                    comp[inletnodename] = [
                        comp[inletnodename],
                        loop[flnames[4]]]  # .Demand_Side_Inlet_Node_Name
                if not isfirst:
                    comp = comps[-1]
                    outletnodename = getnodefieldname(
                        comp, "Outlet_Node_Name", fluid)
                    comp[outletnodename] = [
                        comp[outletnodename],
                        loop[flnames[5]]]  # .Demand_Side_Outlet_Node_Name
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    if debugsave:
        idf.savecopy("hhh8.idf")
    # # gather all renamed nodes
    # # do the renaming
    renamenodes(idf, 'node')
    # -------- testing ---------
    testn = doingtesting(testing, testn)
    if testn == None:
        returnnone()
    # -------- testing ---------
    if debugsave:
        idf.savecopy("hhh9.idf")
    return thebranch
[ "def", "replacebranch", "(", "idf", ",", "loop", ",", "branch", ",", "listofcomponents", ",", "fluid", "=", "None", ",", "debugsave", "=", "False", ",", "testing", "=", "None", ")", ":", "if", "fluid", "is", "None", ":", "fluid", "=", "''", "# -------- testing ---------", "testn", "=", "0", "# -------- testing ---------", "# join them into a branch", "# -----------------------", "# np1_inlet -> np1 -> np1_np2_node -> np2 -> np2_outlet", "# change the node names in the component", "# empty the old branch", "# fill in the new components with the node names into this branch", "listofcomponents", "=", "_clean_listofcomponents", "(", "listofcomponents", ")", "components", "=", "[", "item", "[", "0", "]", "for", "item", "in", "listofcomponents", "]", "connectcomponents", "(", "idf", ",", "listofcomponents", ",", "fluid", "=", "fluid", ")", "if", "debugsave", ":", "idf", ".", "savecopy", "(", "\"hhh3.idf\"", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "fields", "=", "SomeFields", ".", "a_fields", "thebranch", "=", "branch", "componentsintobranch", "(", "idf", ",", "thebranch", ",", "listofcomponents", ",", "fluid", "=", "fluid", ")", "if", "debugsave", ":", "idf", ".", "savecopy", "(", "\"hhh4.idf\"", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "# # gather all renamed nodes", "# # do the renaming", "renamenodes", "(", "idf", ",", "'node'", ")", "if", "debugsave", ":", "idf", ".", "savecopy", "(", "\"hhh7.idf\"", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "# check for the end nodes of the loop", "if", "loop", ".", "key", "==", "'AIRLOOPHVAC'", ":", "fields", "=", "SomeFields", ".", "a_fields", "if", "loop", ".", "key", "==", "'PLANTLOOP'", ":", "fields", "=", "SomeFields", ".", "p_fields", "if", "loop", ".", "key", "==", "'CONDENSERLOOP'", ":", "fields", "=", "SomeFields", ".", "c_fields", "# for use in bunch", "flnames", "=", "[", "field", ".", "replace", "(", "' '", ",", "'_'", ")", "for", "field", "in", "fields", "]", "if", "fluid", ".", "upper", "(", ")", "==", "'WATER'", ":", "supplyconlistname", "=", "loop", "[", "flnames", "[", "3", "]", "]", "# Plant_Side_Connector_List_Name or Condenser_Side_Connector_List_Name", "elif", "fluid", ".", "upper", "(", ")", "==", "'AIR'", ":", "supplyconlistname", "=", "loop", "[", "flnames", "[", "1", "]", "]", "# Connector_List_Name'", "supplyconlist", "=", "idf", ".", "getobject", "(", "'CONNECTORLIST'", ",", "supplyconlistname", ")", "for", "i", "in", "range", "(", "1", ",", "100000", ")", ":", "# large range to hit end", "try", ":", "fieldname", "=", "'Connector_%s_Object_Type'", "%", "(", "i", ",", ")", "ctype", "=", "supplyconlist", "[", "fieldname", "]", "except", "bunch_subclass", ".", "BadEPFieldError", ":", "break", "if", "ctype", ".", "strip", "(", ")", "==", "''", ":", "break", "fieldname", "=", "'Connector_%s_Name'", "%", "(", "i", ",", ")", "cname", "=", "supplyconlist", "[", "fieldname", "]", "connector", "=", "idf", ".", "getobject", "(", "ctype", ".", "upper", "(", ")", ",", "cname", ")", "if", "connector", ".", "key", "==", "'CONNECTOR:SPLITTER'", ":", "firstbranchname", "=", "connector", ".", 
"Inlet_Branch_Name", "cbranchname", "=", "firstbranchname", "isfirst", "=", "True", "if", "connector", ".", "key", "==", "'CONNECTOR:MIXER'", ":", "lastbranchname", "=", "connector", ".", "Outlet_Branch_Name", "cbranchname", "=", "lastbranchname", "isfirst", "=", "False", "if", "cbranchname", "==", "thebranch", ".", "Name", ":", "# rename end nodes", "comps", "=", "getbranchcomponents", "(", "idf", ",", "thebranch", ")", "if", "isfirst", ":", "comp", "=", "comps", "[", "0", "]", "inletnodename", "=", "getnodefieldname", "(", "comp", ",", "\"Inlet_Node_Name\"", ",", "fluid", ")", "comp", "[", "inletnodename", "]", "=", "[", "comp", "[", "inletnodename", "]", ",", "loop", "[", "flnames", "[", "0", "]", "]", "]", "# Plant_Side_Inlet_Node_Name", "else", ":", "comp", "=", "comps", "[", "-", "1", "]", "outletnodename", "=", "getnodefieldname", "(", "comp", ",", "\"Outlet_Node_Name\"", ",", "fluid", ")", "comp", "[", "outletnodename", "]", "=", "[", "comp", "[", "outletnodename", "]", ",", "loop", "[", "flnames", "[", "1", "]", "]", "]", "# .Plant_Side_Outlet_Node_Name", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "if", "fluid", ".", "upper", "(", ")", "==", "'WATER'", ":", "demandconlistname", "=", "loop", "[", "flnames", "[", "7", "]", "]", "# .Demand_Side_Connector_List_Name", "demandconlist", "=", "idf", ".", "getobject", "(", "'CONNECTORLIST'", ",", "demandconlistname", ")", "for", "i", "in", "range", "(", "1", ",", "100000", ")", ":", "# large range to hit end", "try", ":", "fieldname", "=", "'Connector_%s_Object_Type'", "%", "(", "i", ",", ")", "ctype", "=", "demandconlist", "[", "fieldname", "]", "except", "bunch_subclass", ".", "BadEPFieldError", ":", "break", "if", "ctype", ".", "strip", "(", ")", "==", "''", ":", "break", "fieldname", "=", "'Connector_%s_Name'", "%", "(", "i", ",", ")", "cname", "=", "demandconlist", "[", "fieldname", "]", "connector", "=", "idf", ".", "getobject", "(", "ctype", ".", "upper", "(", ")", ",", "cname", ")", "if", "connector", ".", "key", "==", "'CONNECTOR:SPLITTER'", ":", "firstbranchname", "=", "connector", ".", "Inlet_Branch_Name", "cbranchname", "=", "firstbranchname", "isfirst", "=", "True", "if", "connector", ".", "key", "==", "'CONNECTOR:MIXER'", ":", "lastbranchname", "=", "connector", ".", "Outlet_Branch_Name", "cbranchname", "=", "lastbranchname", "isfirst", "=", "False", "if", "cbranchname", "==", "thebranch", ".", "Name", ":", "# rename end nodes", "comps", "=", "getbranchcomponents", "(", "idf", ",", "thebranch", ")", "if", "isfirst", ":", "comp", "=", "comps", "[", "0", "]", "inletnodename", "=", "getnodefieldname", "(", "comp", ",", "\"Inlet_Node_Name\"", ",", "fluid", ")", "comp", "[", "inletnodename", "]", "=", "[", "comp", "[", "inletnodename", "]", ",", "loop", "[", "flnames", "[", "4", "]", "]", "]", "# .Demand_Side_Inlet_Node_Name", "if", "not", "isfirst", ":", "comp", "=", "comps", "[", "-", "1", "]", "outletnodename", "=", "getnodefieldname", "(", "comp", ",", "\"Outlet_Node_Name\"", ",", "fluid", ")", "comp", "[", "outletnodename", "]", "=", "[", "comp", "[", "outletnodename", "]", ",", "loop", "[", "flnames", "[", "5", "]", "]", "]", "# .Demand_Side_Outlet_Node_Name", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "if", "debugsave", ":", "idf", ".", 
"savecopy", "(", "\"hhh8.idf\"", ")", "# # gather all renamed nodes", "# # do the renaming", "renamenodes", "(", "idf", ",", "'node'", ")", "# -------- testing ---------", "testn", "=", "doingtesting", "(", "testing", ",", "testn", ")", "if", "testn", "==", "None", ":", "returnnone", "(", ")", "# -------- testing ---------", "if", "debugsave", ":", "idf", ".", "savecopy", "(", "\"hhh9.idf\"", ")", "return", "thebranch" ]
It will replace the components in the branch with components in listofcomponents
[ "It", "will", "replace", "the", "components", "in", "the", "branch", "with", "components", "in", "listofcomponents" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L1005-L1178
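A hedged usage sketch for replacebranch above (a sketch, not the package's documented example: eppy and its bundled v7 IDD text are assumed importable, the Name= keyword form of newidfobject is assumed available, and the loop, branch, and component names are made up):

from six import StringIO
import eppy.iddv7 as iddv7
from eppy.modeleditor import IDF
from eppy.hvacbuilder import makeplantloop, replacebranch

IDF.setiddname(StringIO(iddv7.iddtxt))
idf = IDF(StringIO(''))
# build a plant loop, then swap one supply branch for pipe -> chiller
makeplantloop(idf, "p_loop",
              ['sb0', ['sb1', 'sb2', 'sb3'], 'sb4'],
              ['db0', ['db1', 'db2', 'db3'], 'db4'])
loop = idf.getobject('PLANTLOOP', 'p_loop')
branch = idf.getobject('BRANCH', 'sb1')
pipe = idf.newidfobject('PIPE:ADIABATIC', Name='np1')
chiller = idf.newidfobject('CHILLER:ELECTRIC', Name='chiller1')
newbranch = replacebranch(idf, loop, branch, [pipe, chiller], fluid='Water')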
santoshphilip/eppy
eppy/hvacbuilder.py
main
def main(): """the main routine""" from six import StringIO import eppy.iddv7 as iddv7 IDF.setiddname(StringIO(iddv7.iddtxt)) idf1 = IDF(StringIO('')) loopname = "p_loop" sloop = ['sb0', ['sb1', 'sb2', 'sb3'], 'sb4'] dloop = ['db0', ['db1', 'db2', 'db3'], 'db4'] # makeplantloop(idf1, loopname, sloop, dloop) loopname = "c_loop" sloop = ['sb0', ['sb1', 'sb2', 'sb3'], 'sb4'] dloop = ['db0', ['db1', 'db2', 'db3'], 'db4'] # makecondenserloop(idf1, loopname, sloop, dloop) loopname = "a_loop" sloop = ['sb0', ['sb1', 'sb2', 'sb3'], 'sb4'] dloop = ['zone1', 'zone2', 'zone3'] makeairloop(idf1, loopname, sloop, dloop) idf1.savecopy("hh1.idf")
python
def main(): """the main routine""" from six import StringIO import eppy.iddv7 as iddv7 IDF.setiddname(StringIO(iddv7.iddtxt)) idf1 = IDF(StringIO('')) loopname = "p_loop" sloop = ['sb0', ['sb1', 'sb2', 'sb3'], 'sb4'] dloop = ['db0', ['db1', 'db2', 'db3'], 'db4'] # makeplantloop(idf1, loopname, sloop, dloop) loopname = "c_loop" sloop = ['sb0', ['sb1', 'sb2', 'sb3'], 'sb4'] dloop = ['db0', ['db1', 'db2', 'db3'], 'db4'] # makecondenserloop(idf1, loopname, sloop, dloop) loopname = "a_loop" sloop = ['sb0', ['sb1', 'sb2', 'sb3'], 'sb4'] dloop = ['zone1', 'zone2', 'zone3'] makeairloop(idf1, loopname, sloop, dloop) idf1.savecopy("hh1.idf")
[ "def", "main", "(", ")", ":", "from", "six", "import", "StringIO", "import", "eppy", ".", "iddv7", "as", "iddv7", "IDF", ".", "setiddname", "(", "StringIO", "(", "iddv7", ".", "iddtxt", ")", ")", "idf1", "=", "IDF", "(", "StringIO", "(", "''", ")", ")", "loopname", "=", "\"p_loop\"", "sloop", "=", "[", "'sb0'", ",", "[", "'sb1'", ",", "'sb2'", ",", "'sb3'", "]", ",", "'sb4'", "]", "dloop", "=", "[", "'db0'", ",", "[", "'db1'", ",", "'db2'", ",", "'db3'", "]", ",", "'db4'", "]", "# makeplantloop(idf1, loopname, sloop, dloop)", "loopname", "=", "\"c_loop\"", "sloop", "=", "[", "'sb0'", ",", "[", "'sb1'", ",", "'sb2'", ",", "'sb3'", "]", ",", "'sb4'", "]", "dloop", "=", "[", "'db0'", ",", "[", "'db1'", ",", "'db2'", ",", "'db3'", "]", ",", "'db4'", "]", "# makecondenserloop(idf1, loopname, sloop, dloop)", "loopname", "=", "\"a_loop\"", "sloop", "=", "[", "'sb0'", ",", "[", "'sb1'", ",", "'sb2'", ",", "'sb3'", "]", ",", "'sb4'", "]", "dloop", "=", "[", "'zone1'", ",", "'zone2'", ",", "'zone3'", "]", "makeairloop", "(", "idf1", ",", "loopname", ",", "sloop", ",", "dloop", ")", "idf1", ".", "savecopy", "(", "\"hh1.idf\"", ")" ]
the main routine
[ "the", "main", "routine" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/hvacbuilder.py#L1180-L1198
santoshphilip/eppy
eppy/geometry/surface.py
area
def area(poly): """Area of a polygon poly""" if len(poly) < 3: # not a plane - no area return 0 total = [0, 0, 0] num = len(poly) for i in range(num): vi1 = poly[i] vi2 = poly[(i+1) % num] prod = np.cross(vi1, vi2) total[0] += prod[0] total[1] += prod[1] total[2] += prod[2] if total == [0, 0, 0]: # points are in a straight line - no area return 0 result = np.dot(total, unit_normal(poly[0], poly[1], poly[2])) return abs(result/2)
python
def area(poly): """Area of a polygon poly""" if len(poly) < 3: # not a plane - no area return 0 total = [0, 0, 0] num = len(poly) for i in range(num): vi1 = poly[i] vi2 = poly[(i+1) % num] prod = np.cross(vi1, vi2) total[0] += prod[0] total[1] += prod[1] total[2] += prod[2] if total == [0, 0, 0]: # points are in a straight line - no area return 0 result = np.dot(total, unit_normal(poly[0], poly[1], poly[2])) return abs(result/2)
[ "def", "area", "(", "poly", ")", ":", "if", "len", "(", "poly", ")", "<", "3", ":", "# not a plane - no area", "return", "0", "total", "=", "[", "0", ",", "0", ",", "0", "]", "num", "=", "len", "(", "poly", ")", "for", "i", "in", "range", "(", "num", ")", ":", "vi1", "=", "poly", "[", "i", "]", "vi2", "=", "poly", "[", "(", "i", "+", "1", ")", "%", "num", "]", "prod", "=", "np", ".", "cross", "(", "vi1", ",", "vi2", ")", "total", "[", "0", "]", "+=", "prod", "[", "0", "]", "total", "[", "1", "]", "+=", "prod", "[", "1", "]", "total", "[", "2", "]", "+=", "prod", "[", "2", "]", "if", "total", "==", "[", "0", ",", "0", ",", "0", "]", ":", "# points are in a straight line - no area", "return", "0", "result", "=", "np", ".", "dot", "(", "total", ",", "unit_normal", "(", "poly", "[", "0", "]", ",", "poly", "[", "1", "]", ",", "poly", "[", "2", "]", ")", ")", "return", "abs", "(", "result", "/", "2", ")" ]
Area of a polygon poly
[ "Area", "of", "a", "polygon", "poly" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/geometry/surface.py#L32-L48
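A quick worked check for area (a sketch; numpy must be installed, since the function leans on np.cross and np.dot): the unit square in the z=0 plane has area 1, and degenerate inputs fall through to 0.

from eppy.geometry.surface import area

square = [(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)]
print(area(square))                   # 1.0 (up to float rounding)
print(area([(0, 0, 0), (1, 1, 1)]))   # 0 -- fewer than 3 points is not a plane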
santoshphilip/eppy
eppy/geometry/surface.py
unit_normal
def unit_normal(pt_a, pt_b, pt_c): """unit normal vector of plane defined by points pt_a, pt_b, and pt_c""" x_val = np.linalg.det([[1, pt_a[1], pt_a[2]], [1, pt_b[1], pt_b[2]], [1, pt_c[1], pt_c[2]]]) y_val = np.linalg.det([[pt_a[0], 1, pt_a[2]], [pt_b[0], 1, pt_b[2]], [pt_c[0], 1, pt_c[2]]]) z_val = np.linalg.det([[pt_a[0], pt_a[1], 1], [pt_b[0], pt_b[1], 1], [pt_c[0], pt_c[1], 1]]) magnitude = (x_val**2 + y_val**2 + z_val**2)**.5 mag = (x_val/magnitude, y_val/magnitude, z_val/magnitude) if magnitude < 0.00000001: mag = (0, 0, 0) return mag
python
def unit_normal(pt_a, pt_b, pt_c): """unit normal vector of plane defined by points pt_a, pt_b, and pt_c""" x_val = np.linalg.det([[1, pt_a[1], pt_a[2]], [1, pt_b[1], pt_b[2]], [1, pt_c[1], pt_c[2]]]) y_val = np.linalg.det([[pt_a[0], 1, pt_a[2]], [pt_b[0], 1, pt_b[2]], [pt_c[0], 1, pt_c[2]]]) z_val = np.linalg.det([[pt_a[0], pt_a[1], 1], [pt_b[0], pt_b[1], 1], [pt_c[0], pt_c[1], 1]]) magnitude = (x_val**2 + y_val**2 + z_val**2)**.5 mag = (x_val/magnitude, y_val/magnitude, z_val/magnitude) if magnitude < 0.00000001: mag = (0, 0, 0) return mag
[ "def", "unit_normal", "(", "pt_a", ",", "pt_b", ",", "pt_c", ")", ":", "x_val", "=", "np", ".", "linalg", ".", "det", "(", "[", "[", "1", ",", "pt_a", "[", "1", "]", ",", "pt_a", "[", "2", "]", "]", ",", "[", "1", ",", "pt_b", "[", "1", "]", ",", "pt_b", "[", "2", "]", "]", ",", "[", "1", ",", "pt_c", "[", "1", "]", ",", "pt_c", "[", "2", "]", "]", "]", ")", "y_val", "=", "np", ".", "linalg", ".", "det", "(", "[", "[", "pt_a", "[", "0", "]", ",", "1", ",", "pt_a", "[", "2", "]", "]", ",", "[", "pt_b", "[", "0", "]", ",", "1", ",", "pt_b", "[", "2", "]", "]", ",", "[", "pt_c", "[", "0", "]", ",", "1", ",", "pt_c", "[", "2", "]", "]", "]", ")", "z_val", "=", "np", ".", "linalg", ".", "det", "(", "[", "[", "pt_a", "[", "0", "]", ",", "pt_a", "[", "1", "]", ",", "1", "]", ",", "[", "pt_b", "[", "0", "]", ",", "pt_b", "[", "1", "]", ",", "1", "]", ",", "[", "pt_c", "[", "0", "]", ",", "pt_c", "[", "1", "]", ",", "1", "]", "]", ")", "magnitude", "=", "(", "x_val", "**", "2", "+", "y_val", "**", "2", "+", "z_val", "**", "2", ")", "**", ".5", "mag", "=", "(", "x_val", "/", "magnitude", ",", "y_val", "/", "magnitude", ",", "z_val", "/", "magnitude", ")", "if", "magnitude", "<", "0.00000001", ":", "mag", "=", "(", "0", ",", "0", ",", "0", ")", "return", "mag" ]
unit normal vector of plane defined by points pt_a, pt_b, and pt_c
[ "unit", "normal", "vector", "of", "plane", "defined", "by", "points", "pt_a", "pt_b", "and", "pt_c" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/geometry/surface.py#L50-L59
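A worked check for unit_normal; note from the code above that the near-zero guard runs after the division, so exactly collinear points raise ZeroDivisionError rather than returning (0, 0, 0).

from eppy.geometry.surface import unit_normal

# a triangle lying in the z=0 plane has unit normal (0, 0, 1)
print(unit_normal((0, 0, 0), (1, 0, 0), (0, 1, 0)))  # approximately (0.0, 0.0, 1.0)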
santoshphilip/eppy
eppy/geometry/surface.py
width
def width(poly): """Width of a polygon poly""" num = len(poly) - 1 if abs(poly[num][2] - poly[0][2]) < abs(poly[1][2] - poly[0][2]): return dist(poly[num], poly[0]) elif abs(poly[num][2] - poly[0][2]) > abs(poly[1][2] - poly[0][2]): return dist(poly[1], poly[0]) else: return max(dist(poly[num], poly[0]), dist(poly[1], poly[0]))
python
def width(poly): """Width of a polygon poly""" num = len(poly) - 1 if abs(poly[num][2] - poly[0][2]) < abs(poly[1][2] - poly[0][2]): return dist(poly[num], poly[0]) elif abs(poly[num][2] - poly[0][2]) > abs(poly[1][2] - poly[0][2]): return dist(poly[1], poly[0]) else: return max(dist(poly[num], poly[0]), dist(poly[1], poly[0]))
[ "def", "width", "(", "poly", ")", ":", "num", "=", "len", "(", "poly", ")", "-", "1", "if", "abs", "(", "poly", "[", "num", "]", "[", "2", "]", "-", "poly", "[", "0", "]", "[", "2", "]", ")", "<", "abs", "(", "poly", "[", "1", "]", "[", "2", "]", "-", "poly", "[", "0", "]", "[", "2", "]", ")", ":", "return", "dist", "(", "poly", "[", "num", "]", ",", "poly", "[", "0", "]", ")", "elif", "abs", "(", "poly", "[", "num", "]", "[", "2", "]", "-", "poly", "[", "0", "]", "[", "2", "]", ")", ">", "abs", "(", "poly", "[", "1", "]", "[", "2", "]", "-", "poly", "[", "0", "]", "[", "2", "]", ")", ":", "return", "dist", "(", "poly", "[", "1", "]", ",", "poly", "[", "0", "]", ")", "else", ":", "return", "max", "(", "dist", "(", "poly", "[", "num", "]", ",", "poly", "[", "0", "]", ")", ",", "dist", "(", "poly", "[", "1", "]", ",", "poly", "[", "0", "]", ")", ")" ]
Width of a polygon poly
[ "Width", "of", "a", "polygon", "poly" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/geometry/surface.py#L68-L75
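A worked check: for a 5 m wide, 3 m tall wall in the y=0 plane, the first edge is the horizontal one, so width picks it.

from eppy.geometry.surface import width

wall = [(0, 0, 0), (5, 0, 0), (5, 0, 3), (0, 0, 3)]
print(width(wall))  # 5.0 -- the edge with the smaller z-change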
santoshphilip/eppy
eppy/geometry/surface.py
height
def height(poly): """Height of a polygon poly""" num = len(poly) - 1 if abs(poly[num][2] - poly[0][2]) > abs(poly[1][2] - poly[0][2]): return dist(poly[num], poly[0]) elif abs(poly[num][2] - poly[0][2]) < abs(poly[1][2] - poly[0][2]): return dist(poly[1], poly[0]) else: return min(dist(poly[num], poly[0]), dist(poly[1], poly[0]))
python
def height(poly): """Height of a polygon poly""" num = len(poly) - 1 if abs(poly[num][2] - poly[0][2]) > abs(poly[1][2] - poly[0][2]): return dist(poly[num], poly[0]) elif abs(poly[num][2] - poly[0][2]) < abs(poly[1][2] - poly[0][2]): return dist(poly[1], poly[0]) else: return min(dist(poly[num], poly[0]), dist(poly[1], poly[0]))
[ "def", "height", "(", "poly", ")", ":", "num", "=", "len", "(", "poly", ")", "-", "1", "if", "abs", "(", "poly", "[", "num", "]", "[", "2", "]", "-", "poly", "[", "0", "]", "[", "2", "]", ")", ">", "abs", "(", "poly", "[", "1", "]", "[", "2", "]", "-", "poly", "[", "0", "]", "[", "2", "]", ")", ":", "return", "dist", "(", "poly", "[", "num", "]", ",", "poly", "[", "0", "]", ")", "elif", "abs", "(", "poly", "[", "num", "]", "[", "2", "]", "-", "poly", "[", "0", "]", "[", "2", "]", ")", "<", "abs", "(", "poly", "[", "1", "]", "[", "2", "]", "-", "poly", "[", "0", "]", "[", "2", "]", ")", ":", "return", "dist", "(", "poly", "[", "1", "]", ",", "poly", "[", "0", "]", ")", "else", ":", "return", "min", "(", "dist", "(", "poly", "[", "num", "]", ",", "poly", "[", "0", "]", ")", ",", "dist", "(", "poly", "[", "1", "]", ",", "poly", "[", "0", "]", ")", ")" ]
Height of a polygon poly
[ "Height", "of", "a", "polygon", "poly" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/geometry/surface.py#L78-L86
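The companion check for height on the same wall: the closing edge has the larger z-change, so height returns its length.

from eppy.geometry.surface import height

wall = [(0, 0, 0), (5, 0, 0), (5, 0, 3), (0, 0, 3)]
print(height(wall))  # 3.0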
santoshphilip/eppy
eppy/geometry/surface.py
angle2vecs
def angle2vecs(vec1, vec2): """angle between two vectors""" # vector a * vector b = |a|*|b|* cos(angle between vector a and vector b) dot = np.dot(vec1, vec2) vec1_modulus = np.sqrt(np.multiply(vec1, vec1).sum()) vec2_modulus = np.sqrt(np.multiply(vec2, vec2).sum()) if (vec1_modulus * vec2_modulus) == 0: cos_angle = 1 else: cos_angle = dot / (vec1_modulus * vec2_modulus) return math.degrees(acos(cos_angle))
python
def angle2vecs(vec1, vec2): """angle between two vectors""" # vector a * vector b = |a|*|b|* cos(angle between vector a and vector b) dot = np.dot(vec1, vec2) vec1_modulus = np.sqrt(np.multiply(vec1, vec1).sum()) vec2_modulus = np.sqrt(np.multiply(vec2, vec2).sum()) if (vec1_modulus * vec2_modulus) == 0: cos_angle = 1 else: cos_angle = dot / (vec1_modulus * vec2_modulus) return math.degrees(acos(cos_angle))
[ "def", "angle2vecs", "(", "vec1", ",", "vec2", ")", ":", "# vector a * vector b = |a|*|b|* cos(angle between vector a and vector b)", "dot", "=", "np", ".", "dot", "(", "vec1", ",", "vec2", ")", "vec1_modulus", "=", "np", ".", "sqrt", "(", "np", ".", "multiply", "(", "vec1", ",", "vec1", ")", ".", "sum", "(", ")", ")", "vec2_modulus", "=", "np", ".", "sqrt", "(", "np", ".", "multiply", "(", "vec2", ",", "vec2", ")", ".", "sum", "(", ")", ")", "if", "(", "vec1_modulus", "*", "vec2_modulus", ")", "==", "0", ":", "cos_angle", "=", "1", "else", ":", "cos_angle", "=", "dot", "/", "(", "vec1_modulus", "*", "vec2_modulus", ")", "return", "math", ".", "degrees", "(", "acos", "(", "cos_angle", ")", ")" ]
angle between two vectors
[ "angle", "between", "two", "vectors" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/geometry/surface.py#L89-L98
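A worked check for angle2vecs; note the fallback to cos_angle = 1 when either vector has zero length, which makes the degenerate case return 0 degrees.

from eppy.geometry.surface import angle2vecs

print(angle2vecs((1, 0, 0), (0, 1, 0)))  # 90.0
print(angle2vecs((1, 0, 0), (1, 0, 0)))  # 0.0
print(angle2vecs((0, 0, 0), (0, 1, 0)))  # 0.0 -- zero vector falls back to cos = 1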
santoshphilip/eppy
eppy/geometry/surface.py
azimuth
def azimuth(poly): """Azimuth of a polygon poly""" num = len(poly) - 1 vec = unit_normal(poly[0], poly[1], poly[num]) vec_azi = np.array([vec[0], vec[1], 0]) vec_n = np.array([0, 1, 0]) # update by Santosh # angle2vecs gives the smallest angle between the vectors # so for a west wall angle2vecs will give 90 # the following 'if' statement will make sure 270 is returned x_vector = vec_azi[0] if x_vector < 0: return 360 - angle2vecs(vec_azi, vec_n) else: return angle2vecs(vec_azi, vec_n)
python
def azimuth(poly): """Azimuth of a polygon poly""" num = len(poly) - 1 vec = unit_normal(poly[0], poly[1], poly[num]) vec_azi = np.array([vec[0], vec[1], 0]) vec_n = np.array([0, 1, 0]) # update by Santosh # angle2vecs gives the smallest angle between the vectors # so for a west wall angle2vecs will give 90 # the following 'if' statement will make sure 270 is returned x_vector = vec_azi[0] if x_vector < 0: return 360 - angle2vecs(vec_azi, vec_n) else: return angle2vecs(vec_azi, vec_n)
[ "def", "azimuth", "(", "poly", ")", ":", "num", "=", "len", "(", "poly", ")", "-", "1", "vec", "=", "unit_normal", "(", "poly", "[", "0", "]", ",", "poly", "[", "1", "]", ",", "poly", "[", "num", "]", ")", "vec_azi", "=", "np", ".", "array", "(", "[", "vec", "[", "0", "]", ",", "vec", "[", "1", "]", ",", "0", "]", ")", "vec_n", "=", "np", ".", "array", "(", "[", "0", ",", "1", ",", "0", "]", ")", "# update by Santosh", "# angle2vecs gives the smallest angle between the vectors", "# so for a west wall angle2vecs will give 90", "# the following 'if' statement will make sure 270 is returned", "x_vector", "=", "vec_azi", "[", "0", "]", "if", "x_vector", "<", "0", ":", "return", "360", "-", "angle2vecs", "(", "vec_azi", ",", "vec_n", ")", "else", ":", "return", "angle2vecs", "(", "vec_azi", ",", "vec_n", ")" ]
Azimuth of a polygon poly
[ "Azimuth", "of", "a", "polygon", "poly" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/geometry/surface.py#L101-L115
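A worked check: vertices ordered so the outward normal points due south give an azimuth of 180; a normal with a negative x component takes the 360-minus branch, mapping a west wall to 270 rather than 90.

from eppy.geometry.surface import azimuth

# wall in the y=0 plane, outward normal (0, -1, 0)
south_wall = [(0, 0, 1), (0, 0, 0), (1, 0, 0), (1, 0, 1)]
print(azimuth(south_wall))  # approximately 180.0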
santoshphilip/eppy
eppy/geometry/surface.py
tilt
def tilt(poly): """Tilt of a polygon poly""" num = len(poly) - 1 vec = unit_normal(poly[0], poly[1], poly[num]) vec_alt = np.array([vec[0], vec[1], vec[2]]) vec_z = np.array([0, 0, 1]) # return (90 - angle2vecs(vec_alt, vec_z)) # update by Santosh return angle2vecs(vec_alt, vec_z)
python
def tilt(poly): """Tilt of a polygon poly""" num = len(poly) - 1 vec = unit_normal(poly[0], poly[1], poly[num]) vec_alt = np.array([vec[0], vec[1], vec[2]]) vec_z = np.array([0, 0, 1]) # return (90 - angle2vecs(vec_alt, vec_z)) # update by Santosh return angle2vecs(vec_alt, vec_z)
[ "def", "tilt", "(", "poly", ")", ":", "num", "=", "len", "(", "poly", ")", "-", "1", "vec", "=", "unit_normal", "(", "poly", "[", "0", "]", ",", "poly", "[", "1", "]", ",", "poly", "[", "num", "]", ")", "vec_alt", "=", "np", ".", "array", "(", "[", "vec", "[", "0", "]", ",", "vec", "[", "1", "]", ",", "vec", "[", "2", "]", "]", ")", "vec_z", "=", "np", ".", "array", "(", "[", "0", ",", "0", ",", "1", "]", ")", "# return (90 - angle2vecs(vec_alt, vec_z)) # update by Santosh", "return", "angle2vecs", "(", "vec_alt", ",", "vec_z", ")" ]
Tilt of a polygon poly
[ "Tilt", "of", "a", "polygon", "poly" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/geometry/surface.py#L117-L124
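A worked check for tilt: a flat roof reads 0 degrees and a vertical wall 90, matching the EnergyPlus convention of measuring from the upward z axis.

from eppy.geometry.surface import tilt

roof = [(0, 0, 3), (1, 0, 3), (1, 1, 3), (0, 1, 3)]
wall = [(0, 0, 1), (0, 0, 0), (1, 0, 0), (1, 0, 1)]
print(tilt(roof))  # approximately 0.0
print(tilt(wall))  # approximately 90.0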
santoshphilip/eppy
eppy/iddgaps.py
getfields
def getfields(comm): """get all the fields that have the key 'field' """ fields = [] for field in comm: if 'field' in field: fields.append(field) return fields
python
def getfields(comm): """get all the fields that have the key 'field' """ fields = [] for field in comm: if 'field' in field: fields.append(field) return fields
[ "def", "getfields", "(", "comm", ")", ":", "fields", "=", "[", "]", "for", "field", "in", "comm", ":", "if", "'field'", "in", "field", ":", "fields", ".", "append", "(", "field", ")", "return", "fields" ]
get all the fields that have the key 'field'
[ "get", "all", "the", "fields", "that", "have", "the", "key", "field" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/iddgaps.py#L63-L69
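A worked check on hand-built dicts of the shape eppy's IDD parser produces (the dict contents here are made up for illustration):

from eppy.iddgaps import getfields

comm = [{'field': ['Name']}, {'units': ['m']}, {'field': ['Zone Name']}]
print(getfields(comm))  # [{'field': ['Name']}, {'field': ['Zone Name']}]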
santoshphilip/eppy
eppy/iddgaps.py
repeatingfieldsnames
def repeatingfieldsnames(fields): """get the names of the repeating fields""" fnames = [field['field'][0] for field in fields] fnames = [bunchhelpers.onlylegalchar(fname) for fname in fnames] fnames = [fname for fname in fnames if bunchhelpers.intinlist(fname.split())] fnames = [(bunchhelpers.replaceint(fname), None) for fname in fnames] dct = dict(fnames) repnames = fnames[:len(list(dct.keys()))] return repnames
python
def repeatingfieldsnames(fields): """get the names of the repeating fields""" fnames = [field['field'][0] for field in fields] fnames = [bunchhelpers.onlylegalchar(fname) for fname in fnames] fnames = [fname for fname in fnames if bunchhelpers.intinlist(fname.split())] fnames = [(bunchhelpers.replaceint(fname), None) for fname in fnames] dct = dict(fnames) repnames = fnames[:len(list(dct.keys()))] return repnames
[ "def", "repeatingfieldsnames", "(", "fields", ")", ":", "fnames", "=", "[", "field", "[", "'field'", "]", "[", "0", "]", "for", "field", "in", "fields", "]", "fnames", "=", "[", "bunchhelpers", ".", "onlylegalchar", "(", "fname", ")", "for", "fname", "in", "fnames", "]", "fnames", "=", "[", "fname", "for", "fname", "in", "fnames", "if", "bunchhelpers", ".", "intinlist", "(", "fname", ".", "split", "(", ")", ")", "]", "fnames", "=", "[", "(", "bunchhelpers", ".", "replaceint", "(", "fname", ")", ",", "None", ")", "for", "fname", "in", "fnames", "]", "dct", "=", "dict", "(", "fnames", ")", "repnames", "=", "fnames", "[", ":", "len", "(", "list", "(", "dct", ".", "keys", "(", ")", ")", ")", "]", "return", "repnames" ]
get the names of the repeating fields
[ "get", "the", "names", "of", "the", "repeating", "fields" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/iddgaps.py#L71-L79
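A hedged example for repeatingfieldsnames (it assumes bunchhelpers.onlylegalchar preserves alphanumerics and spaces, which holds for names like these): field names that differ only by an embedded integer collapse to a single '%s' template.

from eppy.iddgaps import repeatingfieldsnames

fields = [{'field': ['Node 1 Name']},
          {'field': ['Node 2 Name']},
          {'field': ['Node 3 Name']}]
print(repeatingfieldsnames(fields))  # [('Node %s Name', None)]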
santoshphilip/eppy
eppy/iddgaps.py
missingkeys_standard
def missingkeys_standard(commdct, dtls, skiplist=None): """put missing keys in commdct for standard objects return a list of keys where it is unable to do so commdct is not returned, but is updated""" if skiplist == None: skiplist = [] # find objects where all the fields are not named gkeys = [dtls[i] for i in range(len(dtls)) if commdct[i].count({}) > 2] nofirstfields = [] # operatie on those fields for key_txt in gkeys: if key_txt in skiplist: continue # print key_txt # for a function, pass comm as a variable key_i = dtls.index(key_txt.upper()) comm = commdct[key_i] # get all fields fields = getfields(comm) # get repeating field names repnames = repeatingfieldsnames(fields) try: first = repnames[0][0] % (1, ) except IndexError: nofirstfields.append(key_txt) continue # print first # get all comments of the first repeating field names firstnames = [repname[0] % (1, ) for repname in repnames] fcomments = [field for field in fields if bunchhelpers.onlylegalchar(field['field'][0]) in firstnames] fcomments = [dict(fcomment) for fcomment in fcomments] for cmt in fcomments: fld = cmt['field'][0] fld = bunchhelpers.onlylegalchar(fld) fld = bunchhelpers.replaceint(fld) cmt['field'] = [fld] for i, cmt in enumerate(comm[1:]): thefield = cmt['field'][0] thefield = bunchhelpers.onlylegalchar(thefield) if thefield == first: break first_i = i + 1 newfields = [] for i in range(1, len(comm[first_i:]) // len(repnames) + 1): for fcomment in fcomments: nfcomment = dict(fcomment) fld = nfcomment['field'][0] fld = fld % (i, ) nfcomment['field'] = [fld] newfields.append(nfcomment) for i, cmt in enumerate(comm): if i < first_i: continue else: afield = newfields.pop(0) comm[i] = afield commdct[key_i] = comm return nofirstfields
python
def missingkeys_standard(commdct, dtls, skiplist=None): """put missing keys in commdct for standard objects return a list of keys where it is unable to do so commdct is not returned, but is updated""" if skiplist == None: skiplist = [] # find objects where all the fields are not named gkeys = [dtls[i] for i in range(len(dtls)) if commdct[i].count({}) > 2] nofirstfields = [] # operatie on those fields for key_txt in gkeys: if key_txt in skiplist: continue # print key_txt # for a function, pass comm as a variable key_i = dtls.index(key_txt.upper()) comm = commdct[key_i] # get all fields fields = getfields(comm) # get repeating field names repnames = repeatingfieldsnames(fields) try: first = repnames[0][0] % (1, ) except IndexError: nofirstfields.append(key_txt) continue # print first # get all comments of the first repeating field names firstnames = [repname[0] % (1, ) for repname in repnames] fcomments = [field for field in fields if bunchhelpers.onlylegalchar(field['field'][0]) in firstnames] fcomments = [dict(fcomment) for fcomment in fcomments] for cmt in fcomments: fld = cmt['field'][0] fld = bunchhelpers.onlylegalchar(fld) fld = bunchhelpers.replaceint(fld) cmt['field'] = [fld] for i, cmt in enumerate(comm[1:]): thefield = cmt['field'][0] thefield = bunchhelpers.onlylegalchar(thefield) if thefield == first: break first_i = i + 1 newfields = [] for i in range(1, len(comm[first_i:]) // len(repnames) + 1): for fcomment in fcomments: nfcomment = dict(fcomment) fld = nfcomment['field'][0] fld = fld % (i, ) nfcomment['field'] = [fld] newfields.append(nfcomment) for i, cmt in enumerate(comm): if i < first_i: continue else: afield = newfields.pop(0) comm[i] = afield commdct[key_i] = comm return nofirstfields
[ "def", "missingkeys_standard", "(", "commdct", ",", "dtls", ",", "skiplist", "=", "None", ")", ":", "if", "skiplist", "==", "None", ":", "skiplist", "=", "[", "]", "# find objects where all the fields are not named", "gkeys", "=", "[", "dtls", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "dtls", ")", ")", "if", "commdct", "[", "i", "]", ".", "count", "(", "{", "}", ")", ">", "2", "]", "nofirstfields", "=", "[", "]", "# operatie on those fields", "for", "key_txt", "in", "gkeys", ":", "if", "key_txt", "in", "skiplist", ":", "continue", "# print key_txt", "# for a function, pass comm as a variable", "key_i", "=", "dtls", ".", "index", "(", "key_txt", ".", "upper", "(", ")", ")", "comm", "=", "commdct", "[", "key_i", "]", "# get all fields", "fields", "=", "getfields", "(", "comm", ")", "# get repeating field names", "repnames", "=", "repeatingfieldsnames", "(", "fields", ")", "try", ":", "first", "=", "repnames", "[", "0", "]", "[", "0", "]", "%", "(", "1", ",", ")", "except", "IndexError", ":", "nofirstfields", ".", "append", "(", "key_txt", ")", "continue", "# print first", "# get all comments of the first repeating field names", "firstnames", "=", "[", "repname", "[", "0", "]", "%", "(", "1", ",", ")", "for", "repname", "in", "repnames", "]", "fcomments", "=", "[", "field", "for", "field", "in", "fields", "if", "bunchhelpers", ".", "onlylegalchar", "(", "field", "[", "'field'", "]", "[", "0", "]", ")", "in", "firstnames", "]", "fcomments", "=", "[", "dict", "(", "fcomment", ")", "for", "fcomment", "in", "fcomments", "]", "for", "cmt", "in", "fcomments", ":", "fld", "=", "cmt", "[", "'field'", "]", "[", "0", "]", "fld", "=", "bunchhelpers", ".", "onlylegalchar", "(", "fld", ")", "fld", "=", "bunchhelpers", ".", "replaceint", "(", "fld", ")", "cmt", "[", "'field'", "]", "=", "[", "fld", "]", "for", "i", ",", "cmt", "in", "enumerate", "(", "comm", "[", "1", ":", "]", ")", ":", "thefield", "=", "cmt", "[", "'field'", "]", "[", "0", "]", "thefield", "=", "bunchhelpers", ".", "onlylegalchar", "(", "thefield", ")", "if", "thefield", "==", "first", ":", "break", "first_i", "=", "i", "+", "1", "newfields", "=", "[", "]", "for", "i", "in", "range", "(", "1", ",", "len", "(", "comm", "[", "first_i", ":", "]", ")", "//", "len", "(", "repnames", ")", "+", "1", ")", ":", "for", "fcomment", "in", "fcomments", ":", "nfcomment", "=", "dict", "(", "fcomment", ")", "fld", "=", "nfcomment", "[", "'field'", "]", "[", "0", "]", "fld", "=", "fld", "%", "(", "i", ",", ")", "nfcomment", "[", "'field'", "]", "=", "[", "fld", "]", "newfields", ".", "append", "(", "nfcomment", ")", "for", "i", ",", "cmt", "in", "enumerate", "(", "comm", ")", ":", "if", "i", "<", "first_i", ":", "continue", "else", ":", "afield", "=", "newfields", ".", "pop", "(", "0", ")", "comm", "[", "i", "]", "=", "afield", "commdct", "[", "key_i", "]", "=", "comm", "return", "nofirstfields" ]
put missing keys in commdct for standard objects return a list of keys where it is unable to do so commdct is not returned, but is updated
[ "put", "missing", "keys", "in", "commdct", "for", "standard", "objects", "return", "a", "list", "of", "keys", "where", "it", "is", "unable", "to", "do", "so", "commdct", "is", "not", "returned", "but", "is", "updated" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/iddgaps.py#L82-L150
santoshphilip/eppy
eppy/iddgaps.py
missingkeys_nonstandard
def missingkeys_nonstandard(block, commdct, dtls, objectlist, afield='afiled %s'): """This is an object list where there is no first field name to give a hint of what the first field name should be""" afield = 'afield %s' for key_txt in objectlist: key_i = dtls.index(key_txt.upper()) comm = commdct[key_i] if block: blk = block[key_i] for i, cmt in enumerate(comm): if cmt == {}: first_i = i break for i, cmt in enumerate(comm): if i >= first_i: if block: comm[i]['field'] = ['%s' % (blk[i])] else: comm[i]['field'] = [afield % (i - first_i + 1,),]
python
def missingkeys_nonstandard(block, commdct, dtls, objectlist, afield='afiled %s'): """This is an object list where there is no first field name to give a hint of what the first field name should be""" afield = 'afield %s' for key_txt in objectlist: key_i = dtls.index(key_txt.upper()) comm = commdct[key_i] if block: blk = block[key_i] for i, cmt in enumerate(comm): if cmt == {}: first_i = i break for i, cmt in enumerate(comm): if i >= first_i: if block: comm[i]['field'] = ['%s' % (blk[i])] else: comm[i]['field'] = [afield % (i - first_i + 1,),]
[ "def", "missingkeys_nonstandard", "(", "block", ",", "commdct", ",", "dtls", ",", "objectlist", ",", "afield", "=", "'afiled %s'", ")", ":", "afield", "=", "'afield %s'", "for", "key_txt", "in", "objectlist", ":", "key_i", "=", "dtls", ".", "index", "(", "key_txt", ".", "upper", "(", ")", ")", "comm", "=", "commdct", "[", "key_i", "]", "if", "block", ":", "blk", "=", "block", "[", "key_i", "]", "for", "i", ",", "cmt", "in", "enumerate", "(", "comm", ")", ":", "if", "cmt", "==", "{", "}", ":", "first_i", "=", "i", "break", "for", "i", ",", "cmt", "in", "enumerate", "(", "comm", ")", ":", "if", "i", ">=", "first_i", ":", "if", "block", ":", "comm", "[", "i", "]", "[", "'field'", "]", "=", "[", "'%s'", "%", "(", "blk", "[", "i", "]", ")", "]", "else", ":", "comm", "[", "i", "]", "[", "'field'", "]", "=", "[", "afield", "%", "(", "i", "-", "first_i", "+", "1", ",", ")", ",", "]" ]
This is an object list where there is no first field name to give a hint of what the first field name should be
[ "This", "is", "an", "object", "list", "where", "thre", "is", "no", "first", "field", "name", "to", "give", "a", "hint", "of", "what", "the", "first", "field", "name", "should", "be" ]
train
https://github.com/santoshphilip/eppy/blob/55410ff7c11722f35bc4331ff5e00a0b86f787e1/eppy/iddgaps.py#L152-L170
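A hedged sketch of how the two missingkeys_* helpers above are chained in practice; the reader entry point (readidf.readdatacommdct) and the data.dtls attribute are assumptions drawn from typical eppy usage and may differ across versions.

from eppy.EPlusInterfaceFunctions import readidf
import eppy.iddgaps as iddgaps

block, data, commdct = readidf.readdatacommdct('in.idf', iddfile='Energy+.idd')
dtls = data.dtls  # object names, parallel to commdct
nofirstfields = iddgaps.missingkeys_standard(
    commdct, dtls, skiplist=['TABLE:MULTIVARIABLELOOKUP'])
# objects with no usable first repeating field get generic 'afield N' names
iddgaps.missingkeys_nonstandard(block, commdct, dtls, nofirstfields)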
GoogleCloudPlatform/datastore-ndb-python
ndb/eventloop.py
get_event_loop
def get_event_loop(): """Return a EventLoop instance. A new instance is created for each new HTTP request. We determine that we're in a new request by inspecting os.environ, which is reset at the start of each request. Also, each thread gets its own loop. """ ev = _state.event_loop if not os.getenv(_EVENT_LOOP_KEY) and ev is not None: ev.clear() _state.event_loop = None ev = None if ev is None: ev = EventLoop() _state.event_loop = ev os.environ[_EVENT_LOOP_KEY] = '1' return ev
python
def get_event_loop(): """Return a EventLoop instance. A new instance is created for each new HTTP request. We determine that we're in a new request by inspecting os.environ, which is reset at the start of each request. Also, each thread gets its own loop. """ ev = _state.event_loop if not os.getenv(_EVENT_LOOP_KEY) and ev is not None: ev.clear() _state.event_loop = None ev = None if ev is None: ev = EventLoop() _state.event_loop = ev os.environ[_EVENT_LOOP_KEY] = '1' return ev
[ "def", "get_event_loop", "(", ")", ":", "ev", "=", "_state", ".", "event_loop", "if", "not", "os", ".", "getenv", "(", "_EVENT_LOOP_KEY", ")", "and", "ev", "is", "not", "None", ":", "ev", ".", "clear", "(", ")", "_state", ".", "event_loop", "=", "None", "ev", "=", "None", "if", "ev", "is", "None", ":", "ev", "=", "EventLoop", "(", ")", "_state", ".", "event_loop", "=", "ev", "os", ".", "environ", "[", "_EVENT_LOOP_KEY", "]", "=", "'1'", "return", "ev" ]
Return an EventLoop instance. A new instance is created for each new HTTP request. We determine that we're in a new request by inspecting os.environ, which is reset at the start of each request. Also, each thread gets its own loop.
[ "Return", "a", "EventLoop", "instance", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/eventloop.py#L293-L309
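A hedged sketch (it assumes the App Engine SDK is on the path so ndb.eventloop imports): within one request and one thread the loop behaves as a singleton.

from ndb import eventloop

ev = eventloop.get_event_loop()
assert ev is eventloop.get_event_loop()  # cached until os.environ is reset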
GoogleCloudPlatform/datastore-ndb-python
ndb/eventloop.py
EventLoop.clear
def clear(self): """Remove all pending events without running any.""" while self.current or self.idlers or self.queue or self.rpcs: current = self.current idlers = self.idlers queue = self.queue rpcs = self.rpcs _logging_debug('Clearing stale EventLoop instance...') if current: _logging_debug(' current = %s', current) if idlers: _logging_debug(' idlers = %s', idlers) if queue: _logging_debug(' queue = %s', queue) if rpcs: _logging_debug(' rpcs = %s', rpcs) self.__init__() current.clear() idlers.clear() queue[:] = [] rpcs.clear() _logging_debug('Cleared')
python
def clear(self): """Remove all pending events without running any.""" while self.current or self.idlers or self.queue or self.rpcs: current = self.current idlers = self.idlers queue = self.queue rpcs = self.rpcs _logging_debug('Clearing stale EventLoop instance...') if current: _logging_debug(' current = %s', current) if idlers: _logging_debug(' idlers = %s', idlers) if queue: _logging_debug(' queue = %s', queue) if rpcs: _logging_debug(' rpcs = %s', rpcs) self.__init__() current.clear() idlers.clear() queue[:] = [] rpcs.clear() _logging_debug('Cleared')
[ "def", "clear", "(", "self", ")", ":", "while", "self", ".", "current", "or", "self", ".", "idlers", "or", "self", ".", "queue", "or", "self", ".", "rpcs", ":", "current", "=", "self", ".", "current", "idlers", "=", "self", ".", "idlers", "queue", "=", "self", ".", "queue", "rpcs", "=", "self", ".", "rpcs", "_logging_debug", "(", "'Clearing stale EventLoop instance...'", ")", "if", "current", ":", "_logging_debug", "(", "' current = %s'", ",", "current", ")", "if", "idlers", ":", "_logging_debug", "(", "' idlers = %s'", ",", "idlers", ")", "if", "queue", ":", "_logging_debug", "(", "' queue = %s'", ",", "queue", ")", "if", "rpcs", ":", "_logging_debug", "(", "' rpcs = %s'", ",", "rpcs", ")", "self", ".", "__init__", "(", ")", "current", ".", "clear", "(", ")", "idlers", ".", "clear", "(", ")", "queue", "[", ":", "]", "=", "[", "]", "rpcs", ".", "clear", "(", ")", "_logging_debug", "(", "'Cleared'", ")" ]
Remove all pending events without running any.
[ "Remove", "all", "pending", "events", "without", "running", "any", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/eventloop.py#L90-L111
GoogleCloudPlatform/datastore-ndb-python
ndb/eventloop.py
EventLoop.insort_event_right
def insort_event_right(self, event, lo=0, hi=None): """Insert event in queue, and keep it sorted assuming queue is sorted. If event is already in queue, insert it to the right of the rightmost event (to keep FIFO order). Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. Args: event: a (time in sec since unix epoch, callback, args, kwds) tuple. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(self.queue) while lo < hi: mid = (lo + hi) // 2 if event[0] < self.queue[mid][0]: hi = mid else: lo = mid + 1 self.queue.insert(lo, event)
python
def insort_event_right(self, event, lo=0, hi=None): """Insert event in queue, and keep it sorted assuming queue is sorted. If event is already in queue, insert it to the right of the rightmost event (to keep FIFO order). Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. Args: event: a (time in sec since unix epoch, callback, args, kwds) tuple. """ if lo < 0: raise ValueError('lo must be non-negative') if hi is None: hi = len(self.queue) while lo < hi: mid = (lo + hi) // 2 if event[0] < self.queue[mid][0]: hi = mid else: lo = mid + 1 self.queue.insert(lo, event)
[ "def", "insort_event_right", "(", "self", ",", "event", ",", "lo", "=", "0", ",", "hi", "=", "None", ")", ":", "if", "lo", "<", "0", ":", "raise", "ValueError", "(", "'lo must be non-negative'", ")", "if", "hi", "is", "None", ":", "hi", "=", "len", "(", "self", ".", "queue", ")", "while", "lo", "<", "hi", ":", "mid", "=", "(", "lo", "+", "hi", ")", "//", "2", "if", "event", "[", "0", "]", "<", "self", ".", "queue", "[", "mid", "]", "[", "0", "]", ":", "hi", "=", "mid", "else", ":", "lo", "=", "mid", "+", "1", "self", ".", "queue", ".", "insert", "(", "lo", ",", "event", ")" ]
Insert event in queue, and keep it sorted assuming queue is sorted. If event is already in queue, insert it to the right of the rightmost event (to keep FIFO order). Optional args lo (default 0) and hi (default len(a)) bound the slice of a to be searched. Args: event: a (time in sec since unix epoch, callback, args, kwds) tuple.
[ "Insert", "event", "in", "queue", "and", "keep", "it", "sorted", "assuming", "queue", "is", "sorted", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/eventloop.py#L113-L136
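A worked sketch of the FIFO guarantee (constructing an EventLoop directly is an assumption made for illustration; production code goes through get_event_loop):

from ndb import eventloop

def first(): pass
def second(): pass

ev = eventloop.EventLoop()
ev.insort_event_right((5.0, first, (), {}))
ev.insort_event_right((5.0, second, (), {}))
# equal timestamps keep insertion order: first fires before second
assert [e[1] for e in ev.queue] == [first, second]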
GoogleCloudPlatform/datastore-ndb-python
ndb/eventloop.py
EventLoop.queue_call
def queue_call(self, delay, callback, *args, **kwds): """Schedule a function call at a specific time in the future.""" if delay is None: self.current.append((callback, args, kwds)) return if delay < 1e9: when = delay + self.clock.now() else: # Times over a billion seconds are assumed to be absolute. when = delay self.insort_event_right((when, callback, args, kwds))
python
def queue_call(self, delay, callback, *args, **kwds): """Schedule a function call at a specific time in the future.""" if delay is None: self.current.append((callback, args, kwds)) return if delay < 1e9: when = delay + self.clock.now() else: # Times over a billion seconds are assumed to be absolute. when = delay self.insort_event_right((when, callback, args, kwds))
[ "def", "queue_call", "(", "self", ",", "delay", ",", "callback", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "if", "delay", "is", "None", ":", "self", ".", "current", ".", "append", "(", "(", "callback", ",", "args", ",", "kwds", ")", ")", "return", "if", "delay", "<", "1e9", ":", "when", "=", "delay", "+", "self", ".", "clock", ".", "now", "(", ")", "else", ":", "# Times over a billion seconds are assumed to be absolute.", "when", "=", "delay", "self", ".", "insort_event_right", "(", "(", "when", ",", "callback", ",", "args", ",", "kwds", ")", ")" ]
Schedule a function call at a specific time in the future.
[ "Schedule", "a", "function", "call", "at", "a", "specific", "time", "in", "the", "future", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/eventloop.py#L138-L148
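A hedged sketch of the three delay forms queue_call accepts, drained with run1 (defined below):

import time
from ndb import eventloop

def say(msg):
    print(msg)

ev = eventloop.get_event_loop()
ev.queue_call(None, say, 'now')                  # runs on the next pass
ev.queue_call(0.25, say, 'in 0.25 s')            # relative delay in seconds
ev.queue_call(time.time() + 1, say, 'absolute')  # >= 1e9 is an epoch time
while ev.run1():  # run each item (or sleep) until all queues are empty
    pass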
GoogleCloudPlatform/datastore-ndb-python
ndb/eventloop.py
EventLoop.queue_rpc
def queue_rpc(self, rpc, callback=None, *args, **kwds): """Schedule an RPC with an optional callback. The caller must have previously sent the call to the service. The optional callback is called with the remaining arguments. NOTE: If the rpc is a MultiRpc, the callback will be called once for each sub-RPC. TODO: Is this a good idea? """ if rpc is None: return if rpc.state not in (_RUNNING, _FINISHING): raise RuntimeError('rpc must be sent to service before queueing') if isinstance(rpc, datastore_rpc.MultiRpc): rpcs = rpc.rpcs if len(rpcs) > 1: # Don't call the callback until all sub-rpcs have completed. rpc.__done = False def help_multi_rpc_along(r=rpc, c=callback, a=args, k=kwds): if r.state == _FINISHING and not r.__done: r.__done = True c(*a, **k) # TODO: And again, what about exceptions? callback = help_multi_rpc_along args = () kwds = {} else: rpcs = [rpc] for rpc in rpcs: self.rpcs[rpc] = (callback, args, kwds)
python
def queue_rpc(self, rpc, callback=None, *args, **kwds): """Schedule an RPC with an optional callback. The caller must have previously sent the call to the service. The optional callback is called with the remaining arguments. NOTE: If the rpc is a MultiRpc, the callback will be called once for each sub-RPC. TODO: Is this a good idea? """ if rpc is None: return if rpc.state not in (_RUNNING, _FINISHING): raise RuntimeError('rpc must be sent to service before queueing') if isinstance(rpc, datastore_rpc.MultiRpc): rpcs = rpc.rpcs if len(rpcs) > 1: # Don't call the callback until all sub-rpcs have completed. rpc.__done = False def help_multi_rpc_along(r=rpc, c=callback, a=args, k=kwds): if r.state == _FINISHING and not r.__done: r.__done = True c(*a, **k) # TODO: And again, what about exceptions? callback = help_multi_rpc_along args = () kwds = {} else: rpcs = [rpc] for rpc in rpcs: self.rpcs[rpc] = (callback, args, kwds)
[ "def", "queue_rpc", "(", "self", ",", "rpc", ",", "callback", "=", "None", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "if", "rpc", "is", "None", ":", "return", "if", "rpc", ".", "state", "not", "in", "(", "_RUNNING", ",", "_FINISHING", ")", ":", "raise", "RuntimeError", "(", "'rpc must be sent to service before queueing'", ")", "if", "isinstance", "(", "rpc", ",", "datastore_rpc", ".", "MultiRpc", ")", ":", "rpcs", "=", "rpc", ".", "rpcs", "if", "len", "(", "rpcs", ")", ">", "1", ":", "# Don't call the callback until all sub-rpcs have completed.", "rpc", ".", "__done", "=", "False", "def", "help_multi_rpc_along", "(", "r", "=", "rpc", ",", "c", "=", "callback", ",", "a", "=", "args", ",", "k", "=", "kwds", ")", ":", "if", "r", ".", "state", "==", "_FINISHING", "and", "not", "r", ".", "__done", ":", "r", ".", "__done", "=", "True", "c", "(", "*", "a", ",", "*", "*", "k", ")", "# TODO: And again, what about exceptions?", "callback", "=", "help_multi_rpc_along", "args", "=", "(", ")", "kwds", "=", "{", "}", "else", ":", "rpcs", "=", "[", "rpc", "]", "for", "rpc", "in", "rpcs", ":", "self", ".", "rpcs", "[", "rpc", "]", "=", "(", "callback", ",", "args", ",", "kwds", ")" ]
Schedule an RPC with an optional callback. The caller must have previously sent the call to the service. The optional callback is called with the remaining arguments. NOTE: If the rpc is a MultiRpc, the callback will be called once for each sub-RPC. TODO: Is this a good idea?
[ "Schedule", "an", "RPC", "with", "an", "optional", "callback", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/eventloop.py#L150-L180
GoogleCloudPlatform/datastore-ndb-python
ndb/eventloop.py
EventLoop.add_idle
def add_idle(self, callback, *args, **kwds): """Add an idle callback. An idle callback can return True, False or None. These mean: - None: remove the callback (don't reschedule) - False: the callback did no work; reschedule later - True: the callback did some work; reschedule soon If the callback raises an exception, the traceback is logged and the callback is removed. """ self.idlers.append((callback, args, kwds))
python
def add_idle(self, callback, *args, **kwds): """Add an idle callback. An idle callback can return True, False or None. These mean: - None: remove the callback (don't reschedule) - False: the callback did no work; reschedule later - True: the callback did some work; reschedule soon If the callback raises an exception, the traceback is logged and the callback is removed. """ self.idlers.append((callback, args, kwds))
[ "def", "add_idle", "(", "self", ",", "callback", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "self", ".", "idlers", ".", "append", "(", "(", "callback", ",", "args", ",", "kwds", ")", ")" ]
Add an idle callback. An idle callback can return True, False or None. These mean: - None: remove the callback (don't reschedule) - False: the callback did no work; reschedule later - True: the callback did some work; reschedule soon If the callback raises an exception, the traceback is logged and the callback is removed.
[ "Add", "an", "idle", "callback", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/eventloop.py#L182-L194
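A worked sketch of the idler return-value contract, using run_idle (below) to drive it:

from ndb import eventloop

state = {'calls': 0}

def pump():
    state['calls'] += 1
    if state['calls'] < 3:
        return True  # did work; reschedule soon
    return None      # done; unregister this idler

ev = eventloop.get_event_loop()
ev.add_idle(pump)
while ev.run_idle():
    pass
print(state['calls'])  # 3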
GoogleCloudPlatform/datastore-ndb-python
ndb/eventloop.py
EventLoop.run_idle
def run_idle(self): """Run one of the idle callbacks. Returns: True if one was called, False if no idle callback was called. """ if not self.idlers or self.inactive >= len(self.idlers): return False idler = self.idlers.popleft() callback, args, kwds = idler _logging_debug('idler: %s', callback.__name__) res = callback(*args, **kwds) # See add_idle() for the meaning of the callback return value. if res is not None: if res: self.inactive = 0 else: self.inactive += 1 self.idlers.append(idler) else: _logging_debug('idler %s removed', callback.__name__) return True
python
def run_idle(self): """Run one of the idle callbacks. Returns: True if one was called, False if no idle callback was called. """ if not self.idlers or self.inactive >= len(self.idlers): return False idler = self.idlers.popleft() callback, args, kwds = idler _logging_debug('idler: %s', callback.__name__) res = callback(*args, **kwds) # See add_idle() for the meaning of the callback return value. if res is not None: if res: self.inactive = 0 else: self.inactive += 1 self.idlers.append(idler) else: _logging_debug('idler %s removed', callback.__name__) return True
[ "def", "run_idle", "(", "self", ")", ":", "if", "not", "self", ".", "idlers", "or", "self", ".", "inactive", ">=", "len", "(", "self", ".", "idlers", ")", ":", "return", "False", "idler", "=", "self", ".", "idlers", ".", "popleft", "(", ")", "callback", ",", "args", ",", "kwds", "=", "idler", "_logging_debug", "(", "'idler: %s'", ",", "callback", ".", "__name__", ")", "res", "=", "callback", "(", "*", "args", ",", "*", "*", "kwds", ")", "# See add_idle() for the meaning of the callback return value.", "if", "res", "is", "not", "None", ":", "if", "res", ":", "self", ".", "inactive", "=", "0", "else", ":", "self", ".", "inactive", "+=", "1", "self", ".", "idlers", ".", "append", "(", "idler", ")", "else", ":", "_logging_debug", "(", "'idler %s removed'", ",", "callback", ".", "__name__", ")", "return", "True" ]
Run one of the idle callbacks. Returns: True if one was called, False if no idle callback was called.
[ "Run", "one", "of", "the", "idle", "callbacks", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/eventloop.py#L196-L217
GoogleCloudPlatform/datastore-ndb-python
ndb/eventloop.py
EventLoop.run0
def run0(self): """Run one item (a callback or an RPC wait_any). Returns: A time to sleep if something happened (may be 0); None if all queues are empty. """ if self.current: self.inactive = 0 callback, args, kwds = self.current.popleft() _logging_debug('nowevent: %s', callback.__name__) callback(*args, **kwds) return 0 if self.run_idle(): return 0 delay = None if self.queue: delay = self.queue[0][0] - self.clock.now() if delay <= 0: self.inactive = 0 _, callback, args, kwds = self.queue.pop(0) _logging_debug('event: %s', callback.__name__) callback(*args, **kwds) # TODO: What if it raises an exception? return 0 if self.rpcs: self.inactive = 0 rpc = datastore_rpc.MultiRpc.wait_any(self.rpcs) if rpc is not None: _logging_debug('rpc: %s.%s', rpc.service, rpc.method) # Yes, wait_any() may return None even for a non-empty argument. # But no, it won't ever return an RPC not in its argument. if rpc not in self.rpcs: raise RuntimeError('rpc %r was not given to wait_any as a choice %r' % (rpc, self.rpcs)) callback, args, kwds = self.rpcs[rpc] del self.rpcs[rpc] if callback is not None: callback(*args, **kwds) # TODO: Again, what about exceptions? return 0 return delay
python
def run0(self): """Run one item (a callback or an RPC wait_any). Returns: A time to sleep if something happened (may be 0); None if all queues are empty. """ if self.current: self.inactive = 0 callback, args, kwds = self.current.popleft() _logging_debug('nowevent: %s', callback.__name__) callback(*args, **kwds) return 0 if self.run_idle(): return 0 delay = None if self.queue: delay = self.queue[0][0] - self.clock.now() if delay <= 0: self.inactive = 0 _, callback, args, kwds = self.queue.pop(0) _logging_debug('event: %s', callback.__name__) callback(*args, **kwds) # TODO: What if it raises an exception? return 0 if self.rpcs: self.inactive = 0 rpc = datastore_rpc.MultiRpc.wait_any(self.rpcs) if rpc is not None: _logging_debug('rpc: %s.%s', rpc.service, rpc.method) # Yes, wait_any() may return None even for a non-empty argument. # But no, it won't ever return an RPC not in its argument. if rpc not in self.rpcs: raise RuntimeError('rpc %r was not given to wait_any as a choice %r' % (rpc, self.rpcs)) callback, args, kwds = self.rpcs[rpc] del self.rpcs[rpc] if callback is not None: callback(*args, **kwds) # TODO: Again, what about exceptions? return 0 return delay
[ "def", "run0", "(", "self", ")", ":", "if", "self", ".", "current", ":", "self", ".", "inactive", "=", "0", "callback", ",", "args", ",", "kwds", "=", "self", ".", "current", ".", "popleft", "(", ")", "_logging_debug", "(", "'nowevent: %s'", ",", "callback", ".", "__name__", ")", "callback", "(", "*", "args", ",", "*", "*", "kwds", ")", "return", "0", "if", "self", ".", "run_idle", "(", ")", ":", "return", "0", "delay", "=", "None", "if", "self", ".", "queue", ":", "delay", "=", "self", ".", "queue", "[", "0", "]", "[", "0", "]", "-", "self", ".", "clock", ".", "now", "(", ")", "if", "delay", "<=", "0", ":", "self", ".", "inactive", "=", "0", "_", ",", "callback", ",", "args", ",", "kwds", "=", "self", ".", "queue", ".", "pop", "(", "0", ")", "_logging_debug", "(", "'event: %s'", ",", "callback", ".", "__name__", ")", "callback", "(", "*", "args", ",", "*", "*", "kwds", ")", "# TODO: What if it raises an exception?", "return", "0", "if", "self", ".", "rpcs", ":", "self", ".", "inactive", "=", "0", "rpc", "=", "datastore_rpc", ".", "MultiRpc", ".", "wait_any", "(", "self", ".", "rpcs", ")", "if", "rpc", "is", "not", "None", ":", "_logging_debug", "(", "'rpc: %s.%s'", ",", "rpc", ".", "service", ",", "rpc", ".", "method", ")", "# Yes, wait_any() may return None even for a non-empty argument.", "# But no, it won't ever return an RPC not in its argument.", "if", "rpc", "not", "in", "self", ".", "rpcs", ":", "raise", "RuntimeError", "(", "'rpc %r was not given to wait_any as a choice %r'", "%", "(", "rpc", ",", "self", ".", "rpcs", ")", ")", "callback", ",", "args", ",", "kwds", "=", "self", ".", "rpcs", "[", "rpc", "]", "del", "self", ".", "rpcs", "[", "rpc", "]", "if", "callback", "is", "not", "None", ":", "callback", "(", "*", "args", ",", "*", "*", "kwds", ")", "# TODO: Again, what about exceptions?", "return", "0", "return", "delay" ]
Run one item (a callback or an RPC wait_any). Returns: A time to sleep if something happened (may be 0); None if all queues are empty.
[ "Run", "one", "item", "(", "a", "callback", "or", "an", "RPC", "wait_any", ")", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/eventloop.py#L219-L260
GoogleCloudPlatform/datastore-ndb-python
ndb/eventloop.py
EventLoop.run1
def run1(self): """Run one item (a callback or an RPC wait_any) or sleep. Returns: True if something happened; False if all queues are empty. """ delay = self.run0() if delay is None: return False if delay > 0: self.clock.sleep(delay) return True
python
def run1(self): """Run one item (a callback or an RPC wait_any) or sleep. Returns: True if something happened; False if all queues are empty. """ delay = self.run0() if delay is None: return False if delay > 0: self.clock.sleep(delay) return True
[ "def", "run1", "(", "self", ")", ":", "delay", "=", "self", ".", "run0", "(", ")", "if", "delay", "is", "None", ":", "return", "False", "if", "delay", ">", "0", ":", "self", ".", "clock", ".", "sleep", "(", "delay", ")", "return", "True" ]
Run one item (a callback or an RPC wait_any) or sleep. Returns: True if something happened; False if all queues are empty.
[ "Run", "one", "item", "(", "a", "callback", "or", "an", "RPC", "wait_any", ")", "or", "sleep", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/eventloop.py#L262-L273
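The canonical drain loop built from run1 (a sketch; the module may also ship convenience wrappers for this, but the loop below uses only what is shown here):

from ndb import eventloop

ev = eventloop.get_event_loop()
ev.queue_call(0.1, lambda: None)
while ev.run1():  # True: ran something or slept; False: all queues are empty
    pass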
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
_args_to_val
def _args_to_val(func, args): """Helper for GQL parsing to extract values from GQL expressions. This can extract the value from a GQL literal, return a Parameter for a GQL bound parameter (:1 or :foo), and interprets casts like KEY(...) and plain lists of values like (1, 2, 3). Args: func: A string indicating what kind of thing this is. args: One or more GQL values, each integer, string, or GQL literal. """ from .google_imports import gql # Late import, to avoid name conflict. vals = [] for arg in args: if isinstance(arg, (int, long, basestring)): val = Parameter(arg) elif isinstance(arg, gql.Literal): val = arg.Get() else: raise TypeError('Unexpected arg (%r)' % arg) vals.append(val) if func == 'nop': if len(vals) != 1: raise TypeError('"nop" requires exactly one value') return vals[0] # May be a Parameter pfunc = ParameterizedFunction(func, vals) if pfunc.is_parameterized(): return pfunc else: return pfunc.resolve({}, {})
python
def _args_to_val(func, args): """Helper for GQL parsing to extract values from GQL expressions. This can extract the value from a GQL literal, return a Parameter for a GQL bound parameter (:1 or :foo), and interprets casts like KEY(...) and plain lists of values like (1, 2, 3). Args: func: A string indicating what kind of thing this is. args: One or more GQL values, each integer, string, or GQL literal. """ from .google_imports import gql # Late import, to avoid name conflict. vals = [] for arg in args: if isinstance(arg, (int, long, basestring)): val = Parameter(arg) elif isinstance(arg, gql.Literal): val = arg.Get() else: raise TypeError('Unexpected arg (%r)' % arg) vals.append(val) if func == 'nop': if len(vals) != 1: raise TypeError('"nop" requires exactly one value') return vals[0] # May be a Parameter pfunc = ParameterizedFunction(func, vals) if pfunc.is_parameterized(): return pfunc else: return pfunc.resolve({}, {})
[ "def", "_args_to_val", "(", "func", ",", "args", ")", ":", "from", ".", "google_imports", "import", "gql", "# Late import, to avoid name conflict.", "vals", "=", "[", "]", "for", "arg", "in", "args", ":", "if", "isinstance", "(", "arg", ",", "(", "int", ",", "long", ",", "basestring", ")", ")", ":", "val", "=", "Parameter", "(", "arg", ")", "elif", "isinstance", "(", "arg", ",", "gql", ".", "Literal", ")", ":", "val", "=", "arg", ".", "Get", "(", ")", "else", ":", "raise", "TypeError", "(", "'Unexpected arg (%r)'", "%", "arg", ")", "vals", ".", "append", "(", "val", ")", "if", "func", "==", "'nop'", ":", "if", "len", "(", "vals", ")", "!=", "1", ":", "raise", "TypeError", "(", "'\"nop\" requires exactly one value'", ")", "return", "vals", "[", "0", "]", "# May be a Parameter", "pfunc", "=", "ParameterizedFunction", "(", "func", ",", "vals", ")", "if", "pfunc", ".", "is_parameterized", "(", ")", ":", "return", "pfunc", "else", ":", "return", "pfunc", ".", "resolve", "(", "{", "}", ",", "{", "}", ")" ]
Helper for GQL parsing to extract values from GQL expressions. This can extract the value from a GQL literal, return a Parameter for a GQL bound parameter (:1 or :foo), and interprets casts like KEY(...) and plain lists of values like (1, 2, 3). Args: func: A string indicating what kind of thing this is. args: One or more GQL values, each integer, string, or GQL literal.
[ "Helper", "for", "GQL", "parsing", "to", "extract", "values", "from", "GQL", "expressions", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L700-L729
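A hedged micro-example: under the 'nop' pseudo-function, a bare int or string comes back wrapped as a Parameter placeholder for a bound parameter like :1.

from ndb.query import _args_to_val, Parameter

val = _args_to_val('nop', [1])
assert isinstance(val, Parameter)  # stands in for bound parameter :1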
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
_get_prop_from_modelclass
def _get_prop_from_modelclass(modelclass, name): """Helper for GQL parsing to turn a property name into a property object. Args: modelclass: The model class specified in the query. name: The property name. This may contain dots which indicate sub-properties of structured properties. Returns: A Property object. Raises: KeyError if the property doesn't exist and the model class doesn't derive from Expando. """ if name == '__key__': return modelclass._key parts = name.split('.') part, more = parts[0], parts[1:] prop = modelclass._properties.get(part) if prop is None: if issubclass(modelclass, model.Expando): prop = model.GenericProperty(part) else: raise TypeError('Model %s has no property named %r' % (modelclass._get_kind(), part)) while more: part = more.pop(0) if not isinstance(prop, model.StructuredProperty): raise TypeError('Model %s has no property named %r' % (modelclass._get_kind(), part)) maybe = getattr(prop, part, None) if isinstance(maybe, model.Property) and maybe._name == part: prop = maybe else: maybe = prop._modelclass._properties.get(part) if maybe is not None: # Must get it this way to get the copy with the long name. # (See StructuredProperty.__getattr__() for details.) prop = getattr(prop, maybe._code_name) else: if issubclass(prop._modelclass, model.Expando) and not more: prop = model.GenericProperty() prop._name = name # Bypass the restriction on dots. else: raise KeyError('Model %s has no property named %r' % (prop._modelclass._get_kind(), part)) return prop
python
def _get_prop_from_modelclass(modelclass, name): """Helper for GQL parsing to turn a property name into a property object. Args: modelclass: The model class specified in the query. name: The property name. This may contain dots which indicate sub-properties of structured properties. Returns: A Property object. Raises: KeyError if the property doesn't exist and the model class doesn't derive from Expando. """ if name == '__key__': return modelclass._key parts = name.split('.') part, more = parts[0], parts[1:] prop = modelclass._properties.get(part) if prop is None: if issubclass(modelclass, model.Expando): prop = model.GenericProperty(part) else: raise TypeError('Model %s has no property named %r' % (modelclass._get_kind(), part)) while more: part = more.pop(0) if not isinstance(prop, model.StructuredProperty): raise TypeError('Model %s has no property named %r' % (modelclass._get_kind(), part)) maybe = getattr(prop, part, None) if isinstance(maybe, model.Property) and maybe._name == part: prop = maybe else: maybe = prop._modelclass._properties.get(part) if maybe is not None: # Must get it this way to get the copy with the long name. # (See StructuredProperty.__getattr__() for details.) prop = getattr(prop, maybe._code_name) else: if issubclass(prop._modelclass, model.Expando) and not more: prop = model.GenericProperty() prop._name = name # Bypass the restriction on dots. else: raise KeyError('Model %s has no property named %r' % (prop._modelclass._get_kind(), part)) return prop
[ "def", "_get_prop_from_modelclass", "(", "modelclass", ",", "name", ")", ":", "if", "name", "==", "'__key__'", ":", "return", "modelclass", ".", "_key", "parts", "=", "name", ".", "split", "(", "'.'", ")", "part", ",", "more", "=", "parts", "[", "0", "]", ",", "parts", "[", "1", ":", "]", "prop", "=", "modelclass", ".", "_properties", ".", "get", "(", "part", ")", "if", "prop", "is", "None", ":", "if", "issubclass", "(", "modelclass", ",", "model", ".", "Expando", ")", ":", "prop", "=", "model", ".", "GenericProperty", "(", "part", ")", "else", ":", "raise", "TypeError", "(", "'Model %s has no property named %r'", "%", "(", "modelclass", ".", "_get_kind", "(", ")", ",", "part", ")", ")", "while", "more", ":", "part", "=", "more", ".", "pop", "(", "0", ")", "if", "not", "isinstance", "(", "prop", ",", "model", ".", "StructuredProperty", ")", ":", "raise", "TypeError", "(", "'Model %s has no property named %r'", "%", "(", "modelclass", ".", "_get_kind", "(", ")", ",", "part", ")", ")", "maybe", "=", "getattr", "(", "prop", ",", "part", ",", "None", ")", "if", "isinstance", "(", "maybe", ",", "model", ".", "Property", ")", "and", "maybe", ".", "_name", "==", "part", ":", "prop", "=", "maybe", "else", ":", "maybe", "=", "prop", ".", "_modelclass", ".", "_properties", ".", "get", "(", "part", ")", "if", "maybe", "is", "not", "None", ":", "# Must get it this way to get the copy with the long name.", "# (See StructuredProperty.__getattr__() for details.)", "prop", "=", "getattr", "(", "prop", ",", "maybe", ".", "_code_name", ")", "else", ":", "if", "issubclass", "(", "prop", ".", "_modelclass", ",", "model", ".", "Expando", ")", "and", "not", "more", ":", "prop", "=", "model", ".", "GenericProperty", "(", ")", "prop", ".", "_name", "=", "name", "# Bypass the restriction on dots.", "else", ":", "raise", "KeyError", "(", "'Model %s has no property named %r'", "%", "(", "prop", ".", "_modelclass", ".", "_get_kind", "(", ")", ",", "part", ")", ")", "return", "prop" ]
Helper for GQL parsing to turn a property name into a property object. Args: modelclass: The model class specified in the query. name: The property name. This may contain dots which indicate sub-properties of structured properties. Returns: A Property object. Raises: KeyError if the property doesn't exist and the model class doesn't derive from Expando.
[ "Helper", "for", "FQL", "parsing", "to", "turn", "a", "property", "name", "into", "a", "property", "object", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L732-L782
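The dotted-name resolution this record describes is easiest to see from a GQL filter on a structured property. In a sketch like the following (model names are illustrative), 'address.city' is split on dots and walked through the StructuredProperty to the nested Address.city property object:

from google.appengine.ext import ndb

class Address(ndb.Model):
    city = ndb.StringProperty()
    street = ndb.StringProperty()

class Contact(ndb.Model):
    name = ndb.StringProperty()
    address = ndb.StructuredProperty(Address)

# 'address.city' resolves through Contact.address to Address.city; an
# unknown name would raise TypeError (or fall back to a
# GenericProperty if the model were an Expando).
qry = ndb.gql("SELECT * FROM Contact WHERE address.city = 'Amsterdam'")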
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
gql
def gql(query_string, *args, **kwds): """Parse a GQL query string. Args: query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'. *args, **kwds: If present, used to call bind(). Returns: An instance of query_class. """ qry = _gql(query_string) if args or kwds: qry = qry._bind(args, kwds) return qry
python
def gql(query_string, *args, **kwds): """Parse a GQL query string. Args: query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'. *args, **kwds: If present, used to call bind(). Returns: An instance of query_class. """ qry = _gql(query_string) if args or kwds: qry = qry._bind(args, kwds) return qry
[ "def", "gql", "(", "query_string", ",", "*", "args", ",", "*", "*", "kwds", ")", ":", "qry", "=", "_gql", "(", "query_string", ")", "if", "args", "or", "kwds", ":", "qry", "=", "qry", ".", "_bind", "(", "args", ",", "kwds", ")", "return", "qry" ]
Parse a GQL query string. Args: query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'. *args, **kwds: If present, used to call bind(). Returns: An instance of query_class.
[ "Parse", "a", "GQL", "query", "string", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1501-L1514
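As the docstring says, extra arguments to gql() are simply forwarded to bind(); a brief sketch (hypothetical model, App Engine runtime assumed):

from google.appengine.ext import ndb

class Employee(ndb.Model):  # hypothetical model
    age = ndb.IntegerProperty()

# These two forms produce equivalent bound queries.
q1 = ndb.gql("SELECT * FROM Employee WHERE age >= :1", 21)
q2 = ndb.gql("SELECT * FROM Employee WHERE age >= :1").bind(21)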
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
_gql
def _gql(query_string, query_class=Query): """Parse a GQL query string (internal version). Args: query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'. query_class: Optional class to use, default Query. Returns: An instance of query_class. """ from .google_imports import gql # Late import, to avoid name conflict. gql_qry = gql.GQL(query_string) kind = gql_qry.kind() if kind is None: # The query must be lacking a "FROM <kind>" class. Let Expando # stand in for the model class (it won't actually be used to # construct the results). modelclass = model.Expando else: modelclass = model.Model._lookup_model( kind, tasklets.get_context()._conn.adapter.default_model) # Adjust kind to the kind of the model class. kind = modelclass._get_kind() ancestor = None flt = gql_qry.filters() filters = list(modelclass._default_filters()) for name_op in sorted(flt): name, op = name_op values = flt[name_op] op = op.lower() if op == 'is' and name == gql.GQL._GQL__ANCESTOR: if len(values) != 1: raise ValueError('"is" requires exactly one value') [(func, args)] = values ancestor = _args_to_val(func, args) continue if op not in _OPS: raise NotImplementedError('Operation %r is not supported.' % op) for (func, args) in values: val = _args_to_val(func, args) prop = _get_prop_from_modelclass(modelclass, name) if prop._name != name: raise RuntimeError('Whoa! _get_prop_from_modelclass(%s, %r) ' 'returned a property whose name is %r?!' % (modelclass.__name__, name, prop._name)) if isinstance(val, ParameterizedThing): node = ParameterNode(prop, op, val) elif op == 'in': node = prop._IN(val) else: node = prop._comparison(op, val) filters.append(node) if filters: filters = ConjunctionNode(*filters) else: filters = None orders = _orderings_to_orders(gql_qry.orderings(), modelclass) offset = gql_qry.offset() limit = gql_qry.limit() if limit < 0: limit = None keys_only = gql_qry._keys_only if not keys_only: keys_only = None options = QueryOptions(offset=offset, limit=limit, keys_only=keys_only) projection = gql_qry.projection() if gql_qry.is_distinct(): group_by = projection else: group_by = None qry = query_class(kind=kind, ancestor=ancestor, filters=filters, orders=orders, default_options=options, projection=projection, group_by=group_by) return qry
python
def _gql(query_string, query_class=Query): """Parse a GQL query string (internal version). Args: query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'. query_class: Optional class to use, default Query. Returns: An instance of query_class. """ from .google_imports import gql # Late import, to avoid name conflict. gql_qry = gql.GQL(query_string) kind = gql_qry.kind() if kind is None: # The query must be lacking a "FROM <kind>" class. Let Expando # stand in for the model class (it won't actually be used to # construct the results). modelclass = model.Expando else: modelclass = model.Model._lookup_model( kind, tasklets.get_context()._conn.adapter.default_model) # Adjust kind to the kind of the model class. kind = modelclass._get_kind() ancestor = None flt = gql_qry.filters() filters = list(modelclass._default_filters()) for name_op in sorted(flt): name, op = name_op values = flt[name_op] op = op.lower() if op == 'is' and name == gql.GQL._GQL__ANCESTOR: if len(values) != 1: raise ValueError('"is" requires exactly one value') [(func, args)] = values ancestor = _args_to_val(func, args) continue if op not in _OPS: raise NotImplementedError('Operation %r is not supported.' % op) for (func, args) in values: val = _args_to_val(func, args) prop = _get_prop_from_modelclass(modelclass, name) if prop._name != name: raise RuntimeError('Whoa! _get_prop_from_modelclass(%s, %r) ' 'returned a property whose name is %r?!' % (modelclass.__name__, name, prop._name)) if isinstance(val, ParameterizedThing): node = ParameterNode(prop, op, val) elif op == 'in': node = prop._IN(val) else: node = prop._comparison(op, val) filters.append(node) if filters: filters = ConjunctionNode(*filters) else: filters = None orders = _orderings_to_orders(gql_qry.orderings(), modelclass) offset = gql_qry.offset() limit = gql_qry.limit() if limit < 0: limit = None keys_only = gql_qry._keys_only if not keys_only: keys_only = None options = QueryOptions(offset=offset, limit=limit, keys_only=keys_only) projection = gql_qry.projection() if gql_qry.is_distinct(): group_by = projection else: group_by = None qry = query_class(kind=kind, ancestor=ancestor, filters=filters, orders=orders, default_options=options, projection=projection, group_by=group_by) return qry
[ "def", "_gql", "(", "query_string", ",", "query_class", "=", "Query", ")", ":", "from", ".", "google_imports", "import", "gql", "# Late import, to avoid name conflict.", "gql_qry", "=", "gql", ".", "GQL", "(", "query_string", ")", "kind", "=", "gql_qry", ".", "kind", "(", ")", "if", "kind", "is", "None", ":", "# The query must be lacking a \"FROM <kind>\" class. Let Expando", "# stand in for the model class (it won't actually be used to", "# construct the results).", "modelclass", "=", "model", ".", "Expando", "else", ":", "modelclass", "=", "model", ".", "Model", ".", "_lookup_model", "(", "kind", ",", "tasklets", ".", "get_context", "(", ")", ".", "_conn", ".", "adapter", ".", "default_model", ")", "# Adjust kind to the kind of the model class.", "kind", "=", "modelclass", ".", "_get_kind", "(", ")", "ancestor", "=", "None", "flt", "=", "gql_qry", ".", "filters", "(", ")", "filters", "=", "list", "(", "modelclass", ".", "_default_filters", "(", ")", ")", "for", "name_op", "in", "sorted", "(", "flt", ")", ":", "name", ",", "op", "=", "name_op", "values", "=", "flt", "[", "name_op", "]", "op", "=", "op", ".", "lower", "(", ")", "if", "op", "==", "'is'", "and", "name", "==", "gql", ".", "GQL", ".", "_GQL__ANCESTOR", ":", "if", "len", "(", "values", ")", "!=", "1", ":", "raise", "ValueError", "(", "'\"is\" requires exactly one value'", ")", "[", "(", "func", ",", "args", ")", "]", "=", "values", "ancestor", "=", "_args_to_val", "(", "func", ",", "args", ")", "continue", "if", "op", "not", "in", "_OPS", ":", "raise", "NotImplementedError", "(", "'Operation %r is not supported.'", "%", "op", ")", "for", "(", "func", ",", "args", ")", "in", "values", ":", "val", "=", "_args_to_val", "(", "func", ",", "args", ")", "prop", "=", "_get_prop_from_modelclass", "(", "modelclass", ",", "name", ")", "if", "prop", ".", "_name", "!=", "name", ":", "raise", "RuntimeError", "(", "'Whoa! _get_prop_from_modelclass(%s, %r) '", "'returned a property whose name is %r?!'", "%", "(", "modelclass", ".", "__name__", ",", "name", ",", "prop", ".", "_name", ")", ")", "if", "isinstance", "(", "val", ",", "ParameterizedThing", ")", ":", "node", "=", "ParameterNode", "(", "prop", ",", "op", ",", "val", ")", "elif", "op", "==", "'in'", ":", "node", "=", "prop", ".", "_IN", "(", "val", ")", "else", ":", "node", "=", "prop", ".", "_comparison", "(", "op", ",", "val", ")", "filters", ".", "append", "(", "node", ")", "if", "filters", ":", "filters", "=", "ConjunctionNode", "(", "*", "filters", ")", "else", ":", "filters", "=", "None", "orders", "=", "_orderings_to_orders", "(", "gql_qry", ".", "orderings", "(", ")", ",", "modelclass", ")", "offset", "=", "gql_qry", ".", "offset", "(", ")", "limit", "=", "gql_qry", ".", "limit", "(", ")", "if", "limit", "<", "0", ":", "limit", "=", "None", "keys_only", "=", "gql_qry", ".", "_keys_only", "if", "not", "keys_only", ":", "keys_only", "=", "None", "options", "=", "QueryOptions", "(", "offset", "=", "offset", ",", "limit", "=", "limit", ",", "keys_only", "=", "keys_only", ")", "projection", "=", "gql_qry", ".", "projection", "(", ")", "if", "gql_qry", ".", "is_distinct", "(", ")", ":", "group_by", "=", "projection", "else", ":", "group_by", "=", "None", "qry", "=", "query_class", "(", "kind", "=", "kind", ",", "ancestor", "=", "ancestor", ",", "filters", "=", "filters", ",", "orders", "=", "orders", ",", "default_options", "=", "options", ",", "projection", "=", "projection", ",", "group_by", "=", "group_by", ")", "return", "qry" ]
Parse a GQL query string (internal version). Args: query_string: Full GQL query, e.g. 'SELECT * FROM Kind WHERE prop = 1'. query_class: Optional class to use, default Query. Returns: An instance of query_class.
[ "Parse", "a", "GQL", "query", "string", "(", "internal", "version", ")", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1518-L1596
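_gql() is where each GQL clause is mapped onto Query constructor arguments: the kind, an ANCESTOR IS filter, WHERE predicates, ORDER BY, OFFSET/LIMIT (packed into QueryOptions), and a DISTINCT projection that becomes group_by. A single query string exercising most of those paths might look like this sketch (model and ancestor key are hypothetical):

from google.appengine.ext import ndb

class Employee(ndb.Model):  # hypothetical model
    name = ndb.StringProperty()
    age = ndb.IntegerProperty()

boss = ndb.Key('Employee', 'boss')  # hypothetical ancestor key
# DISTINCT + a projected property become projection/group_by;
# ANCESTOR IS :1 becomes the ancestor argument after bind();
# LIMIT/OFFSET end up in the query's default QueryOptions.
qry = ndb.gql(
    "SELECT DISTINCT name FROM Employee "
    "WHERE ANCESTOR IS :1 "
    "ORDER BY name LIMIT 10 OFFSET 5").bind(boss)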
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
RepeatedStructuredPropertyPredicate._apply
def _apply(self, key_value_map): """Apply the filter to values extracted from an entity. Think of self.match_keys and self.match_values as representing a table with one row. For example: match_keys = ('name', 'age', 'rank') match_values = ('Joe', 24, 5) (Except that in reality, the values are represented by tuples produced by datastore_types.PropertyValueToKeyValue().) represents this table: | name | age | rank | +---------+-------+--------+ | 'Joe' | 24 | 5 | Think of key_value_map as a table with the same structure but (potentially) many rows. This represents a repeated structured property of a single entity. For example: {'name': ['Joe', 'Jane', 'Dick'], 'age': [24, 21, 23], 'rank': [5, 1, 2]} represents this table: | name | age | rank | +---------+-------+--------+ | 'Joe' | 24 | 5 | | 'Jane' | 21 | 1 | | 'Dick' | 23 | 2 | We must determine whether at least one row of the second table exactly matches the first table. We need this class because the datastore, when asked to find an entity with name 'Joe', age 24 and rank 5, will include entities that have 'Joe' somewhere in the name column, 24 somewhere in the age column, and 5 somewhere in the rank column, but not all aligned on a single row. Such an entity should not be considered a match. """ columns = [] for key in self.match_keys: column = key_value_map.get(key) if not column: # None, or an empty list. return False # If any column is empty there can be no match. columns.append(column) # Use izip to transpose the columns into rows. return self.match_values in itertools.izip(*columns)
python
def _apply(self, key_value_map): """Apply the filter to values extracted from an entity. Think of self.match_keys and self.match_values as representing a table with one row. For example: match_keys = ('name', 'age', 'rank') match_values = ('Joe', 24, 5) (Except that in reality, the values are represented by tuples produced by datastore_types.PropertyValueToKeyValue().) represents this table: | name | age | rank | +---------+-------+--------+ | 'Joe' | 24 | 5 | Think of key_value_map as a table with the same structure but (potentially) many rows. This represents a repeated structured property of a single entity. For example: {'name': ['Joe', 'Jane', 'Dick'], 'age': [24, 21, 23], 'rank': [5, 1, 2]} represents this table: | name | age | rank | +---------+-------+--------+ | 'Joe' | 24 | 5 | | 'Jane' | 21 | 1 | | 'Dick' | 23 | 2 | We must determine whether at least one row of the second table exactly matches the first table. We need this class because the datastore, when asked to find an entity with name 'Joe', age 24 and rank 5, will include entities that have 'Joe' somewhere in the name column, 24 somewhere in the age column, and 5 somewhere in the rank column, but not all aligned on a single row. Such an entity should not be considered a match. """ columns = [] for key in self.match_keys: column = key_value_map.get(key) if not column: # None, or an empty list. return False # If any column is empty there can be no match. columns.append(column) # Use izip to transpose the columns into rows. return self.match_values in itertools.izip(*columns)
[ "def", "_apply", "(", "self", ",", "key_value_map", ")", ":", "columns", "=", "[", "]", "for", "key", "in", "self", ".", "match_keys", ":", "column", "=", "key_value_map", ".", "get", "(", "key", ")", "if", "not", "column", ":", "# None, or an empty list.", "return", "False", "# If any column is empty there can be no match.", "columns", ".", "append", "(", "column", ")", "# Use izip to transpose the columns into rows.", "return", "self", ".", "match_values", "in", "itertools", ".", "izip", "(", "*", "columns", ")" ]
Apply the filter to values extracted from an entity. Think of self.match_keys and self.match_values as representing a table with one row. For example: match_keys = ('name', 'age', 'rank') match_values = ('Joe', 24, 5) (Except that in reality, the values are represented by tuples produced by datastore_types.PropertyValueToKeyValue().) represents this table: | name | age | rank | +---------+-------+--------+ | 'Joe' | 24 | 5 | Think of key_value_map as a table with the same structure but (potentially) many rows. This represents a repeated structured property of a single entity. For example: {'name': ['Joe', 'Jane', 'Dick'], 'age': [24, 21, 23], 'rank': [5, 1, 2]} represents this table: | name | age | rank | +---------+-------+--------+ | 'Joe' | 24 | 5 | | 'Jane' | 21 | 1 | | 'Dick' | 23 | 2 | We must determine whether at least one row of the second table exactly matches the first table. We need this class because the datastore, when asked to find an entity with name 'Joe', age 24 and rank 5, will include entities that have 'Joe' somewhere in the name column, 24 somewhere in the age column, and 5 somewhere in the rank column, but not all aligned on a single row. Such an entity should not be considered a match.
[ "Apply", "the", "filter", "to", "values", "extracted", "from", "an", "entity", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L208-L257
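This predicate is what gives equality filters on a repeated StructuredProperty their row-at-a-time semantics. A sketch of the query shape it supports (model names are illustrative):

from google.appengine.ext import ndb

class Address(ndb.Model):
    city = ndb.StringProperty()
    street = ndb.StringProperty()

class Contact(ndb.Model):
    addresses = ndb.StructuredProperty(Address, repeated=True)

# Matches only contacts that have a single Address entry with BOTH
# values; the datastore alone would also return contacts that have
# 'Amsterdam' in one entry and 'Spear St' in another.
qry = Contact.query(
    Contact.addresses == Address(city='Amsterdam', street='Spear St'))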
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query._fix_namespace
def _fix_namespace(self): """Internal helper to fix the namespace. This is called to ensure that for queries without an explicit namespace, the namespace used by async calls is the one in effect at the time the async call is made, not the one in effect when the request is actually generated. """ if self.namespace is not None: return self namespace = namespace_manager.get_namespace() return self.__class__(kind=self.kind, ancestor=self.ancestor, filters=self.filters, orders=self.orders, app=self.app, namespace=namespace, default_options=self.default_options, projection=self.projection, group_by=self.group_by)
python
def _fix_namespace(self): """Internal helper to fix the namespace. This is called to ensure that for queries without an explicit namespace, the namespace used by async calls is the one in effect at the time the async call is made, not the one in effect when the request is actually generated. """ if self.namespace is not None: return self namespace = namespace_manager.get_namespace() return self.__class__(kind=self.kind, ancestor=self.ancestor, filters=self.filters, orders=self.orders, app=self.app, namespace=namespace, default_options=self.default_options, projection=self.projection, group_by=self.group_by)
[ "def", "_fix_namespace", "(", "self", ")", ":", "if", "self", ".", "namespace", "is", "not", "None", ":", "return", "self", "namespace", "=", "namespace_manager", ".", "get_namespace", "(", ")", "return", "self", ".", "__class__", "(", "kind", "=", "self", ".", "kind", ",", "ancestor", "=", "self", ".", "ancestor", ",", "filters", "=", "self", ".", "filters", ",", "orders", "=", "self", ".", "orders", ",", "app", "=", "self", ".", "app", ",", "namespace", "=", "namespace", ",", "default_options", "=", "self", ".", "default_options", ",", "projection", "=", "self", ".", "projection", ",", "group_by", "=", "self", ".", "group_by", ")" ]
Internal helper to fix the namespace. This is called to ensure that for queries without an explicit namespace, the namespace used by async calls is the one in effect at the time the async call is made, not the one in effect when the request is actually generated.
[ "Internal", "helper", "to", "fix", "the", "namespace", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L912-L927
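The practical consequence: the namespace is pinned when the async call is made, not when its RPC is later generated. A sketch using the real namespace_manager API (the Employee model is hypothetical):

from google.appengine.api import namespace_manager
from google.appengine.ext import ndb

class Employee(ndb.Model):  # hypothetical model
    name = ndb.StringProperty()

namespace_manager.set_namespace('tenant-a')
fut = Employee.query().fetch_async()   # namespace 'tenant-a' is captured here
namespace_manager.set_namespace('tenant-b')
employees = fut.get_result()           # still runs against 'tenant-a'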
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query.run_to_queue
def run_to_queue(self, queue, conn, options=None, dsquery=None): """Run this query, putting entities into the given queue.""" try: multiquery = self._maybe_multi_query() if multiquery is not None: yield multiquery.run_to_queue(queue, conn, options=options) return if dsquery is None: dsquery = self._get_query(conn) rpc = dsquery.run_async(conn, options) while rpc is not None: batch = yield rpc if (batch.skipped_results and datastore_query.FetchOptions.offset(options)): offset = options.offset - batch.skipped_results options = datastore_query.FetchOptions(offset=offset, config=options) rpc = batch.next_batch_async(options) for i, result in enumerate(batch.results): queue.putq((batch, i, result)) queue.complete() except GeneratorExit: raise except Exception: if not queue.done(): _, e, tb = sys.exc_info() queue.set_exception(e, tb) raise
python
def run_to_queue(self, queue, conn, options=None, dsquery=None): """Run this query, putting entities into the given queue.""" try: multiquery = self._maybe_multi_query() if multiquery is not None: yield multiquery.run_to_queue(queue, conn, options=options) return if dsquery is None: dsquery = self._get_query(conn) rpc = dsquery.run_async(conn, options) while rpc is not None: batch = yield rpc if (batch.skipped_results and datastore_query.FetchOptions.offset(options)): offset = options.offset - batch.skipped_results options = datastore_query.FetchOptions(offset=offset, config=options) rpc = batch.next_batch_async(options) for i, result in enumerate(batch.results): queue.putq((batch, i, result)) queue.complete() except GeneratorExit: raise except Exception: if not queue.done(): _, e, tb = sys.exc_info() queue.set_exception(e, tb) raise
[ "def", "run_to_queue", "(", "self", ",", "queue", ",", "conn", ",", "options", "=", "None", ",", "dsquery", "=", "None", ")", ":", "try", ":", "multiquery", "=", "self", ".", "_maybe_multi_query", "(", ")", "if", "multiquery", "is", "not", "None", ":", "yield", "multiquery", ".", "run_to_queue", "(", "queue", ",", "conn", ",", "options", "=", "options", ")", "return", "if", "dsquery", "is", "None", ":", "dsquery", "=", "self", ".", "_get_query", "(", "conn", ")", "rpc", "=", "dsquery", ".", "run_async", "(", "conn", ",", "options", ")", "while", "rpc", "is", "not", "None", ":", "batch", "=", "yield", "rpc", "if", "(", "batch", ".", "skipped_results", "and", "datastore_query", ".", "FetchOptions", ".", "offset", "(", "options", ")", ")", ":", "offset", "=", "options", ".", "offset", "-", "batch", ".", "skipped_results", "options", "=", "datastore_query", ".", "FetchOptions", "(", "offset", "=", "offset", ",", "config", "=", "options", ")", "rpc", "=", "batch", ".", "next_batch_async", "(", "options", ")", "for", "i", ",", "result", "in", "enumerate", "(", "batch", ".", "results", ")", ":", "queue", ".", "putq", "(", "(", "batch", ",", "i", ",", "result", ")", ")", "queue", ".", "complete", "(", ")", "except", "GeneratorExit", ":", "raise", "except", "Exception", ":", "if", "not", "queue", ".", "done", "(", ")", ":", "_", ",", "e", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "queue", ".", "set_exception", "(", "e", ",", "tb", ")", "raise" ]
Run this query, putting entities into the given queue.
[ "Run", "this", "query", "putting", "entities", "into", "the", "given", "queue", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L957-L985
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query.is_distinct
def is_distinct(self): """True if results are guaranteed to contain a unique set of property values. This happens when every property in the group_by is also in the projection. """ return bool(self.__group_by and set(self._to_property_names(self.__group_by)) <= set(self._to_property_names(self.__projection)))
python
def is_distinct(self): """True if results are guaranteed to contain a unique set of property values. This happens when every property in the group_by is also in the projection. """ return bool(self.__group_by and set(self._to_property_names(self.__group_by)) <= set(self._to_property_names(self.__projection)))
[ "def", "is_distinct", "(", "self", ")", ":", "return", "bool", "(", "self", ".", "__group_by", "and", "set", "(", "self", ".", "_to_property_names", "(", "self", ".", "__group_by", ")", ")", "<=", "set", "(", "self", ".", "_to_property_names", "(", "self", ".", "__projection", ")", ")", ")" ]
True if results are guaranteed to contain a unique set of property values. This happens when every property in the group_by is also in the projection.
[ "True", "if", "results", "are", "guaranteed", "to", "contain", "a", "unique", "set", "of", "property", "values", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1074-L1082
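So a query is "distinct" exactly when its group_by is a subset of its projection. A quick sketch (hypothetical model; in the surrounding source is_distinct is decorated as a property, so it is read without parentheses):

from google.appengine.ext import ndb

class Employee(ndb.Model):  # hypothetical model
    name = ndb.StringProperty()

qry = Employee.query(projection=[Employee.name],
                     group_by=[Employee.name])
assert qry.is_distinct       # group_by is a subset of projection

qry2 = Employee.query(projection=[Employee.name])
assert not qry2.is_distinct  # no group_by at all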
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query.filter
def filter(self, *args): """Return a new Query with additional filter(s) applied.""" if not args: return self preds = [] f = self.filters if f: preds.append(f) for arg in args: if not isinstance(arg, Node): raise TypeError('Cannot filter a non-Node argument; received %r' % arg) preds.append(arg) if not preds: pred = None elif len(preds) == 1: pred = preds[0] else: pred = ConjunctionNode(*preds) return self.__class__(kind=self.kind, ancestor=self.ancestor, filters=pred, orders=self.orders, app=self.app, namespace=self.namespace, default_options=self.default_options, projection=self.projection, group_by=self.group_by)
python
def filter(self, *args): """Return a new Query with additional filter(s) applied.""" if not args: return self preds = [] f = self.filters if f: preds.append(f) for arg in args: if not isinstance(arg, Node): raise TypeError('Cannot filter a non-Node argument; received %r' % arg) preds.append(arg) if not preds: pred = None elif len(preds) == 1: pred = preds[0] else: pred = ConjunctionNode(*preds) return self.__class__(kind=self.kind, ancestor=self.ancestor, filters=pred, orders=self.orders, app=self.app, namespace=self.namespace, default_options=self.default_options, projection=self.projection, group_by=self.group_by)
[ "def", "filter", "(", "self", ",", "*", "args", ")", ":", "if", "not", "args", ":", "return", "self", "preds", "=", "[", "]", "f", "=", "self", ".", "filters", "if", "f", ":", "preds", ".", "append", "(", "f", ")", "for", "arg", "in", "args", ":", "if", "not", "isinstance", "(", "arg", ",", "Node", ")", ":", "raise", "TypeError", "(", "'Cannot filter a non-Node argument; received %r'", "%", "arg", ")", "preds", ".", "append", "(", "arg", ")", "if", "not", "preds", ":", "pred", "=", "None", "elif", "len", "(", "preds", ")", "==", "1", ":", "pred", "=", "preds", "[", "0", "]", "else", ":", "pred", "=", "ConjunctionNode", "(", "*", "preds", ")", "return", "self", ".", "__class__", "(", "kind", "=", "self", ".", "kind", ",", "ancestor", "=", "self", ".", "ancestor", ",", "filters", "=", "pred", ",", "orders", "=", "self", ".", "orders", ",", "app", "=", "self", ".", "app", ",", "namespace", "=", "self", ".", "namespace", ",", "default_options", "=", "self", ".", "default_options", ",", "projection", "=", "self", ".", "projection", ",", "group_by", "=", "self", ".", "group_by", ")" ]
Return a new Query with additional filter(s) applied.
[ "Return", "a", "new", "Query", "with", "additional", "filter", "(", "s", ")", "applied", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1084-L1106
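Note that filter() never mutates: each call returns a new Query whose predicates are ANDed together into a ConjunctionNode. Sketch (hypothetical model):

from google.appengine.ext import ndb

class Employee(ndb.Model):  # hypothetical model
    name = ndb.StringProperty()
    age = ndb.IntegerProperty()

base = Employee.query()
adults = base.filter(Employee.age >= 18)
joes = adults.filter(Employee.name == 'Joe')
# base still has no filters; joes combines both predicates.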
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query.order
def order(self, *args): """Return a new Query with additional sort order(s) applied.""" # q.order(Employee.name, -Employee.age) if not args: return self orders = [] o = self.orders if o: orders.append(o) for arg in args: if isinstance(arg, model.Property): orders.append(datastore_query.PropertyOrder(arg._name, _ASC)) elif isinstance(arg, datastore_query.Order): orders.append(arg) else: raise TypeError('order() expects a Property or query Order; ' 'received %r' % arg) if not orders: orders = None elif len(orders) == 1: orders = orders[0] else: orders = datastore_query.CompositeOrder(orders) return self.__class__(kind=self.kind, ancestor=self.ancestor, filters=self.filters, orders=orders, app=self.app, namespace=self.namespace, default_options=self.default_options, projection=self.projection, group_by=self.group_by)
python
def order(self, *args): """Return a new Query with additional sort order(s) applied.""" # q.order(Employee.name, -Employee.age) if not args: return self orders = [] o = self.orders if o: orders.append(o) for arg in args: if isinstance(arg, model.Property): orders.append(datastore_query.PropertyOrder(arg._name, _ASC)) elif isinstance(arg, datastore_query.Order): orders.append(arg) else: raise TypeError('order() expects a Property or query Order; ' 'received %r' % arg) if not orders: orders = None elif len(orders) == 1: orders = orders[0] else: orders = datastore_query.CompositeOrder(orders) return self.__class__(kind=self.kind, ancestor=self.ancestor, filters=self.filters, orders=orders, app=self.app, namespace=self.namespace, default_options=self.default_options, projection=self.projection, group_by=self.group_by)
[ "def", "order", "(", "self", ",", "*", "args", ")", ":", "# q.order(Employee.name, -Employee.age)", "if", "not", "args", ":", "return", "self", "orders", "=", "[", "]", "o", "=", "self", ".", "orders", "if", "o", ":", "orders", ".", "append", "(", "o", ")", "for", "arg", "in", "args", ":", "if", "isinstance", "(", "arg", ",", "model", ".", "Property", ")", ":", "orders", ".", "append", "(", "datastore_query", ".", "PropertyOrder", "(", "arg", ".", "_name", ",", "_ASC", ")", ")", "elif", "isinstance", "(", "arg", ",", "datastore_query", ".", "Order", ")", ":", "orders", ".", "append", "(", "arg", ")", "else", ":", "raise", "TypeError", "(", "'order() expects a Property or query Order; '", "'received %r'", "%", "arg", ")", "if", "not", "orders", ":", "orders", "=", "None", "elif", "len", "(", "orders", ")", "==", "1", ":", "orders", "=", "orders", "[", "0", "]", "else", ":", "orders", "=", "datastore_query", ".", "CompositeOrder", "(", "orders", ")", "return", "self", ".", "__class__", "(", "kind", "=", "self", ".", "kind", ",", "ancestor", "=", "self", ".", "ancestor", ",", "filters", "=", "self", ".", "filters", ",", "orders", "=", "orders", ",", "app", "=", "self", ".", "app", ",", "namespace", "=", "self", ".", "namespace", ",", "default_options", "=", "self", ".", "default_options", ",", "projection", "=", "self", ".", "projection", ",", "group_by", "=", "self", ".", "group_by", ")" ]
Return a new Query with additional sort order(s) applied.
[ "Return", "a", "new", "Query", "with", "additional", "sort", "order", "(", "s", ")", "applied", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1108-L1135
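order() accepts Property objects for ascending order and negated properties (or explicit datastore_query.Order objects) for descending, as the comment in the source suggests. Sketch (hypothetical model):

from google.appengine.ext import ndb

class Employee(ndb.Model):  # hypothetical model
    name = ndb.StringProperty()
    age = ndb.IntegerProperty()

# Ascending by name, then descending by age.
qry = Employee.query().order(Employee.name, -Employee.age)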
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query.map
def map(self, callback, pass_batch_into_callback=None, merge_future=None, **q_options): """Map a callback function or tasklet over the query results. Args: callback: A function or tasklet to be applied to each result; see below. merge_future: Optional Future subclass; see below. **q_options: All query options keyword arguments are supported. Callback signature: The callback is normally called with an entity as argument. However if keys_only=True is given, it is called with a Key. Also, when pass_batch_into_callback is True, it is called with three arguments: the current batch, the index within the batch, and the entity or Key at that index. The callback can return whatever it wants. If the callback is None, a trivial callback is assumed that just returns the entity or key passed in (ignoring produce_cursors). Optional merge future: The merge_future is an advanced argument that can be used to override how the callback results are combined into the overall map() return value. By default a list of callback return values is produced. By substituting one of a small number of specialized alternatives you can arrange otherwise. See tasklets.MultiFuture for the default implementation and a description of the protocol the merge_future object must implement. Alternatives from the same module include QueueFuture, SerialQueueFuture and ReducingFuture. Returns: When the query has run to completion and all callbacks have returned, map() returns a list of the results of all callbacks. (But see 'optional merge future' above.) """ return self.map_async(callback, pass_batch_into_callback=pass_batch_into_callback, merge_future=merge_future, **q_options).get_result()
python
def map(self, callback, pass_batch_into_callback=None, merge_future=None, **q_options): """Map a callback function or tasklet over the query results. Args: callback: A function or tasklet to be applied to each result; see below. merge_future: Optional Future subclass; see below. **q_options: All query options keyword arguments are supported. Callback signature: The callback is normally called with an entity as argument. However if keys_only=True is given, it is called with a Key. Also, when pass_batch_into_callback is True, it is called with three arguments: the current batch, the index within the batch, and the entity or Key at that index. The callback can return whatever it wants. If the callback is None, a trivial callback is assumed that just returns the entity or key passed in (ignoring produce_cursors). Optional merge future: The merge_future is an advanced argument that can be used to override how the callback results are combined into the overall map() return value. By default a list of callback return values is produced. By substituting one of a small number of specialized alternatives you can arrange otherwise. See tasklets.MultiFuture for the default implementation and a description of the protocol the merge_future object must implement. Alternatives from the same module include QueueFuture, SerialQueueFuture and ReducingFuture. Returns: When the query has run to completion and all callbacks have returned, map() returns a list of the results of all callbacks. (But see 'optional merge future' above.) """ return self.map_async(callback, pass_batch_into_callback=pass_batch_into_callback, merge_future=merge_future, **q_options).get_result()
[ "def", "map", "(", "self", ",", "callback", ",", "pass_batch_into_callback", "=", "None", ",", "merge_future", "=", "None", ",", "*", "*", "q_options", ")", ":", "return", "self", ".", "map_async", "(", "callback", ",", "pass_batch_into_callback", "=", "pass_batch_into_callback", ",", "merge_future", "=", "merge_future", ",", "*", "*", "q_options", ")", ".", "get_result", "(", ")" ]
Map a callback function or tasklet over the query results. Args: callback: A function or tasklet to be applied to each result; see below. merge_future: Optional Future subclass; see below. **q_options: All query options keyword arguments are supported. Callback signature: The callback is normally called with an entity as argument. However if keys_only=True is given, it is called with a Key. Also, when pass_batch_into_callback is True, it is called with three arguments: the current batch, the index within the batch, and the entity or Key at that index. The callback can return whatever it wants. If the callback is None, a trivial callback is assumed that just returns the entity or key passed in (ignoring produce_cursors). Optional merge future: The merge_future is an advanced argument that can be used to override how the callback results are combined into the overall map() return value. By default a list of callback return values is produced. By substituting one of a small number of specialized alternatives you can arrange otherwise. See tasklets.MultiFuture for the default implementation and a description of the protocol the merge_future object must implement. Alternatives from the same module include QueueFuture, SerialQueueFuture and ReducingFuture. Returns: When the query has run to completion and all callbacks have returned, map() returns a list of the results of all callbacks. (But see 'optional merge future' above.)
[ "Map", "a", "callback", "function", "or", "tasklet", "over", "the", "query", "results", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1154-L1190
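A small sketch of the normal callback shape described above, with the model and callback names invented for illustration:

from google.appengine.ext import ndb

class Employee(ndb.Model):  # hypothetical model
    name = ndb.StringProperty()

def extract_name(emp):
    # Called once per result entity; return values are collected
    # into the list that map() returns.
    return emp.name

names = Employee.query().map(extract_name)

# With keys_only=True the callback receives ndb.Key objects instead.
keys = Employee.query().map(lambda key: key, keys_only=True)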
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query.map_async
def map_async(self, callback, pass_batch_into_callback=None, merge_future=None, **q_options): """Map a callback function or tasklet over the query results. This is the asynchronous version of Query.map(). """ qry = self._fix_namespace() return tasklets.get_context().map_query( qry, callback, pass_batch_into_callback=pass_batch_into_callback, options=self._make_options(q_options), merge_future=merge_future)
python
def map_async(self, callback, pass_batch_into_callback=None, merge_future=None, **q_options): """Map a callback function or tasklet over the query results. This is the asynchronous version of Query.map(). """ qry = self._fix_namespace() return tasklets.get_context().map_query( qry, callback, pass_batch_into_callback=pass_batch_into_callback, options=self._make_options(q_options), merge_future=merge_future)
[ "def", "map_async", "(", "self", ",", "callback", ",", "pass_batch_into_callback", "=", "None", ",", "merge_future", "=", "None", ",", "*", "*", "q_options", ")", ":", "qry", "=", "self", ".", "_fix_namespace", "(", ")", "return", "tasklets", ".", "get_context", "(", ")", ".", "map_query", "(", "qry", ",", "callback", ",", "pass_batch_into_callback", "=", "pass_batch_into_callback", ",", "options", "=", "self", ".", "_make_options", "(", "q_options", ")", ",", "merge_future", "=", "merge_future", ")" ]
Map a callback function or tasklet over the query results. This is the asynchronous version of Query.map().
[ "Map", "a", "callback", "function", "or", "tasklet", "over", "the", "query", "results", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1193-L1205
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query.fetch_async
def fetch_async(self, limit=None, **q_options): """Fetch a list of query results, up to a limit. This is the asynchronous version of Query.fetch(). """ if limit is None: default_options = self._make_options(q_options) if default_options is not None and default_options.limit is not None: limit = default_options.limit else: limit = _MAX_LIMIT q_options['limit'] = limit q_options.setdefault('batch_size', limit) if self._needs_multi_query(): return self.map_async(None, **q_options) # Optimization using direct batches. options = self._make_options(q_options) qry = self._fix_namespace() return qry._run_to_list([], options=options)
python
def fetch_async(self, limit=None, **q_options): """Fetch a list of query results, up to a limit. This is the asynchronous version of Query.fetch(). """ if limit is None: default_options = self._make_options(q_options) if default_options is not None and default_options.limit is not None: limit = default_options.limit else: limit = _MAX_LIMIT q_options['limit'] = limit q_options.setdefault('batch_size', limit) if self._needs_multi_query(): return self.map_async(None, **q_options) # Optimization using direct batches. options = self._make_options(q_options) qry = self._fix_namespace() return qry._run_to_list([], options=options)
[ "def", "fetch_async", "(", "self", ",", "limit", "=", "None", ",", "*", "*", "q_options", ")", ":", "if", "limit", "is", "None", ":", "default_options", "=", "self", ".", "_make_options", "(", "q_options", ")", "if", "default_options", "is", "not", "None", "and", "default_options", ".", "limit", "is", "not", "None", ":", "limit", "=", "default_options", ".", "limit", "else", ":", "limit", "=", "_MAX_LIMIT", "q_options", "[", "'limit'", "]", "=", "limit", "q_options", ".", "setdefault", "(", "'batch_size'", ",", "limit", ")", "if", "self", ".", "_needs_multi_query", "(", ")", ":", "return", "self", ".", "map_async", "(", "None", ",", "*", "*", "q_options", ")", "# Optimization using direct batches.", "options", "=", "self", ".", "_make_options", "(", "q_options", ")", "qry", "=", "self", ".", "_fix_namespace", "(", ")", "return", "qry", ".", "_run_to_list", "(", "[", "]", ",", "options", "=", "options", ")" ]
Fetch a list of query results, up to a limit. This is the asynchronous version of Query.fetch().
[ "Fetch", "a", "list", "of", "query", "results", "up", "to", "a", "limit", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1221-L1239
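Typical usage pairs fetch_async() with other work before blocking on the future; a sketch (hypothetical model):

from google.appengine.ext import ndb

class Employee(ndb.Model):  # hypothetical model
    age = ndb.IntegerProperty()

fut = Employee.query(Employee.age >= 21).fetch_async(20)
# ... issue other RPCs or do unrelated work here ...
employees = fut.get_result()   # blocks until the batch arrives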
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query._get_async
def _get_async(self, **q_options): """Internal version of get_async().""" res = yield self.fetch_async(1, **q_options) if not res: raise tasklets.Return(None) raise tasklets.Return(res[0])
python
def _get_async(self, **q_options): """Internal version of get_async().""" res = yield self.fetch_async(1, **q_options) if not res: raise tasklets.Return(None) raise tasklets.Return(res[0])
[ "def", "_get_async", "(", "self", ",", "*", "*", "q_options", ")", ":", "res", "=", "yield", "self", ".", "fetch_async", "(", "1", ",", "*", "*", "q_options", ")", "if", "not", "res", ":", "raise", "tasklets", ".", "Return", "(", "None", ")", "raise", "tasklets", ".", "Return", "(", "res", "[", "0", "]", ")" ]
Internal version of get_async().
[ "Internal", "version", "of", "get_async", "()", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1264-L1269
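_get_async() backs the public get()/get_async() pair: fetch at most one result and return it, or None if there is no match. Sketch (hypothetical model):

from google.appengine.ext import ndb

class Employee(ndb.Model):  # hypothetical model
    name = ndb.StringProperty()

joe = Employee.query(Employee.name == 'Joe').get()   # entity or None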
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query.count_async
def count_async(self, limit=None, **q_options): """Count the number of query results, up to a limit. This is the asynchronous version of Query.count(). """ qry = self._fix_namespace() return qry._count_async(limit=limit, **q_options)
python
def count_async(self, limit=None, **q_options): """Count the number of query results, up to a limit. This is the asynchronous version of Query.count(). """ qry = self._fix_namespace() return qry._count_async(limit=limit, **q_options)
[ "def", "count_async", "(", "self", ",", "limit", "=", "None", ",", "*", "*", "q_options", ")", ":", "qry", "=", "self", ".", "_fix_namespace", "(", ")", "return", "qry", ".", "_count_async", "(", "limit", "=", "limit", ",", "*", "*", "q_options", ")" ]
Count the number of query results, up to a limit. This is the asynchronous version of Query.count().
[ "Count", "the", "number", "of", "query", "results", "up", "to", "a", "limit", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1290-L1296
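The synchronous count() wraps this. A sketch with an explicit limit to bound the scan (hypothetical model); per _count_async below, the common case never materializes entities at all, it just sums skipped_results:

from google.appengine.ext import ndb

class Employee(ndb.Model):  # hypothetical model
    age = ndb.IntegerProperty()

n = Employee.query(Employee.age >= 21).count(limit=1000)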
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query._count_async
def _count_async(self, limit=None, **q_options): """Internal version of count_async().""" # TODO: Support offset by incorporating it to the limit. if 'offset' in q_options: raise NotImplementedError('.count() and .count_async() do not support ' 'offsets at present.') if 'limit' in q_options: raise TypeError('Cannot specify limit as a non-keyword argument and as a ' 'keyword argument simultaneously.') elif limit is None: limit = _MAX_LIMIT if self._needs_multi_query(): # _MultiQuery does not support iterating over result batches, # so just fetch results and count them. # TODO: Use QueryIterator to avoid materializing the results list. q_options.setdefault('batch_size', limit) q_options.setdefault('keys_only', True) results = yield self.fetch_async(limit, **q_options) raise tasklets.Return(len(results)) # Issue a special query requesting 0 results at a given offset. # The skipped_results count will tell us how many hits there were # before that offset without fetching the items. q_options['offset'] = limit q_options['limit'] = 0 options = self._make_options(q_options) conn = tasklets.get_context()._conn dsquery = self._get_query(conn) rpc = dsquery.run_async(conn, options) total = 0 while rpc is not None: batch = yield rpc options = QueryOptions(offset=options.offset - batch.skipped_results, config=options) rpc = batch.next_batch_async(options) total += batch.skipped_results raise tasklets.Return(total)
python
def _count_async(self, limit=None, **q_options): """Internal version of count_async().""" # TODO: Support offset by incorporating it to the limit. if 'offset' in q_options: raise NotImplementedError('.count() and .count_async() do not support ' 'offsets at present.') if 'limit' in q_options: raise TypeError('Cannot specify limit as a non-keyword argument and as a ' 'keyword argument simultaneously.') elif limit is None: limit = _MAX_LIMIT if self._needs_multi_query(): # _MultiQuery does not support iterating over result batches, # so just fetch results and count them. # TODO: Use QueryIterator to avoid materializing the results list. q_options.setdefault('batch_size', limit) q_options.setdefault('keys_only', True) results = yield self.fetch_async(limit, **q_options) raise tasklets.Return(len(results)) # Issue a special query requesting 0 results at a given offset. # The skipped_results count will tell us how many hits there were # before that offset without fetching the items. q_options['offset'] = limit q_options['limit'] = 0 options = self._make_options(q_options) conn = tasklets.get_context()._conn dsquery = self._get_query(conn) rpc = dsquery.run_async(conn, options) total = 0 while rpc is not None: batch = yield rpc options = QueryOptions(offset=options.offset - batch.skipped_results, config=options) rpc = batch.next_batch_async(options) total += batch.skipped_results raise tasklets.Return(total)
[ "def", "_count_async", "(", "self", ",", "limit", "=", "None", ",", "*", "*", "q_options", ")", ":", "# TODO: Support offset by incorporating it to the limit.", "if", "'offset'", "in", "q_options", ":", "raise", "NotImplementedError", "(", "'.count() and .count_async() do not support '", "'offsets at present.'", ")", "if", "'limit'", "in", "q_options", ":", "raise", "TypeError", "(", "'Cannot specify limit as a non-keyword argument and as a '", "'keyword argument simultaneously.'", ")", "elif", "limit", "is", "None", ":", "limit", "=", "_MAX_LIMIT", "if", "self", ".", "_needs_multi_query", "(", ")", ":", "# _MultiQuery does not support iterating over result batches,", "# so just fetch results and count them.", "# TODO: Use QueryIterator to avoid materializing the results list.", "q_options", ".", "setdefault", "(", "'batch_size'", ",", "limit", ")", "q_options", ".", "setdefault", "(", "'keys_only'", ",", "True", ")", "results", "=", "yield", "self", ".", "fetch_async", "(", "limit", ",", "*", "*", "q_options", ")", "raise", "tasklets", ".", "Return", "(", "len", "(", "results", ")", ")", "# Issue a special query requesting 0 results at a given offset.", "# The skipped_results count will tell us how many hits there were", "# before that offset without fetching the items.", "q_options", "[", "'offset'", "]", "=", "limit", "q_options", "[", "'limit'", "]", "=", "0", "options", "=", "self", ".", "_make_options", "(", "q_options", ")", "conn", "=", "tasklets", ".", "get_context", "(", ")", ".", "_conn", "dsquery", "=", "self", ".", "_get_query", "(", "conn", ")", "rpc", "=", "dsquery", ".", "run_async", "(", "conn", ",", "options", ")", "total", "=", "0", "while", "rpc", "is", "not", "None", ":", "batch", "=", "yield", "rpc", "options", "=", "QueryOptions", "(", "offset", "=", "options", ".", "offset", "-", "batch", ".", "skipped_results", ",", "config", "=", "options", ")", "rpc", "=", "batch", ".", "next_batch_async", "(", "options", ")", "total", "+=", "batch", ".", "skipped_results", "raise", "tasklets", ".", "Return", "(", "total", ")" ]
Internal version of count_async().
[ "Internal", "version", "of", "count_async", "()", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1299-L1335
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query.fetch_page_async
def fetch_page_async(self, page_size, **q_options): """Fetch a page of results. This is the asynchronous version of Query.fetch_page(). """ qry = self._fix_namespace() return qry._fetch_page_async(page_size, **q_options)
python
def fetch_page_async(self, page_size, **q_options): """Fetch a page of results. This is the asynchronous version of Query.fetch_page(). """ qry = self._fix_namespace() return qry._fetch_page_async(page_size, **q_options)
[ "def", "fetch_page_async", "(", "self", ",", "page_size", ",", "*", "*", "q_options", ")", ":", "qry", "=", "self", ".", "_fix_namespace", "(", ")", "return", "qry", ".", "_fetch_page_async", "(", "page_size", ",", "*", "*", "q_options", ")" ]
Fetch a page of results. This is the asynchronous version of Query.fetch_page().
[ "Fetch", "a", "page", "of", "results", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1365-L1371
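fetch_page() (the synchronous wrapper) returns a (results, cursor, more) triple, which supports the standard paging loop. A sketch, assuming a hypothetical model and passing start_cursor=None on the first iteration (which QueryOptions treats as unset):

from google.appengine.ext import ndb

class Employee(ndb.Model):  # hypothetical model
    name = ndb.StringProperty()

cursor = None
while True:
    page, cursor, more = Employee.query().fetch_page(
        20, start_cursor=cursor)
    for emp in page:
        pass  # process emp (placeholder)
    if not more:
        break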
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query._fetch_page_async
def _fetch_page_async(self, page_size, **q_options): """Internal version of fetch_page_async().""" q_options.setdefault('batch_size', page_size) q_options.setdefault('produce_cursors', True) it = self.iter(limit=page_size + 1, **q_options) results = [] while (yield it.has_next_async()): results.append(it.next()) if len(results) >= page_size: break try: cursor = it.cursor_after() except datastore_errors.BadArgumentError: cursor = None raise tasklets.Return(results, cursor, it.probably_has_next())
python
def _fetch_page_async(self, page_size, **q_options): """Internal version of fetch_page_async().""" q_options.setdefault('batch_size', page_size) q_options.setdefault('produce_cursors', True) it = self.iter(limit=page_size + 1, **q_options) results = [] while (yield it.has_next_async()): results.append(it.next()) if len(results) >= page_size: break try: cursor = it.cursor_after() except datastore_errors.BadArgumentError: cursor = None raise tasklets.Return(results, cursor, it.probably_has_next())
[ "def", "_fetch_page_async", "(", "self", ",", "page_size", ",", "*", "*", "q_options", ")", ":", "q_options", ".", "setdefault", "(", "'batch_size'", ",", "page_size", ")", "q_options", ".", "setdefault", "(", "'produce_cursors'", ",", "True", ")", "it", "=", "self", ".", "iter", "(", "limit", "=", "page_size", "+", "1", ",", "*", "*", "q_options", ")", "results", "=", "[", "]", "while", "(", "yield", "it", ".", "has_next_async", "(", ")", ")", ":", "results", ".", "append", "(", "it", ".", "next", "(", ")", ")", "if", "len", "(", "results", ")", ">=", "page_size", ":", "break", "try", ":", "cursor", "=", "it", ".", "cursor_after", "(", ")", "except", "datastore_errors", ".", "BadArgumentError", ":", "cursor", "=", "None", "raise", "tasklets", ".", "Return", "(", "results", ",", "cursor", ",", "it", ".", "probably_has_next", "(", ")", ")" ]
Internal version of fetch_page_async().
[ "Internal", "version", "of", "fetch_page_async", "()", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1374-L1388
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query._make_options
def _make_options(self, q_options): """Helper to construct a QueryOptions object from keyword arguments. Args: q_options: a dict of keyword arguments. Note that either 'options' or 'config' can be used to pass another QueryOptions object, but not both. If another QueryOptions object is given it provides default values. If self.default_options is set, it is used to provide defaults, which have a lower precedence than options set in q_options. Returns: A QueryOptions object, or None if q_options is empty. """ if not (q_options or self.__projection): return self.default_options if 'options' in q_options: # Move 'options' to 'config' since that is what QueryOptions() uses. if 'config' in q_options: raise TypeError('You cannot use config= and options= at the same time') q_options['config'] = q_options.pop('options') if q_options.get('projection'): try: q_options['projection'] = self._to_property_names( q_options['projection']) except TypeError, e: raise datastore_errors.BadArgumentError(e) self._check_properties(q_options['projection']) options = QueryOptions(**q_options) # Populate projection if it hasn't been overridden. if (options.keys_only is None and options.projection is None and self.__projection): options = QueryOptions( projection=self._to_property_names(self.__projection), config=options) # Populate default options if self.default_options is not None: options = self.default_options.merge(options) return options
python
def _make_options(self, q_options): """Helper to construct a QueryOptions object from keyword arguments. Args: q_options: a dict of keyword arguments. Note that either 'options' or 'config' can be used to pass another QueryOptions object, but not both. If another QueryOptions object is given it provides default values. If self.default_options is set, it is used to provide defaults, which have a lower precedence than options set in q_options. Returns: A QueryOptions object, or None if q_options is empty. """ if not (q_options or self.__projection): return self.default_options if 'options' in q_options: # Move 'options' to 'config' since that is what QueryOptions() uses. if 'config' in q_options: raise TypeError('You cannot use config= and options= at the same time') q_options['config'] = q_options.pop('options') if q_options.get('projection'): try: q_options['projection'] = self._to_property_names( q_options['projection']) except TypeError, e: raise datastore_errors.BadArgumentError(e) self._check_properties(q_options['projection']) options = QueryOptions(**q_options) # Populate projection if it hasn't been overridden. if (options.keys_only is None and options.projection is None and self.__projection): options = QueryOptions( projection=self._to_property_names(self.__projection), config=options) # Populate default options if self.default_options is not None: options = self.default_options.merge(options) return options
[ "def", "_make_options", "(", "self", ",", "q_options", ")", ":", "if", "not", "(", "q_options", "or", "self", ".", "__projection", ")", ":", "return", "self", ".", "default_options", "if", "'options'", "in", "q_options", ":", "# Move 'options' to 'config' since that is what QueryOptions() uses.", "if", "'config'", "in", "q_options", ":", "raise", "TypeError", "(", "'You cannot use config= and options= at the same time'", ")", "q_options", "[", "'config'", "]", "=", "q_options", ".", "pop", "(", "'options'", ")", "if", "q_options", ".", "get", "(", "'projection'", ")", ":", "try", ":", "q_options", "[", "'projection'", "]", "=", "self", ".", "_to_property_names", "(", "q_options", "[", "'projection'", "]", ")", "except", "TypeError", ",", "e", ":", "raise", "datastore_errors", ".", "BadArgumentError", "(", "e", ")", "self", ".", "_check_properties", "(", "q_options", "[", "'projection'", "]", ")", "options", "=", "QueryOptions", "(", "*", "*", "q_options", ")", "# Populate projection if it hasn't been overridden.", "if", "(", "options", ".", "keys_only", "is", "None", "and", "options", ".", "projection", "is", "None", "and", "self", ".", "__projection", ")", ":", "options", "=", "QueryOptions", "(", "projection", "=", "self", ".", "_to_property_names", "(", "self", ".", "__projection", ")", ",", "config", "=", "options", ")", "# Populate default options", "if", "self", ".", "default_options", "is", "not", "None", ":", "options", "=", "self", ".", "default_options", ".", "merge", "(", "options", ")", "return", "options" ]
Helper to construct a QueryOptions object from keyword arguments. Args: q_options: a dict of keyword arguments. Note that either 'options' or 'config' can be used to pass another QueryOptions object, but not both. If another QueryOptions object is given it provides default values. If self.default_options is set, it is used to provide defaults, which have a lower precedence than options set in q_options. Returns: A QueryOptions object, or None if q_options is empty.
[ "Helper", "to", "construct", "a", "QueryOptions", "object", "from", "keyword", "arguments", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1390-L1432
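This is the machinery behind the **q_options keywords accepted throughout the Query API; a sketch of a few of them (hypothetical model):

from google.appengine.ext import ndb

class Employee(ndb.Model):  # hypothetical model
    name = ndb.StringProperty()

qry = Employee.query()
keys = qry.fetch(10, keys_only=True, offset=5)
names = qry.fetch(10, projection=[Employee.name])
# Passing both options= and config= would raise TypeError, as above.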
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query.analyze
def analyze(self): """Return a list giving the parameters required by a query.""" class MockBindings(dict): def __contains__(self, key): self[key] = None return True bindings = MockBindings() used = {} ancestor = self.ancestor if isinstance(ancestor, ParameterizedThing): ancestor = ancestor.resolve(bindings, used) filters = self.filters if filters is not None: filters = filters.resolve(bindings, used) return sorted(used)
python
def analyze(self): """Return a list giving the parameters required by a query.""" class MockBindings(dict): def __contains__(self, key): self[key] = None return True bindings = MockBindings() used = {} ancestor = self.ancestor if isinstance(ancestor, ParameterizedThing): ancestor = ancestor.resolve(bindings, used) filters = self.filters if filters is not None: filters = filters.resolve(bindings, used) return sorted(used)
[ "def", "analyze", "(", "self", ")", ":", "class", "MockBindings", "(", "dict", ")", ":", "def", "__contains__", "(", "self", ",", "key", ")", ":", "self", "[", "key", "]", "=", "None", "return", "True", "bindings", "=", "MockBindings", "(", ")", "used", "=", "{", "}", "ancestor", "=", "self", ".", "ancestor", "if", "isinstance", "(", "ancestor", ",", "ParameterizedThing", ")", ":", "ancestor", "=", "ancestor", ".", "resolve", "(", "bindings", ",", "used", ")", "filters", "=", "self", ".", "filters", "if", "filters", "is", "not", "None", ":", "filters", "=", "filters", ".", "resolve", "(", "bindings", ",", "used", ")", "return", "sorted", "(", "used", ")" ]
Return a list giving the parameters required by a query.
[ "Return", "a", "list", "giving", "the", "parameters", "required", "by", "a", "query", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1453-L1468
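A small illustrative sketch of analyze() (the GQL string and kind are hypothetical; assumes ndb is importable):

from google.appengine.ext import ndb

qry = ndb.gql("SELECT * FROM Account WHERE userid = :1 AND state = :state")
print qry.analyze()   # [1, 'state'] -- the parameters still awaiting values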
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
Query._bind
def _bind(self, args, kwds): """Bind parameter values. Returns a new Query object.""" bindings = dict(kwds) for i, arg in enumerate(args): bindings[i + 1] = arg used = {} ancestor = self.ancestor if isinstance(ancestor, ParameterizedThing): ancestor = ancestor.resolve(bindings, used) filters = self.filters if filters is not None: filters = filters.resolve(bindings, used) unused = [] for i in xrange(1, 1 + len(args)): if i not in used: unused.append(i) if unused: raise datastore_errors.BadArgumentError( 'Positional arguments %s were given but not used.' % ', '.join(str(i) for i in unused)) return self.__class__(kind=self.kind, ancestor=ancestor, filters=filters, orders=self.orders, app=self.app, namespace=self.namespace, default_options=self.default_options, projection=self.projection, group_by=self.group_by)
python
def _bind(self, args, kwds): """Bind parameter values. Returns a new Query object.""" bindings = dict(kwds) for i, arg in enumerate(args): bindings[i + 1] = arg used = {} ancestor = self.ancestor if isinstance(ancestor, ParameterizedThing): ancestor = ancestor.resolve(bindings, used) filters = self.filters if filters is not None: filters = filters.resolve(bindings, used) unused = [] for i in xrange(1, 1 + len(args)): if i not in used: unused.append(i) if unused: raise datastore_errors.BadArgumentError( 'Positional arguments %s were given but not used.' % ', '.join(str(i) for i in unused)) return self.__class__(kind=self.kind, ancestor=ancestor, filters=filters, orders=self.orders, app=self.app, namespace=self.namespace, default_options=self.default_options, projection=self.projection, group_by=self.group_by)
[ "def", "_bind", "(", "self", ",", "args", ",", "kwds", ")", ":", "bindings", "=", "dict", "(", "kwds", ")", "for", "i", ",", "arg", "in", "enumerate", "(", "args", ")", ":", "bindings", "[", "i", "+", "1", "]", "=", "arg", "used", "=", "{", "}", "ancestor", "=", "self", ".", "ancestor", "if", "isinstance", "(", "ancestor", ",", "ParameterizedThing", ")", ":", "ancestor", "=", "ancestor", ".", "resolve", "(", "bindings", ",", "used", ")", "filters", "=", "self", ".", "filters", "if", "filters", "is", "not", "None", ":", "filters", "=", "filters", ".", "resolve", "(", "bindings", ",", "used", ")", "unused", "=", "[", "]", "for", "i", "in", "xrange", "(", "1", ",", "1", "+", "len", "(", "args", ")", ")", ":", "if", "i", "not", "in", "used", ":", "unused", ".", "append", "(", "i", ")", "if", "unused", ":", "raise", "datastore_errors", ".", "BadArgumentError", "(", "'Positional arguments %s were given but not used.'", "%", "', '", ".", "join", "(", "str", "(", "i", ")", "for", "i", "in", "unused", ")", ")", "return", "self", ".", "__class__", "(", "kind", "=", "self", ".", "kind", ",", "ancestor", "=", "ancestor", ",", "filters", "=", "filters", ",", "orders", "=", "self", ".", "orders", ",", "app", "=", "self", ".", "app", ",", "namespace", "=", "self", ".", "namespace", ",", "default_options", "=", "self", ".", "default_options", ",", "projection", "=", "self", ".", "projection", ",", "group_by", "=", "self", ".", "group_by", ")" ]
Bind parameter values. Returns a new Query object.
[ "Bind", "parameter", "values", ".", "Returns", "a", "new", "Query", "object", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1474-L1498
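_bind() is normally reached through the public bind() method; a hedged sketch with a hypothetical GQL string and kind:

from google.appengine.ext import ndb

qry = ndb.gql("SELECT * FROM Account WHERE userid = :1")
bound = qry.bind(42)     # returns a new Query with the parameter filled in
results = bound.fetch(5)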
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
QueryIterator.cursor_before
def cursor_before(self): """Return the cursor before the current item. You must pass a QueryOptions object with produce_cursors=True for this to work. If there is no cursor or no current item, raise BadArgumentError. Before next() has returned there is no cursor. Once the loop is exhausted, this returns the cursor after the last item. """ if self._exhausted: return self.cursor_after() if isinstance(self._cursor_before, BaseException): raise self._cursor_before return self._cursor_before
python
def cursor_before(self): """Return the cursor before the current item. You must pass a QueryOptions object with produce_cursors=True for this to work. If there is no cursor or no current item, raise BadArgumentError. Before next() has returned there is no cursor. Once the loop is exhausted, this returns the cursor after the last item. """ if self._exhausted: return self.cursor_after() if isinstance(self._cursor_before, BaseException): raise self._cursor_before return self._cursor_before
[ "def", "cursor_before", "(", "self", ")", ":", "if", "self", ".", "_exhausted", ":", "return", "self", ".", "cursor_after", "(", ")", "if", "isinstance", "(", "self", ".", "_cursor_before", ",", "BaseException", ")", ":", "raise", "self", ".", "_cursor_before", "return", "self", ".", "_cursor_before" ]
Return the cursor before the current item. You must pass a QueryOptions object with produce_cursors=True for this to work. If there is no cursor or no current item, raise BadArgumentError. Before next() has returned there is no cursor. Once the loop is exhausted, this returns the cursor after the last item.
[ "Return", "the", "cursor", "before", "the", "current", "item", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1693-L1707
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
QueryIterator.cursor_after
def cursor_after(self): """Return the cursor after the current item. You must pass a QueryOptions object with produce_cursors=True for this to work. If there is no cursor or no current item, raise BadArgumentError. Before next() has returned there is no cursor. Once the loop is exhausted, this returns the cursor after the last item. """ if isinstance(self._cursor_after, BaseException): raise self._cursor_after return self._cursor_after
python
def cursor_after(self): """Return the cursor after the current item. You must pass a QueryOptions object with produce_cursors=True for this to work. If there is no cursor or no current item, raise BadArgumentError. Before next() has returned there is no cursor. Once the loop is exhausted, this returns the cursor after the last item. """ if isinstance(self._cursor_after, BaseException): raise self._cursor_after return self._cursor_after
[ "def", "cursor_after", "(", "self", ")", ":", "if", "isinstance", "(", "self", ".", "_cursor_after", ",", "BaseException", ")", ":", "raise", "self", ".", "_cursor_after", "return", "self", ".", "_cursor_after" ]
Return the cursor after the current item. You must pass a QueryOptions object with produce_cursors=True for this to work. If there is no cursor or no current item, raise BadArgumentError. Before next() has returned there is no cursor. Once the loop is exhausted, this returns the cursor after the last item.
[ "Return", "the", "cursor", "after", "the", "current", "item", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1709-L1721
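A usage sketch covering both cursor_before() and cursor_after(), assuming ndb is importable and a hypothetical 'Account' kind; note that produce_cursors=True is required:

from google.appengine.ext import ndb

qry = ndb.Query(kind='Account')
it = qry.iter(produce_cursors=True)
first = it.next()
cur = it.cursor_after()                  # cursor just past `first`
more = qry.fetch(10, start_cursor=cur)   # resume where the iterator left off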
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
QueryIterator.has_next_async
def has_next_async(self): """Return a Future whose result will say whether a next item is available. See the module docstring for the usage pattern. """ if self._fut is None: self._fut = self._iter.getq() flag = True try: yield self._fut except EOFError: flag = False raise tasklets.Return(flag)
python
def has_next_async(self): """Return a Future whose result will say whether a next item is available. See the module docstring for the usage pattern. """ if self._fut is None: self._fut = self._iter.getq() flag = True try: yield self._fut except EOFError: flag = False raise tasklets.Return(flag)
[ "def", "has_next_async", "(", "self", ")", ":", "if", "self", ".", "_fut", "is", "None", ":", "self", ".", "_fut", "=", "self", ".", "_iter", ".", "getq", "(", ")", "flag", "=", "True", "try", ":", "yield", "self", ".", "_fut", "except", "EOFError", ":", "flag", "=", "False", "raise", "tasklets", ".", "Return", "(", "flag", ")" ]
Return a Future whose result will say whether a next item is available. See the module docstring for the usage pattern.
[ "Return", "a", "Future", "whose", "result", "will", "say", "whether", "a", "next", "item", "is", "available", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1784-L1796
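The usage pattern the docstring refers to looks roughly like this inside a tasklet (a sketch with a hypothetical kind, not the module docstring itself):

from google.appengine.ext import ndb

@ndb.tasklet
def count_accounts():
  it = ndb.Query(kind='Account').iter()
  n = 0
  while (yield it.has_next_async()):
    it.next()
    n += 1
  raise ndb.Return(n)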
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
QueryIterator.next
def next(self): """Iterator protocol: get next item or raise StopIteration.""" if self._fut is None: self._fut = self._iter.getq() try: try: # The future result is set by this class's _extended_callback # method. # pylint: disable=unpacking-non-sequence (ent, self._cursor_before, self._cursor_after, self._more_results) = self._fut.get_result() return ent except EOFError: self._exhausted = True raise StopIteration finally: self._fut = None
python
def next(self): """Iterator protocol: get next item or raise StopIteration.""" if self._fut is None: self._fut = self._iter.getq() try: try: # The future result is set by this class's _extended_callback # method. # pylint: disable=unpacking-non-sequence (ent, self._cursor_before, self._cursor_after, self._more_results) = self._fut.get_result() return ent except EOFError: self._exhausted = True raise StopIteration finally: self._fut = None
[ "def", "next", "(", "self", ")", ":", "if", "self", ".", "_fut", "is", "None", ":", "self", ".", "_fut", "=", "self", ".", "_iter", ".", "getq", "(", ")", "try", ":", "try", ":", "# The future result is set by this class's _extended_callback", "# method.", "# pylint: disable=unpacking-non-sequence", "(", "ent", ",", "self", ".", "_cursor_before", ",", "self", ".", "_cursor_after", ",", "self", ".", "_more_results", ")", "=", "self", ".", "_fut", ".", "get_result", "(", ")", "return", "ent", "except", "EOFError", ":", "self", ".", "_exhausted", "=", "True", "raise", "StopIteration", "finally", ":", "self", ".", "_fut", "=", "None" ]
Iterator protocol: get next item or raise StopIteration.
[ "Iterator", "protocol", ":", "get", "next", "item", "or", "raise", "StopIteration", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1798-L1816
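The iterator protocol in plain synchronous form, as a sketch (hypothetical kind):

from google.appengine.ext import ndb

it = ndb.Query(kind='Account').iter()
try:
  while True:
    entity = it.next()   # raises StopIteration once results are exhausted
except StopIteration:
  pass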
GoogleCloudPlatform/datastore-ndb-python
ndb/query.py
_MultiQuery.run_to_queue
def run_to_queue(self, queue, conn, options=None): """Run this query, putting entities into the given queue.""" if options is None: # Default options. offset = None limit = None keys_only = None else: # Capture options we need to simulate. offset = options.offset limit = options.limit keys_only = options.keys_only # Cursors are supported for certain orders only. if (options.start_cursor or options.end_cursor or options.produce_cursors): names = set() if self.__orders is not None: names = self.__orders._get_prop_names() if '__key__' not in names: raise datastore_errors.BadArgumentError( '_MultiQuery with cursors requires __key__ order') # Decide if we need to modify the options passed to subqueries. # NOTE: It would seem we can sometimes let Cloud Datastore handle # the offset natively, but this would thwart the duplicate key # detection, so we always have to emulate the offset here. # We can set the limit we pass along to offset + limit though, # since that is the maximum number of results from a single # subquery we will ever have to consider. modifiers = {} if offset: modifiers['offset'] = None if limit is not None: modifiers['limit'] = min(_MAX_LIMIT, offset + limit) if keys_only and self.__orders is not None: modifiers['keys_only'] = None if modifiers: options = QueryOptions(config=options, **modifiers) if offset is None: offset = 0 if limit is None: limit = _MAX_LIMIT if self.__orders is None: # Run the subqueries sequentially; there is no order to keep. keys_seen = set() for subq in self.__subqueries: if limit <= 0: break subit = tasklets.SerialQueueFuture('_MultiQuery.run_to_queue[ser]') subq.run_to_queue(subit, conn, options=options) while limit > 0: try: batch, index, result = yield subit.getq() except EOFError: break if keys_only: key = result else: key = result._key if key not in keys_seen: keys_seen.add(key) if offset > 0: offset -= 1 else: limit -= 1 queue.putq((None, None, result)) queue.complete() return # This with-statement causes the adapter to set _orig_pb on all # entities it converts from protobuf. # TODO: Does this interact properly with the cache? with conn.adapter: # Start running all the sub-queries. todo = [] # List of (subit, dsquery) tuples. for subq in self.__subqueries: dsquery = subq._get_query(conn) subit = tasklets.SerialQueueFuture('_MultiQuery.run_to_queue[par]') subq.run_to_queue(subit, conn, options=options, dsquery=dsquery) todo.append((subit, dsquery)) # Create a list of (first-entity, subquery-iterator) tuples. state = [] # List of _SubQueryIteratorState instances. for subit, dsquery in todo: try: thing = yield subit.getq() except EOFError: continue else: state.append(_SubQueryIteratorState(thing, subit, dsquery, self.__orders)) # Now turn it into a sorted heap. The heapq module claims that # calling heapify() is more efficient than calling heappush() for # each item. heapq.heapify(state) # Repeatedly yield the lowest entity from the state vector, # filtering duplicates. This is essentially a multi-way merge # sort. One would think it should be possible to filter # duplicates simply by dropping other entities already in the # state vector that are equal to the lowest entity, but because of # the weird sorting of repeated properties, we have to explicitly # keep a set of all keys, so we can remove later occurrences. # Note that entities will still be sorted correctly, within the # constraints given by the sort order. keys_seen = set() while state and limit > 0: item = heapq.heappop(state) batch = item.batch index = item.index entity = item.entity key = entity._key if key not in keys_seen: keys_seen.add(key) if offset > 0: offset -= 1 else: limit -= 1 if keys_only: queue.putq((batch, index, key)) else: queue.putq((batch, index, entity)) subit = item.iterator try: batch, index, entity = yield subit.getq() except EOFError: pass else: item.batch = batch item.index = index item.entity = entity heapq.heappush(state, item) queue.complete()
python
def run_to_queue(self, queue, conn, options=None): """Run this query, putting entities into the given queue.""" if options is None: # Default options. offset = None limit = None keys_only = None else: # Capture options we need to simulate. offset = options.offset limit = options.limit keys_only = options.keys_only # Cursors are supported for certain orders only. if (options.start_cursor or options.end_cursor or options.produce_cursors): names = set() if self.__orders is not None: names = self.__orders._get_prop_names() if '__key__' not in names: raise datastore_errors.BadArgumentError( '_MultiQuery with cursors requires __key__ order') # Decide if we need to modify the options passed to subqueries. # NOTE: It would seem we can sometimes let Cloud Datastore handle # the offset natively, but this would thwart the duplicate key # detection, so we always have to emulate the offset here. # We can set the limit we pass along to offset + limit though, # since that is the maximum number of results from a single # subquery we will ever have to consider. modifiers = {} if offset: modifiers['offset'] = None if limit is not None: modifiers['limit'] = min(_MAX_LIMIT, offset + limit) if keys_only and self.__orders is not None: modifiers['keys_only'] = None if modifiers: options = QueryOptions(config=options, **modifiers) if offset is None: offset = 0 if limit is None: limit = _MAX_LIMIT if self.__orders is None: # Run the subqueries sequentially; there is no order to keep. keys_seen = set() for subq in self.__subqueries: if limit <= 0: break subit = tasklets.SerialQueueFuture('_MultiQuery.run_to_queue[ser]') subq.run_to_queue(subit, conn, options=options) while limit > 0: try: batch, index, result = yield subit.getq() except EOFError: break if keys_only: key = result else: key = result._key if key not in keys_seen: keys_seen.add(key) if offset > 0: offset -= 1 else: limit -= 1 queue.putq((None, None, result)) queue.complete() return # This with-statement causes the adapter to set _orig_pb on all # entities it converts from protobuf. # TODO: Does this interact properly with the cache? with conn.adapter: # Start running all the sub-queries. todo = [] # List of (subit, dsquery) tuples. for subq in self.__subqueries: dsquery = subq._get_query(conn) subit = tasklets.SerialQueueFuture('_MultiQuery.run_to_queue[par]') subq.run_to_queue(subit, conn, options=options, dsquery=dsquery) todo.append((subit, dsquery)) # Create a list of (first-entity, subquery-iterator) tuples. state = [] # List of _SubQueryIteratorState instances. for subit, dsquery in todo: try: thing = yield subit.getq() except EOFError: continue else: state.append(_SubQueryIteratorState(thing, subit, dsquery, self.__orders)) # Now turn it into a sorted heap. The heapq module claims that # calling heapify() is more efficient than calling heappush() for # each item. heapq.heapify(state) # Repeatedly yield the lowest entity from the state vector, # filtering duplicates. This is essentially a multi-way merge # sort. One would think it should be possible to filter # duplicates simply by dropping other entities already in the # state vector that are equal to the lowest entity, but because of # the weird sorting of repeated properties, we have to explicitly # keep a set of all keys, so we can remove later occurrences. # Note that entities will still be sorted correctly, within the # constraints given by the sort order. keys_seen = set() while state and limit > 0: item = heapq.heappop(state) batch = item.batch index = item.index entity = item.entity key = entity._key if key not in keys_seen: keys_seen.add(key) if offset > 0: offset -= 1 else: limit -= 1 if keys_only: queue.putq((batch, index, key)) else: queue.putq((batch, index, entity)) subit = item.iterator try: batch, index, entity = yield subit.getq() except EOFError: pass else: item.batch = batch item.index = index item.entity = entity heapq.heappush(state, item) queue.complete()
[ "def", "run_to_queue", "(", "self", ",", "queue", ",", "conn", ",", "options", "=", "None", ")", ":", "if", "options", "is", "None", ":", "# Default options.", "offset", "=", "None", "limit", "=", "None", "keys_only", "=", "None", "else", ":", "# Capture options we need to simulate.", "offset", "=", "options", ".", "offset", "limit", "=", "options", ".", "limit", "keys_only", "=", "options", ".", "keys_only", "# Cursors are supported for certain orders only.", "if", "(", "options", ".", "start_cursor", "or", "options", ".", "end_cursor", "or", "options", ".", "produce_cursors", ")", ":", "names", "=", "set", "(", ")", "if", "self", ".", "__orders", "is", "not", "None", ":", "names", "=", "self", ".", "__orders", ".", "_get_prop_names", "(", ")", "if", "'__key__'", "not", "in", "names", ":", "raise", "datastore_errors", ".", "BadArgumentError", "(", "'_MultiQuery with cursors requires __key__ order'", ")", "# Decide if we need to modify the options passed to subqueries.", "# NOTE: It would seem we can sometimes let Cloud Datastore handle", "# the offset natively, but this would thwart the duplicate key", "# detection, so we always have to emulate the offset here.", "# We can set the limit we pass along to offset + limit though,", "# since that is the maximum number of results from a single", "# subquery we will ever have to consider.", "modifiers", "=", "{", "}", "if", "offset", ":", "modifiers", "[", "'offset'", "]", "=", "None", "if", "limit", "is", "not", "None", ":", "modifiers", "[", "'limit'", "]", "=", "min", "(", "_MAX_LIMIT", ",", "offset", "+", "limit", ")", "if", "keys_only", "and", "self", ".", "__orders", "is", "not", "None", ":", "modifiers", "[", "'keys_only'", "]", "=", "None", "if", "modifiers", ":", "options", "=", "QueryOptions", "(", "config", "=", "options", ",", "*", "*", "modifiers", ")", "if", "offset", "is", "None", ":", "offset", "=", "0", "if", "limit", "is", "None", ":", "limit", "=", "_MAX_LIMIT", "if", "self", ".", "__orders", "is", "None", ":", "# Run the subqueries sequentially; there is no order to keep.", "keys_seen", "=", "set", "(", ")", "for", "subq", "in", "self", ".", "__subqueries", ":", "if", "limit", "<=", "0", ":", "break", "subit", "=", "tasklets", ".", "SerialQueueFuture", "(", "'_MultiQuery.run_to_queue[ser]'", ")", "subq", ".", "run_to_queue", "(", "subit", ",", "conn", ",", "options", "=", "options", ")", "while", "limit", ">", "0", ":", "try", ":", "batch", ",", "index", ",", "result", "=", "yield", "subit", ".", "getq", "(", ")", "except", "EOFError", ":", "break", "if", "keys_only", ":", "key", "=", "result", "else", ":", "key", "=", "result", ".", "_key", "if", "key", "not", "in", "keys_seen", ":", "keys_seen", ".", "add", "(", "key", ")", "if", "offset", ">", "0", ":", "offset", "-=", "1", "else", ":", "limit", "-=", "1", "queue", ".", "putq", "(", "(", "None", ",", "None", ",", "result", ")", ")", "queue", ".", "complete", "(", ")", "return", "# This with-statement causes the adapter to set _orig_pb on all", "# entities it converts from protobuf.", "# TODO: Does this interact properly with the cache?", "with", "conn", ".", "adapter", ":", "# Start running all the sub-queries.", "todo", "=", "[", "]", "# List of (subit, dsquery) tuples.", "for", "subq", "in", "self", ".", "__subqueries", ":", "dsquery", "=", "subq", ".", "_get_query", "(", "conn", ")", "subit", "=", "tasklets", ".", "SerialQueueFuture", "(", "'_MultiQuery.run_to_queue[par]'", ")", "subq", ".", "run_to_queue", "(", "subit", ",", "conn", ",", "options", "=", "options", 
",", "dsquery", "=", "dsquery", ")", "todo", ".", "append", "(", "(", "subit", ",", "dsquery", ")", ")", "# Create a list of (first-entity, subquery-iterator) tuples.", "state", "=", "[", "]", "# List of _SubQueryIteratorState instances.", "for", "subit", ",", "dsquery", "in", "todo", ":", "try", ":", "thing", "=", "yield", "subit", ".", "getq", "(", ")", "except", "EOFError", ":", "continue", "else", ":", "state", ".", "append", "(", "_SubQueryIteratorState", "(", "thing", ",", "subit", ",", "dsquery", ",", "self", ".", "__orders", ")", ")", "# Now turn it into a sorted heap. The heapq module claims that", "# calling heapify() is more efficient than calling heappush() for", "# each item.", "heapq", ".", "heapify", "(", "state", ")", "# Repeatedly yield the lowest entity from the state vector,", "# filtering duplicates. This is essentially a multi-way merge", "# sort. One would think it should be possible to filter", "# duplicates simply by dropping other entities already in the", "# state vector that are equal to the lowest entity, but because of", "# the weird sorting of repeated properties, we have to explicitly", "# keep a set of all keys, so we can remove later occurrences.", "# Note that entities will still be sorted correctly, within the", "# constraints given by the sort order.", "keys_seen", "=", "set", "(", ")", "while", "state", "and", "limit", ">", "0", ":", "item", "=", "heapq", ".", "heappop", "(", "state", ")", "batch", "=", "item", ".", "batch", "index", "=", "item", ".", "index", "entity", "=", "item", ".", "entity", "key", "=", "entity", ".", "_key", "if", "key", "not", "in", "keys_seen", ":", "keys_seen", ".", "add", "(", "key", ")", "if", "offset", ">", "0", ":", "offset", "-=", "1", "else", ":", "limit", "-=", "1", "if", "keys_only", ":", "queue", ".", "putq", "(", "(", "batch", ",", "index", ",", "key", ")", ")", "else", ":", "queue", ".", "putq", "(", "(", "batch", ",", "index", ",", "entity", ")", ")", "subit", "=", "item", ".", "iterator", "try", ":", "batch", ",", "index", ",", "entity", "=", "yield", "subit", ".", "getq", "(", ")", "except", "EOFError", ":", "pass", "else", ":", "item", ".", "batch", "=", "batch", "item", ".", "index", "=", "index", "item", ".", "entity", "=", "entity", "heapq", ".", "heappush", "(", "state", ",", "item", ")", "queue", ".", "complete", "(", ")" ]
Run this query, putting entities into the given queue.
[ "Run", "this", "query", "putting", "entities", "into", "the", "given", "queue", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/query.py#L1910-L2047
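_MultiQuery is not constructed directly; it is what an IN or OR filter expands to. A hedged sketch with a hypothetical model:

from google.appengine.ext import ndb

class Account(ndb.Model):   # hypothetical model, for illustration only
  name = ndb.StringProperty()

# The IN filter expands into one subquery per value; their results are
# merged and de-duplicated by key as described in the function above.
qry = Account.query(Account.name.IN(['alice', 'bob']))
accounts = qry.fetch(20)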
GoogleCloudPlatform/datastore-ndb-python
ndb/memcache_client.py
MemcacheClient.memcache_get
def memcache_get(self, key, for_cas=False, namespace=None, use_cache=False, deadline=None): """An auto-batching wrapper for memcache.get() or .get_multi(). Args: key: Key to get. This must be a string; no prefix is applied. for_cas: If True, request and store CAS ids on the Context. namespace: Optional namespace. deadline: Optional deadline for the RPC. Returns: A Future (!) whose return value is the value retrieved from memcache, or None. """ if not isinstance(key, basestring): raise TypeError('key must be a string; received %r' % key) if not isinstance(for_cas, bool): raise TypeError('for_cas must be a bool; received %r' % for_cas) if namespace is None: namespace = namespace_manager.get_namespace() options = (for_cas, namespace, deadline) batcher = self.memcache_get_batcher if use_cache: return batcher.add_once(key, options) else: return batcher.add(key, options)
python
def memcache_get(self, key, for_cas=False, namespace=None, use_cache=False, deadline=None): """An auto-batching wrapper for memcache.get() or .get_multi(). Args: key: Key to get. This must be a string; no prefix is applied. for_cas: If True, request and store CAS ids on the Context. namespace: Optional namespace. deadline: Optional deadline for the RPC. Returns: A Future (!) whose return value is the value retrieved from memcache, or None. """ if not isinstance(key, basestring): raise TypeError('key must be a string; received %r' % key) if not isinstance(for_cas, bool): raise TypeError('for_cas must be a bool; received %r' % for_cas) if namespace is None: namespace = namespace_manager.get_namespace() options = (for_cas, namespace, deadline) batcher = self.memcache_get_batcher if use_cache: return batcher.add_once(key, options) else: return batcher.add(key, options)
[ "def", "memcache_get", "(", "self", ",", "key", ",", "for_cas", "=", "False", ",", "namespace", "=", "None", ",", "use_cache", "=", "False", ",", "deadline", "=", "None", ")", ":", "if", "not", "isinstance", "(", "key", ",", "basestring", ")", ":", "raise", "TypeError", "(", "'key must be a string; received %r'", "%", "key", ")", "if", "not", "isinstance", "(", "for_cas", ",", "bool", ")", ":", "raise", "TypeError", "(", "'for_cas must be a bool; received %r'", "%", "for_cas", ")", "if", "namespace", "is", "None", ":", "namespace", "=", "namespace_manager", ".", "get_namespace", "(", ")", "options", "=", "(", "for_cas", ",", "namespace", ",", "deadline", ")", "batcher", "=", "self", ".", "memcache_get_batcher", "if", "use_cache", ":", "return", "batcher", ".", "add_once", "(", "key", ",", "options", ")", "else", ":", "return", "batcher", ".", "add", "(", "key", ",", "options", ")" ]
An auto-batching wrapper for memcache.get() or .get_multi(). Args: key: Key to get. This must be a string; no prefix is applied. for_cas: If True, request and store CAS ids on the Context. namespace: Optional namespace. deadline: Optional deadline for the RPC. Returns: A Future (!) whose return value is the value retrieved from memcache, or None.
[ "An", "auto", "-", "batching", "wrapper", "for", "memcache", ".", "get", "()", "or", ".", "get_multi", "()", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/memcache_client.py#L103-L128
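Callers typically reach this through the context; a minimal sketch, assuming a running App Engine environment:

from google.appengine.ext import ndb

ctx = ndb.get_context()
fut = ctx.memcache_get('greeting', namespace='demo')  # returns a Future
value = fut.get_result()                              # None on a cache miss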
GoogleCloudPlatform/datastore-ndb-python
ndb/utils.py
positional
def positional(max_pos_args): """A decorator to declare that only the first N arguments may be positional. Note that for methods, n includes 'self'. """ __ndb_debug__ = 'SKIP' def positional_decorator(wrapped): if not DEBUG: return wrapped __ndb_debug__ = 'SKIP' @wrapping(wrapped) def positional_wrapper(*args, **kwds): __ndb_debug__ = 'SKIP' if len(args) > max_pos_args: plural_s = '' if max_pos_args != 1: plural_s = 's' raise TypeError( '%s() takes at most %d positional argument%s (%d given)' % (wrapped.__name__, max_pos_args, plural_s, len(args))) return wrapped(*args, **kwds) return positional_wrapper return positional_decorator
python
def positional(max_pos_args): """A decorator to declare that only the first N arguments may be positional. Note that for methods, n includes 'self'. """ __ndb_debug__ = 'SKIP' def positional_decorator(wrapped): if not DEBUG: return wrapped __ndb_debug__ = 'SKIP' @wrapping(wrapped) def positional_wrapper(*args, **kwds): __ndb_debug__ = 'SKIP' if len(args) > max_pos_args: plural_s = '' if max_pos_args != 1: plural_s = 's' raise TypeError( '%s() takes at most %d positional argument%s (%d given)' % (wrapped.__name__, max_pos_args, plural_s, len(args))) return wrapped(*args, **kwds) return positional_wrapper return positional_decorator
[ "def", "positional", "(", "max_pos_args", ")", ":", "__ndb_debug__", "=", "'SKIP'", "def", "positional_decorator", "(", "wrapped", ")", ":", "if", "not", "DEBUG", ":", "return", "wrapped", "__ndb_debug__", "=", "'SKIP'", "@", "wrapping", "(", "wrapped", ")", "def", "positional_wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "__ndb_debug__", "=", "'SKIP'", "if", "len", "(", "args", ")", ">", "max_pos_args", ":", "plural_s", "=", "''", "if", "max_pos_args", "!=", "1", ":", "plural_s", "=", "'s'", "raise", "TypeError", "(", "'%s() takes at most %d positional argument%s (%d given)'", "%", "(", "wrapped", ".", "__name__", ",", "max_pos_args", ",", "plural_s", ",", "len", "(", "args", ")", ")", ")", "return", "wrapped", "(", "*", "args", ",", "*", "*", "kwds", ")", "return", "positional_wrapper", "return", "positional_decorator" ]
A decorator to declare that only the first N arguments may be positional. Note that for methods, n includes 'self'.
[ "A", "decorator", "to", "declare", "that", "only", "the", "first", "N", "arguments", "may", "be", "positional", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/utils.py#L138-L162
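A sketch of the decorator in use (the function and argument names are illustrative); note the check is active only when utils.DEBUG is true:

from google.appengine.ext.ndb import utils

@utils.positional(1)
def greet(greeting, name=None):
  return '%s, %s!' % (greeting, name)

greet('Hello', name='World')   # OK: one positional argument
greet('Hello', 'World')        # TypeError when utils.DEBUG is true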
GoogleCloudPlatform/datastore-ndb-python
ndb/utils.py
decorator
def decorator(wrapped_decorator): """Converts a function into a decorator that optionally accepts keyword arguments in its declaration. Example usage: @utils.decorator def decorator(func, args, kwds, op1=None): ... apply op1 ... return func(*args, **kwds) # Form (1), vanilla @decorator foo(...) ... # Form (2), with options @decorator(op1=5) foo(...) ... Args: wrapped_decorator: A function that accepts positional args (func, args, kwds) and any additional supported keyword arguments. Returns: A decorator with an additional 'wrapped_decorator' property that is set to the original function. """ def helper(_func=None, **options): def outer_wrapper(func): @wrapping(func) def inner_wrapper(*args, **kwds): return wrapped_decorator(func, args, kwds, **options) return inner_wrapper if _func is None: # Form (2), with options. return outer_wrapper # Form (1), vanilla. if options: # Don't allow @decorator(foo, op1=5). raise TypeError('positional arguments not supported') return outer_wrapper(_func) helper.wrapped_decorator = wrapped_decorator return helper
python
def decorator(wrapped_decorator): """Converts a function into a decorator that optionally accepts keyword arguments in its declaration. Example usage: @utils.decorator def decorator(func, args, kwds, op1=None): ... apply op1 ... return func(*args, **kwds) # Form (1), vanilla @decorator foo(...) ... # Form (2), with options @decorator(op1=5) foo(...) ... Args: wrapped_decorator: A function that accepts positional args (func, args, kwds) and any additional supported keyword arguments. Returns: A decorator with an additional 'wrapped_decorator' property that is set to the original function. """ def helper(_func=None, **options): def outer_wrapper(func): @wrapping(func) def inner_wrapper(*args, **kwds): return wrapped_decorator(func, args, kwds, **options) return inner_wrapper if _func is None: # Form (2), with options. return outer_wrapper # Form (1), vanilla. if options: # Don't allow @decorator(foo, op1=5). raise TypeError('positional arguments not supported') return outer_wrapper(_func) helper.wrapped_decorator = wrapped_decorator return helper
[ "def", "decorator", "(", "wrapped_decorator", ")", ":", "def", "helper", "(", "_func", "=", "None", ",", "*", "*", "options", ")", ":", "def", "outer_wrapper", "(", "func", ")", ":", "@", "wrapping", "(", "func", ")", "def", "inner_wrapper", "(", "*", "args", ",", "*", "*", "kwds", ")", ":", "return", "wrapped_decorator", "(", "func", ",", "args", ",", "kwds", ",", "*", "*", "options", ")", "return", "inner_wrapper", "if", "_func", "is", "None", ":", "# Form (2), with options.", "return", "outer_wrapper", "# Form (1), vanilla.", "if", "options", ":", "# Don't allow @decorator(foo, op1=5).", "raise", "TypeError", "(", "'positional arguments not supported'", ")", "return", "outer_wrapper", "(", "_func", ")", "helper", ".", "wrapped_decorator", "=", "wrapped_decorator", "return", "helper" ]
Converts a function into a decorator that optionally accepts keyword arguments in its declaration. Example usage: @utils.decorator def decorator(func, args, kwds, op1=None): ... apply op1 ... return func(*args, **kwds) # Form (1), vanilla @decorator foo(...) ... # Form (2), with options @decorator(op1=5) foo(...) ... Args: wrapped_decorator: A function that accepts positional args (func, args, kwds) and any additional supported keyword arguments. Returns: A decorator with an additional 'wrapped_decorator' property that is set to the original function.
[ "Converts", "a", "function", "into", "a", "decorator", "that", "optionally", "accepts", "keyword", "arguments", "in", "its", "declaration", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/utils.py#L165-L210
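Both calling forms from the docstring, sketched with an illustrative decorator name:

from google.appengine.ext.ndb import utils

@utils.decorator
def traced(func, args, kwds, prefix='call'):
  print '%s: %s' % (prefix, func.__name__)
  return func(*args, **kwds)

@traced                   # form (1), vanilla
def f():
  return 1

@traced(prefix='debug')   # form (2), with options
def g():
  return 2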
GoogleCloudPlatform/datastore-ndb-python
demo/app/fibo.py
fibonacci
def fibonacci(n): """A recursive Fibonacci to exercise task switching.""" if n <= 1: raise ndb.Return(n) a, b = yield fibonacci(n - 1), fibonacci(n - 2) raise ndb.Return(a + b)
python
def fibonacci(n): """A recursive Fibonacci to exercise task switching.""" if n <= 1: raise ndb.Return(n) a, b = yield fibonacci(n - 1), fibonacci(n - 2) raise ndb.Return(a + b)
[ "def", "fibonacci", "(", "n", ")", ":", "if", "n", "<=", "1", ":", "raise", "ndb", ".", "Return", "(", "n", ")", "a", ",", "b", "=", "yield", "fibonacci", "(", "n", "-", "1", ")", ",", "fibonacci", "(", "n", "-", "2", ")", "raise", "ndb", ".", "Return", "(", "a", "+", "b", ")" ]
A recursive Fibonacci to exercise task switching.
[ "A", "recursive", "Fibonacci", "to", "exercise", "task", "switching", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/demo/app/fibo.py#L32-L37
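Calling the tasklet above might look like this (a sketch; assumes the decorated fibonacci is in scope and the ndb event loop is available):

fut = fibonacci(10)      # kicks off the recursive tasklets
print fut.get_result()   # 55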
GoogleCloudPlatform/datastore-ndb-python
demo/app/fibo.py
memoizing_fibonacci
def memoizing_fibonacci(n): """A memoizing recursive Fibonacci to exercise RPCs.""" if n <= 1: raise ndb.Return(n) key = ndb.Key(FibonacciMemo, str(n)) memo = yield key.get_async(ndb_should_cache=False) if memo is not None: assert memo.arg == n logging.info('memo hit: %d -> %d', n, memo.value) raise ndb.Return(memo.value) logging.info('memo fail: %d', n) a = yield memoizing_fibonacci(n - 1) b = yield memoizing_fibonacci(n - 2) ans = a + b memo = FibonacciMemo(key=key, arg=n, value=ans) logging.info('memo write: %d -> %d', n, memo.value) yield memo.put_async(ndb_should_cache=False) raise ndb.Return(ans)
python
def memoizing_fibonacci(n): """A memoizing recursive Fibonacci to exercise RPCs.""" if n <= 1: raise ndb.Return(n) key = ndb.Key(FibonacciMemo, str(n)) memo = yield key.get_async(ndb_should_cache=False) if memo is not None: assert memo.arg == n logging.info('memo hit: %d -> %d', n, memo.value) raise ndb.Return(memo.value) logging.info('memo fail: %d', n) a = yield memoizing_fibonacci(n - 1) b = yield memoizing_fibonacci(n - 2) ans = a + b memo = FibonacciMemo(key=key, arg=n, value=ans) logging.info('memo write: %d -> %d', n, memo.value) yield memo.put_async(ndb_should_cache=False) raise ndb.Return(ans)
[ "def", "memoizing_fibonacci", "(", "n", ")", ":", "if", "n", "<=", "1", ":", "raise", "ndb", ".", "Return", "(", "n", ")", "key", "=", "ndb", ".", "Key", "(", "FibonacciMemo", ",", "str", "(", "n", ")", ")", "memo", "=", "yield", "key", ".", "get_async", "(", "ndb_should_cache", "=", "False", ")", "if", "memo", "is", "not", "None", ":", "assert", "memo", ".", "arg", "==", "n", "logging", ".", "info", "(", "'memo hit: %d -> %d'", ",", "n", ",", "memo", ".", "value", ")", "raise", "ndb", ".", "Return", "(", "memo", ".", "value", ")", "logging", ".", "info", "(", "'memo fail: %d'", ",", "n", ")", "a", "=", "yield", "memoizing_fibonacci", "(", "n", "-", "1", ")", "b", "=", "yield", "memoizing_fibonacci", "(", "n", "-", "2", ")", "ans", "=", "a", "+", "b", "memo", "=", "FibonacciMemo", "(", "key", "=", "key", ",", "arg", "=", "n", ",", "value", "=", "ans", ")", "logging", ".", "info", "(", "'memo write: %d -> %d'", ",", "n", ",", "memo", ".", "value", ")", "yield", "memo", ".", "put_async", "(", "ndb_should_cache", "=", "False", ")", "raise", "ndb", ".", "Return", "(", "ans", ")" ]
A memoizing recursive Fibonacci to exercise RPCs.
[ "A", "memoizing", "recursive", "Fibonacci", "to", "exercise", "RPCs", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/demo/app/fibo.py#L46-L63
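A usage sketch (assumes datastore access, e.g. the dev server; repeat calls are served from the FibonacciMemo entities):

first = memoizing_fibonacci(20).get_result()   # computes and writes memos
again = memoizing_fibonacci(20).get_result()   # mostly memo hits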
GoogleCloudPlatform/datastore-ndb-python
ndb/autobatcher.py
AutoBatcher.run_queue
def run_queue(self, options, todo): """Actually run the _todo_tasklet.""" utils.logging_debug('AutoBatcher(%s): %d items', self._todo_tasklet.__name__, len(todo)) batch_fut = self._todo_tasklet(todo, options) self._running.append(batch_fut) # Add a callback when we're done. batch_fut.add_callback(self._finished_callback, batch_fut, todo)
python
def run_queue(self, options, todo): """Actually run the _todo_tasklet.""" utils.logging_debug('AutoBatcher(%s): %d items', self._todo_tasklet.__name__, len(todo)) batch_fut = self._todo_tasklet(todo, options) self._running.append(batch_fut) # Add a callback when we're done. batch_fut.add_callback(self._finished_callback, batch_fut, todo)
[ "def", "run_queue", "(", "self", ",", "options", ",", "todo", ")", ":", "utils", ".", "logging_debug", "(", "'AutoBatcher(%s): %d items'", ",", "self", ".", "_todo_tasklet", ".", "__name__", ",", "len", "(", "todo", ")", ")", "batch_fut", "=", "self", ".", "_todo_tasklet", "(", "todo", ",", "options", ")", "self", ".", "_running", ".", "append", "(", "batch_fut", ")", "# Add a callback when we're done.", "batch_fut", ".", "add_callback", "(", "self", ".", "_finished_callback", ",", "batch_fut", ",", "todo", ")" ]
Actually run the _todo_tasklet.
[ "Actually", "run", "the", "_todo_tasklet", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/autobatcher.py#L71-L78
GoogleCloudPlatform/datastore-ndb-python
ndb/autobatcher.py
AutoBatcher.add
def add(self, arg, options=None): """Adds an arg and gets back a future. Args: arg: one argument for _todo_tasklet. options: rpc options. Return: An instance of future, representing the result of running _todo_tasklet without batching. """ fut = tasklets.Future('%s.add(%s, %s)' % (self, arg, options)) todo = self._queues.get(options) if todo is None: utils.logging_debug('AutoBatcher(%s): creating new queue for %r', self._todo_tasklet.__name__, options) if not self._queues: eventloop.add_idle(self._on_idle) todo = self._queues[options] = [] todo.append((fut, arg)) if len(todo) >= self._limit: del self._queues[options] self.run_queue(options, todo) return fut
python
def add(self, arg, options=None): """Adds an arg and gets back a future. Args: arg: one argument for _todo_tasklet. options: rpc options. Return: An instance of future, representing the result of running _todo_tasklet without batching. """ fut = tasklets.Future('%s.add(%s, %s)' % (self, arg, options)) todo = self._queues.get(options) if todo is None: utils.logging_debug('AutoBatcher(%s): creating new queue for %r', self._todo_tasklet.__name__, options) if not self._queues: eventloop.add_idle(self._on_idle) todo = self._queues[options] = [] todo.append((fut, arg)) if len(todo) >= self._limit: del self._queues[options] self.run_queue(options, todo) return fut
[ "def", "add", "(", "self", ",", "arg", ",", "options", "=", "None", ")", ":", "fut", "=", "tasklets", ".", "Future", "(", "'%s.add(%s, %s)'", "%", "(", "self", ",", "arg", ",", "options", ")", ")", "todo", "=", "self", ".", "_queues", ".", "get", "(", "options", ")", "if", "todo", "is", "None", ":", "utils", ".", "logging_debug", "(", "'AutoBatcher(%s): creating new queue for %r'", ",", "self", ".", "_todo_tasklet", ".", "__name__", ",", "options", ")", "if", "not", "self", ".", "_queues", ":", "eventloop", ".", "add_idle", "(", "self", ".", "_on_idle", ")", "todo", "=", "self", ".", "_queues", "[", "options", "]", "=", "[", "]", "todo", ".", "append", "(", "(", "fut", ",", "arg", ")", ")", "if", "len", "(", "todo", ")", ">=", "self", ".", "_limit", ":", "del", "self", ".", "_queues", "[", "options", "]", "self", ".", "run_queue", "(", "options", ",", "todo", ")", "return", "fut" ]
Adds an arg and gets back a future. Args: arg: one argument for _todo_tasklet. options: rpc options. Return: An instance of future, representing the result of running _todo_tasklet without batching.
[ "Adds", "an", "arg", "and", "gets", "back", "a", "future", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/autobatcher.py#L90-L113
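How add() and run_queue() cooperate in practice, sketched through the public memcache wrapper (assumes a tasklet context):

from google.appengine.ext import ndb

@ndb.tasklet
def fetch_pair():
  ctx = ndb.get_context()
  fut_a = ctx.memcache_get('a')   # queued by the batcher, not sent yet
  fut_b = ctx.memcache_get('b')   # same options, so it joins the same queue
  a, b = yield fut_a, fut_b       # on idle the queue flushes as one RPC
  raise ndb.Return((a, b))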
GoogleCloudPlatform/datastore-ndb-python
ndb/autobatcher.py
AutoBatcher._finished_callback
def _finished_callback(self, batch_fut, todo): """Passes exception along. Args: batch_fut: the batch future returned by running todo_tasklet. todo: (fut, option) pair. fut is the future returned by each add() call. If the batch fut was successful, it has already called fut.set_result() on other individual futs. This method only handles when the batch fut encountered an exception. """ self._running.remove(batch_fut) err = batch_fut.get_exception() if err is not None: tb = batch_fut.get_traceback() for (fut, _) in todo: if not fut.done(): fut.set_exception(err, tb)
python
def _finished_callback(self, batch_fut, todo): """Passes exception along. Args: batch_fut: the batch future returned by running todo_tasklet. todo: (fut, option) pair. fut is the future returned by each add() call. If the batch fut was successful, it has already called fut.set_result() on other individual futs. This method only handles when the batch fut encountered an exception. """ self._running.remove(batch_fut) err = batch_fut.get_exception() if err is not None: tb = batch_fut.get_traceback() for (fut, _) in todo: if not fut.done(): fut.set_exception(err, tb)
[ "def", "_finished_callback", "(", "self", ",", "batch_fut", ",", "todo", ")", ":", "self", ".", "_running", ".", "remove", "(", "batch_fut", ")", "err", "=", "batch_fut", ".", "get_exception", "(", ")", "if", "err", "is", "not", "None", ":", "tb", "=", "batch_fut", ".", "get_traceback", "(", ")", "for", "(", "fut", ",", "_", ")", "in", "todo", ":", "if", "not", "fut", ".", "done", "(", ")", ":", "fut", ".", "set_exception", "(", "err", ",", "tb", ")" ]
Passes exception along. Args: batch_fut: the batch future returned by running todo_tasklet. todo: (fut, option) pair. fut is the future returned by each add() call. If the batch fut was successful, it has already called fut.set_result() on other individual futs. This method only handles when the batch fut encountered an exception.
[ "Passes", "exception", "along", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/autobatcher.py#L132-L149
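The observable effect is that a failed batch RPC is re-raised on each queued future; a sketch:

from google.appengine.ext import ndb

fut = ndb.get_context().memcache_get('a')
try:
  fut.get_result()
except Exception:
  pass   # a failure in the shared batch RPC surfaces on every future in it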
GoogleCloudPlatform/datastore-ndb-python
ndb/metadata.py
get_namespaces
def get_namespaces(start=None, end=None): """Return all namespaces in the specified range. Args: start: only return namespaces >= start if start is not None. end: only return namespaces < end if end is not None. Returns: A list of namespace names between the (optional) start and end values. """ q = Namespace.query() if start is not None: q = q.filter(Namespace.key >= Namespace.key_for_namespace(start)) if end is not None: q = q.filter(Namespace.key < Namespace.key_for_namespace(end)) return [x.namespace_name for x in q]
python
def get_namespaces(start=None, end=None): """Return all namespaces in the specified range. Args: start: only return namespaces >= start if start is not None. end: only return namespaces < end if end is not None. Returns: A list of namespace names between the (optional) start and end values. """ q = Namespace.query() if start is not None: q = q.filter(Namespace.key >= Namespace.key_for_namespace(start)) if end is not None: q = q.filter(Namespace.key < Namespace.key_for_namespace(end)) return [x.namespace_name for x in q]
[ "def", "get_namespaces", "(", "start", "=", "None", ",", "end", "=", "None", ")", ":", "q", "=", "Namespace", ".", "query", "(", ")", "if", "start", "is", "not", "None", ":", "q", "=", "q", ".", "filter", "(", "Namespace", ".", "key", ">=", "Namespace", ".", "key_for_namespace", "(", "start", ")", ")", "if", "end", "is", "not", "None", ":", "q", "=", "q", ".", "filter", "(", "Namespace", ".", "key", "<", "Namespace", ".", "key_for_namespace", "(", "end", ")", ")", "return", "[", "x", ".", "namespace_name", "for", "x", "in", "q", "]" ]
Return all namespaces in the specified range. Args: start: only return namespaces >= start if start is not None. end: only return namespaces < end if end is not None. Returns: A list of namespace names between the (optional) start and end values.
[ "Return", "all", "namespaces", "in", "the", "specified", "range", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/metadata.py#L239-L254
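Sketch (assumes the ndb metadata module is importable):

from google.appengine.ext.ndb import metadata

for name in metadata.get_namespaces():
  print repr(name)   # the default namespace is reported as ''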
GoogleCloudPlatform/datastore-ndb-python
ndb/metadata.py
get_kinds
def get_kinds(start=None, end=None): """Return all kinds in the specified range, for the current namespace. Args: start: only return kinds >= start if start is not None. end: only return kinds < end if end is not None. Returns: A list of kind names between the (optional) start and end values. """ q = Kind.query() if start is not None and start != '': q = q.filter(Kind.key >= Kind.key_for_kind(start)) if end is not None: if end == '': return [] q = q.filter(Kind.key < Kind.key_for_kind(end)) return [x.kind_name for x in q]
python
def get_kinds(start=None, end=None): """Return all kinds in the specified range, for the current namespace. Args: start: only return kinds >= start if start is not None. end: only return kinds < end if end is not None. Returns: A list of kind names between the (optional) start and end values. """ q = Kind.query() if start is not None and start != '': q = q.filter(Kind.key >= Kind.key_for_kind(start)) if end is not None: if end == '': return [] q = q.filter(Kind.key < Kind.key_for_kind(end)) return [x.kind_name for x in q]
[ "def", "get_kinds", "(", "start", "=", "None", ",", "end", "=", "None", ")", ":", "q", "=", "Kind", ".", "query", "(", ")", "if", "start", "is", "not", "None", "and", "start", "!=", "''", ":", "q", "=", "q", ".", "filter", "(", "Kind", ".", "key", ">=", "Kind", ".", "key_for_kind", "(", "start", ")", ")", "if", "end", "is", "not", "None", ":", "if", "end", "==", "''", ":", "return", "[", "]", "q", "=", "q", ".", "filter", "(", "Kind", ".", "key", "<", "Kind", ".", "key_for_kind", "(", "end", ")", ")", "return", "[", "x", ".", "kind_name", "for", "x", "in", "q", "]" ]
Return all kinds in the specified range, for the current namespace. Args: start: only return kinds >= start if start is not None. end: only return kinds < end if end is not None. Returns: A list of kind names between the (optional) start and end values.
[ "Return", "all", "kinds", "in", "the", "specified", "range", "for", "the", "current", "namespace", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/metadata.py#L257-L275
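Sketch of the range arguments (kind names are hypothetical):

from google.appengine.ext.ndb import metadata

all_kinds = metadata.get_kinds()
a_to_c = metadata.get_kinds(start='A', end='C')   # kinds in ['A', 'C')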
GoogleCloudPlatform/datastore-ndb-python
ndb/metadata.py
get_properties_of_kind
def get_properties_of_kind(kind, start=None, end=None): """Return all properties of kind in the specified range. NOTE: This function does not return unindexed properties. Args: kind: name of kind whose properties you want. start: only return properties >= start if start is not None. end: only return properties < end if end is not None. Returns: A list of property names of kind between the (optional) start and end values. """ q = Property.query(ancestor=Property.key_for_kind(kind)) if start is not None and start != '': q = q.filter(Property.key >= Property.key_for_property(kind, start)) if end is not None: if end == '': return [] q = q.filter(Property.key < Property.key_for_property(kind, end)) return [Property.key_to_property(k) for k in q.iter(keys_only=True)]
python
def get_properties_of_kind(kind, start=None, end=None): """Return all properties of kind in the specified range. NOTE: This function does not return unindexed properties. Args: kind: name of kind whose properties you want. start: only return properties >= start if start is not None. end: only return properties < end if end is not None. Returns: A list of property names of kind between the (optional) start and end values. """ q = Property.query(ancestor=Property.key_for_kind(kind)) if start is not None and start != '': q = q.filter(Property.key >= Property.key_for_property(kind, start)) if end is not None: if end == '': return [] q = q.filter(Property.key < Property.key_for_property(kind, end)) return [Property.key_to_property(k) for k in q.iter(keys_only=True)]
[ "def", "get_properties_of_kind", "(", "kind", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "q", "=", "Property", ".", "query", "(", "ancestor", "=", "Property", ".", "key_for_kind", "(", "kind", ")", ")", "if", "start", "is", "not", "None", "and", "start", "!=", "''", ":", "q", "=", "q", ".", "filter", "(", "Property", ".", "key", ">=", "Property", ".", "key_for_property", "(", "kind", ",", "start", ")", ")", "if", "end", "is", "not", "None", ":", "if", "end", "==", "''", ":", "return", "[", "]", "q", "=", "q", ".", "filter", "(", "Property", ".", "key", "<", "Property", ".", "key_for_property", "(", "kind", ",", "end", ")", ")", "return", "[", "Property", ".", "key_to_property", "(", "k", ")", "for", "k", "in", "q", ".", "iter", "(", "keys_only", "=", "True", ")", "]" ]
Return all properties of kind in the specified range. NOTE: This function does not return unindexed properties. Args: kind: name of kind whose properties you want. start: only return properties >= start if start is not None. end: only return properties < end if end is not None. Returns: A list of property names of kind between the (optional) start and end values.
[ "Return", "all", "properties", "of", "kind", "in", "the", "specified", "range", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/metadata.py#L278-L300
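A one-line sketch (the kind name and result are illustrative):

from google.appengine.ext.ndb import metadata

# Hypothetical kind; unindexed properties will not appear in the result.
props = metadata.get_properties_of_kind('Account')   # e.g. ['name', 'userid']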
GoogleCloudPlatform/datastore-ndb-python
ndb/metadata.py
get_representations_of_kind
def get_representations_of_kind(kind, start=None, end=None): """Return all representations of properties of kind in the specified range. NOTE: This function does not return unindexed properties. Args: kind: name of kind whose properties you want. start: only return properties >= start if start is not None. end: only return properties < end if end is not None. Returns: A dictionary mapping property names to their lists of representations. """ q = Property.query(ancestor=Property.key_for_kind(kind)) if start is not None and start != '': q = q.filter(Property.key >= Property.key_for_property(kind, start)) if end is not None: if end == '': return {} q = q.filter(Property.key < Property.key_for_property(kind, end)) result = {} for property in q: result[property.property_name] = property.property_representation return result
python
def get_representations_of_kind(kind, start=None, end=None): """Return all representations of properties of kind in the specified range. NOTE: This function does not return unindexed properties. Args: kind: name of kind whose properties you want. start: only return properties >= start if start is not None. end: only return properties < end if end is not None. Returns: A dictionary mapping property names to their lists of representations. """ q = Property.query(ancestor=Property.key_for_kind(kind)) if start is not None and start != '': q = q.filter(Property.key >= Property.key_for_property(kind, start)) if end is not None: if end == '': return {} q = q.filter(Property.key < Property.key_for_property(kind, end)) result = {} for property in q: result[property.property_name] = property.property_representation return result
[ "def", "get_representations_of_kind", "(", "kind", ",", "start", "=", "None", ",", "end", "=", "None", ")", ":", "q", "=", "Property", ".", "query", "(", "ancestor", "=", "Property", ".", "key_for_kind", "(", "kind", ")", ")", "if", "start", "is", "not", "None", "and", "start", "!=", "''", ":", "q", "=", "q", ".", "filter", "(", "Property", ".", "key", ">=", "Property", ".", "key_for_property", "(", "kind", ",", "start", ")", ")", "if", "end", "is", "not", "None", ":", "if", "end", "==", "''", ":", "return", "{", "}", "q", "=", "q", ".", "filter", "(", "Property", ".", "key", "<", "Property", ".", "key_for_property", "(", "kind", ",", "end", ")", ")", "result", "=", "{", "}", "for", "property", "in", "q", ":", "result", "[", "property", ".", "property_name", "]", "=", "property", ".", "property_representation", "return", "result" ]
Return all representations of properties of kind in the specified range. NOTE: This function does not return unindexed properties. Args: kind: name of kind whose properties you want. start: only return properties >= start if start is not None. end: only return properties < end if end is not None. Returns: A dictionary mapping property names to their lists of representations.
[ "Return", "all", "representations", "of", "properties", "of", "kind", "in", "the", "specified", "range", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/metadata.py#L303-L328
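Sketch (the kind name and representation values are illustrative):

from google.appengine.ext.ndb import metadata

reps = metadata.get_representations_of_kind('Account')
# e.g. {'name': ['STRING'], 'userid': ['INT64']} -- values are illustrative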
GoogleCloudPlatform/datastore-ndb-python
ndb/metadata.py
get_entity_group_version
def get_entity_group_version(key): """Return the version of the entity group containing key. Args: key: a key for an entity group whose __entity_group__ key you want. Returns: The version of the entity group containing key. This version is guaranteed to increase on every change to the entity group. The version may increase even in the absence of user-visible changes to the entity group. May return None if the entity group was never written to. On non-HR datastores, this function returns None. """ eg = EntityGroup.key_for_entity_group(key).get() if eg: return eg.version else: return None
python
def get_entity_group_version(key):
  """Return the version of the entity group containing key.

  Args:
    key: a key for an entity group whose __entity_group__ key you want.

  Returns:
    The version of the entity group containing key. This version is
    guaranteed to increase on every change to the entity group. The version
    may increase even in the absence of user-visible changes to the entity
    group. May return None if the entity group was never written to.

    On non-HR datastores, this function returns None.
  """
  eg = EntityGroup.key_for_entity_group(key).get()
  if eg:
    return eg.version
  else:
    return None
[ "def", "get_entity_group_version", "(", "key", ")", ":", "eg", "=", "EntityGroup", ".", "key_for_entity_group", "(", "key", ")", ".", "get", "(", ")", "if", "eg", ":", "return", "eg", ".", "version", "else", ":", "return", "None" ]
Return the version of the entity group containing key.

Args:
  key: a key for an entity group whose __entity_group__ key you want.

Returns:
  The version of the entity group containing key. This version is
  guaranteed to increase on every change to the entity group. The version
  may increase even in the absence of user-visible changes to the entity
  group. May return None if the entity group was never written to.

  On non-HR datastores, this function returns None.
[ "Return", "the", "version", "of", "the", "entity", "group", "containing", "key", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/metadata.py#L331-L350
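A usage sketch for the function above; 'Account' and 'sample-id' are hypothetical names, and a datastore-backed runtime is assumed.

from ndb import metadata, model

# Any key inside the entity group works, since the function resolves the
# group root internally via key.root().
key = model.Key('Account', 'sample-id')
version = metadata.get_entity_group_version(key)
if version is None:
  pass  # entity group never written to, or a non-HR datastore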
GoogleCloudPlatform/datastore-ndb-python
ndb/metadata.py
Namespace.key_for_namespace
def key_for_namespace(cls, namespace):
  """Return the Key for a namespace.

  Args:
    namespace: A string giving the namespace whose key is requested.

  Returns:
    The Key for the namespace.
  """
  if namespace:
    return model.Key(cls.KIND_NAME, namespace)
  else:
    return model.Key(cls.KIND_NAME, cls.EMPTY_NAMESPACE_ID)
python
def key_for_namespace(cls, namespace):
  """Return the Key for a namespace.

  Args:
    namespace: A string giving the namespace whose key is requested.

  Returns:
    The Key for the namespace.
  """
  if namespace:
    return model.Key(cls.KIND_NAME, namespace)
  else:
    return model.Key(cls.KIND_NAME, cls.EMPTY_NAMESPACE_ID)
[ "def", "key_for_namespace", "(", "cls", ",", "namespace", ")", ":", "if", "namespace", ":", "return", "model", ".", "Key", "(", "cls", ".", "KIND_NAME", ",", "namespace", ")", "else", ":", "return", "model", ".", "Key", "(", "cls", ".", "KIND_NAME", ",", "cls", ".", "EMPTY_NAMESPACE_ID", ")" ]
Return the Key for a namespace.

Args:
  namespace: A string giving the namespace whose key is requested.

Returns:
  The Key for the namespace.
[ "Return", "the", "Key", "for", "a", "namespace", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/metadata.py#L76-L88
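A usage sketch for the classmethod above; 'tenant-a' is a hypothetical namespace name.

from ndb.metadata import Namespace

# The empty (default) namespace maps to a reserved numeric ID rather than
# an empty string name, which Key() would reject.
named_key = Namespace.key_for_namespace('tenant-a')
default_key = Namespace.key_for_namespace('')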
GoogleCloudPlatform/datastore-ndb-python
ndb/metadata.py
Property.key_for_property
def key_for_property(cls, kind, property):
  """Return the __property__ key for property of kind.

  Args:
    kind: kind whose key is requested.
    property: property whose key is requested.

  Returns:
    The key for property of kind.
  """
  return model.Key(Kind.KIND_NAME, kind, Property.KIND_NAME, property)
python
def key_for_property(cls, kind, property):
  """Return the __property__ key for property of kind.

  Args:
    kind: kind whose key is requested.
    property: property whose key is requested.

  Returns:
    The key for property of kind.
  """
  return model.Key(Kind.KIND_NAME, kind, Property.KIND_NAME, property)
[ "def", "key_for_property", "(", "cls", ",", "kind", ",", "property", ")", ":", "return", "model", ".", "Key", "(", "Kind", ".", "KIND_NAME", ",", "kind", ",", "Property", ".", "KIND_NAME", ",", "property", ")" ]
Return the __property__ key for property of kind.

Args:
  kind: kind whose key is requested.
  property: property whose key is requested.

Returns:
  The key for property of kind.
[ "Return", "the", "__property__", "key", "for", "property", "of", "kind", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/metadata.py#L168-L178
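A usage sketch for the classmethod above; 'Employee' and 'age' are hypothetical names.

from ndb.metadata import Property

# Builds the two-level key Key('__kind__', kind, '__property__', property).
key = Property.key_for_property('Employee', 'age')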
GoogleCloudPlatform/datastore-ndb-python
ndb/metadata.py
Property.key_to_kind
def key_to_kind(cls, key):
  """Return the kind specified by a given __property__ key.

  Args:
    key: key whose kind name is requested.

  Returns:
    The kind specified by key.
  """
  if key.kind() == Kind.KIND_NAME:
    return key.id()
  else:
    return key.parent().id()
python
def key_to_kind(cls, key):
  """Return the kind specified by a given __property__ key.

  Args:
    key: key whose kind name is requested.

  Returns:
    The kind specified by key.
  """
  if key.kind() == Kind.KIND_NAME:
    return key.id()
  else:
    return key.parent().id()
[ "def", "key_to_kind", "(", "cls", ",", "key", ")", ":", "if", "key", ".", "kind", "(", ")", "==", "Kind", ".", "KIND_NAME", ":", "return", "key", ".", "id", "(", ")", "else", ":", "return", "key", ".", "parent", "(", ")", ".", "id", "(", ")" ]
Return the kind specified by a given __property__ key.

Args:
  key: key whose kind name is requested.

Returns:
  The kind specified by key.
[ "Return", "the", "kind", "specified", "by", "a", "given", "__property__", "key", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/metadata.py#L181-L193
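A usage sketch showing how this classmethod round-trips with key_for_property above; the names are hypothetical.

from ndb.metadata import Property

# A bare __kind__ key is also accepted, in which case the kind is the
# key's own id rather than its parent's.
key = Property.key_for_property('Employee', 'age')
assert Property.key_to_kind(key) == 'Employee'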
GoogleCloudPlatform/datastore-ndb-python
ndb/metadata.py
EntityGroup.key_for_entity_group
def key_for_entity_group(cls, key):
  """Return the key for the entity group containing key.

  Args:
    key: a key for an entity group whose __entity_group__ key you want.

  Returns:
    The __entity_group__ key for the entity group containing key.
  """
  return model.Key(cls.KIND_NAME, cls.ID, parent=key.root())
python
def key_for_entity_group(cls, key):
  """Return the key for the entity group containing key.

  Args:
    key: a key for an entity group whose __entity_group__ key you want.

  Returns:
    The __entity_group__ key for the entity group containing key.
  """
  return model.Key(cls.KIND_NAME, cls.ID, parent=key.root())
[ "def", "key_for_entity_group", "(", "cls", ",", "key", ")", ":", "return", "model", ".", "Key", "(", "cls", ".", "KIND_NAME", ",", "cls", ".", "ID", ",", "parent", "=", "key", ".", "root", "(", ")", ")" ]
Return the key for the entity group containing key.

Args:
  key: a key for an entity group whose __entity_group__ key you want.

Returns:
  The __entity_group__ key for the entity group containing key.
[ "Return", "the", "key", "for", "the", "entity", "group", "containing", "key", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/metadata.py#L227-L236
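A usage sketch for the classmethod above; 'Account' and 'Invoice' are hypothetical kinds.

from ndb import model
from ndb.metadata import EntityGroup

# The __entity_group__ key hangs off the root of the given key:
# Key('Account', 'a1', '__entity_group__', EntityGroup.ID).
child = model.Key('Account', 'a1', 'Invoice', 42)
eg_key = EntityGroup.key_for_entity_group(child)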
GoogleCloudPlatform/datastore-ndb-python
ndb/django_middleware.py
NdbDjangoMiddleware.process_request
def process_request(self, unused_request):
  """Called by Django before deciding which view to execute."""
  # Compare to the first half of toplevel() in context.py.
  tasklets._state.clear_all_pending()
  # Create and install a new context.
  ctx = tasklets.make_default_context()
  tasklets.set_context(ctx)
python
def process_request(self, unused_request):
  """Called by Django before deciding which view to execute."""
  # Compare to the first half of toplevel() in context.py.
  tasklets._state.clear_all_pending()
  # Create and install a new context.
  ctx = tasklets.make_default_context()
  tasklets.set_context(ctx)
[ "def", "process_request", "(", "self", ",", "unused_request", ")", ":", "# Compare to the first half of toplevel() in context.py.", "tasklets", ".", "_state", ".", "clear_all_pending", "(", ")", "# Create and install a new context.", "ctx", "=", "tasklets", ".", "make_default_context", "(", ")", "tasklets", ".", "set_context", "(", "ctx", ")" ]
Called by Django before deciding which view to execute.
[ "Called", "by", "Django", "before", "deciding", "which", "view", "to", "execute", "." ]
train
https://github.com/GoogleCloudPlatform/datastore-ndb-python/blob/cf4cab3f1f69cd04e1a9229871be466b53729f3f/ndb/django_middleware.py#L42-L48
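For context, this hook is normally wired in through Django settings rather than called directly; the snippet below is a sketch of that configuration, and the surrounding project layout is an assumption.

# settings.py (hypothetical project) -- install the middleware so each
# incoming request gets a fresh NDB context before any view runs.
MIDDLEWARE_CLASSES = (
  'ndb.django_middleware.NdbDjangoMiddleware',
  # ... other middleware ...
)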