Dataset schema (one record per source file):

| column | type | stats |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M, nullable |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1 |
| author_id | string | length 0-212 |
blob_id: 0ad2c5085e508e5ebf1a8c7dcb70c365cb16bcd5 | directory_id: 6e7351d569583fbb19561b881f9274cae0679365 | content_id: 1bf576db1e656aa6cc47eeaa16175547bfcf2d74
repo_name: bellarm/advent-of-code-16 | path: /day07_2.py | branch_name: refs/heads/master
snapshot_id: 6b70f01edee573e6b83e1f76b6c3922e9925246c | revision_id: f1db090bac3155ed720ae8b4da1d6ee84b0ee3e0
visit_date: 2021-05-01T00:51:14.886873 | revision_date: 2016-12-27T08:21:56 | committer_date: 2016-12-27T08:21:56
detected_licenses: [] | license_type: no_license | github_id: 75,303,771 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,057 | extension: py
#!/usr/bin/python3.5
import sys, re
def support_ssl(ip):
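    # Looks for an "aba" three-letter pattern outside square brackets whose
    # inverse "bab" appears inside brackets (Advent of Code 2016, day 7 part 2).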
words = ip.split(' ')
is_in_brackets = 0
patterns_inside_bracket = []
patterns_outside_bracket = []
for word in words:
        for i in range(len(word)-2):
            # an "aba" pattern needs a different middle character, e.g. "xyx" but not "xxx"
            if word[i] == word[i+2] and word[i] != word[i+1]:
if is_in_brackets:
patterns_inside_bracket.append(word[i]+word[i+1]+word[i+2])
else:
patterns_outside_bracket.append(word[i]+word[i+1]+word[i+2])
if is_in_brackets == 1:
is_in_brackets = 0
else:
is_in_brackets = 1
# compares all the patterns from inside and outside brackets
for pat1 in patterns_inside_bracket:
for pat2 in patterns_outside_bracket:
if pat1[0] == pat2[1] and pat1[1] == pat2[0]:
return True
return False
ips = [line for line in sys.stdin]
valid_ip = 0
for ip in ips:
# convert all the brackets to spaces
ip = re.sub(r'[\[\]]', ' ', ip)
if support_ssl(ip):
valid_ip += 1
print(valid_ip)
authors: ["rachelitabella@rocketmail.com"] | author_id: rachelitabella@rocketmail.com
blob_id: b65352806020cbc7f4408ddafb3e6bf196e381b2 | directory_id: 60d30f3520c78bdacbb175dba685b4ba25d3ac04 | content_id: 4fcd56fc85e884fb1be86334cae4f674f4468424
repo_name: abiner/lankuai | path: /lankuai/lankuai/lkitsm/project/repairANDbuyer/views.py | branch_name: refs/heads/master
snapshot_id: c8dd1c0abe3ce4db1b19495deab2c28939570160 | revision_id: 55a3631528acf1c46a471cb0616e28a5396faab5
visit_date: 2020-04-27T06:27:38.279051 | revision_date: 2019-04-09T01:31:29 | committer_date: 2019-04-09T01:31:29
detected_licenses: ["MIT"] | license_type: permissive | github_id: 174,108,743 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 16,466 | extension: py
from django.shortcuts import render
from login.models import User ,Departments
from .models import failureMessages ,faultType ,theDoorOf,cost ,quote
import random
import time
from django.shortcuts import redirect
# repair request page
def repair(request):
username = request.session['username']
user = User.objects.get(uname=username)
dep = user.udepname
depList = Departments.objects.all()
ftList = faultType.objects.all()
return render(request ,'repairANDbuyer/repair.html' ,{"username":username ,"dep":dep ,"depList":depList ,"ftList":ftList })
from .models import State
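# eventlevel appears to encode the repair workflow state:
# 1 = submitted, 2 = cost price approved, 0 = terminated/rejected, 100 = quote approved (done)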
# create a repair record; just submitted, for the inspection technician to handle
def addrepair(request):
if request.method == "POST":
#faultID,fname,inunits,phone,faultclass,brandtype,equipmentID,faultdescribe,subtime,eventlevel
#faultID
faultID = str(int(time.strftime("%Y%m%d%H%M%S"))+int(random.randrange(1 ,1000)))
fname = request.POST.get("fname")
inunits = request.POST.get("inunits")
phone = request.POST.get("phone")
faultclass = request.POST.get("faultclass")
brandtype = request.POST.get("brandtype")
equipmentID = request.POST.get("equipmentID")
faultdescribe = request.POST.get("faultdescribe")
        # subtime: submission time
subtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(subtime ,"---------------------------subtime------------------------------")
subtime = request.POST.get("subtime")
        # what should the event level be set to?
eventlevel = 1
print(faultID,fname,inunits,phone,faultclass,brandtype,equipmentID,faultdescribe,subtime,eventlevel)
fm = failureMessages.createfailureMessages(faultID,fname,inunits,phone,faultclass,brandtype,equipmentID,faultdescribe,subtime,eventlevel)
fm.save()
return redirect('/repairANDbuyer/addrepairok/')
def addrepairok(request):
redirect = "/repairANDbuyer/repair"
return render(request, 'addok.html', {"redirect": redirect})
# repair
# inspection staff's submission form
def verif(request ,pid):
username = request.session['username']
print(username ,"当前登录人")
path = request.path
print(path ,"path------------------------------------------------------------------------------------------------")
#/repairANDbuyer/20190131204044/
newpath = path[16:30]
print(newpath ,"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaanewpathaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
    # repair record info
fm = failureMessages.objects.get(faultID=newpath)
ft = faultType.objects.all()
dempList = Departments.objects.all()
#co = cost.objects.get(faultID=newpath)
print(dempList ,"---------------------------dempList-----------------------------")
user = User.objects.get(uname=username)
    # role 1: show the repair record plus the cost-price input
if user.role.roleid == 1:
return render(request, "repairANDbuyer/inspectorrole1.html", {"title": "技术检测及报成本价", "fm": fm, "ft": ft
, "dempList": dempList, "username": username})
    # role 2: show the repair record and the cost record, plus the quote input
elif user.role.roleid == 2 :
        # cost-price record
cos = cost.objects.get(faultID=newpath)
return render(request, "repairANDbuyer/inspectorrole2.html", {"title": "部门经理报价", "fm": fm, "ft": ft
, "dempList": dempList, "username": username ,"cos":cos})
    # role 3
elif user.role.roleid == 3 :
return render(request, "repairANDbuyer/inspectorrole3.html", {"title": "技术检测人员", "fm": fm, "ft": ft
, "dempList": dempList, "username": username})
else:
print("-------------------------------------else-----------------------------------")
print(path[-1])
print("-------------------------------------else-----------------------------------")
return render(request, "ok.html", {"title": "技术检测及报成本价", "fm": fm, "ft": ft
, "dempList": dempList, "username": username})
# add cost price (唐黄)
def addCostPrice(request):
if request.method == "POST":
faultID = request.POST.get("faultID")
cname = request.POST.get("cname")
cause = request.POST.get("cause")
supplies = request.POST.get("supplies")
costprice = request.POST.get("costprice")
subtime = request.POST.get("subtime")
        # current time
subtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(faultID,cname,cause,supplies,costprice,subtime)
co = cost.createcost(faultID,cname,cause,supplies,costprice,subtime)
co.save()
fm = failureMessages.objects.get(faultID=faultID)
        # 2 means repairable
if "pizhun" in request.POST:
fm.eventlevel = 2
        # 0 means not repairable
elif "bohui" in request.POST:
fm.eventlevel = 0
fm.save()
return redirect('/addCostPriceok/')
def addCostPriceok(request):
redirect = "/index"
return render(request, 'addok.html', {"redirect": redirect})
from django.core.paginator import Paginator
# add department manager quote (addcostPrice)
def addquote(request):
print("i live you hahaahahahahahahahahahah")
if request.method == "POST":
username = request.session["username"]
faultID = request.POST.get("faultID")
quo = request.POST.get("quo")
comment = request.POST.get("comment")
subtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(faultID ,username ,quo ,comment ,subtime ,"-------------------------")
quot = quote.createquote(faultID ,username ,quo ,comment ,subtime)
quot.save()
fm = failureMessages.objects.get(faultID=faultID)
if "pizhun" in request.POST:
fm.eventlevel = 100
elif "bohui" in request.POST:
fm.eventlevel = 0
fm.save()
return redirect('/index')
# repair: the requester checks their own progress
def myrepair(request ,pageid):
username = request.session["username"]
myrepairList = failureMessages.objects.filter(fname=username)
paginator = Paginator(myrepairList ,6)
page = paginator.page(pageid)
return render(request ,"repairANDbuyer/myrepair.html" ,{"myrepairList":page})
# repair: one specific record's progress
def onerepair(request ,pageid):
path = request.path
print(path ,'整个路径------------------------------')
newpath = path[25:39]#19,33
print(newpath ,"截取后的路径newpath000000000000000000000000")
    # this definitely needs to be changed
failureM = failureMessages.objects.get(faultID=newpath)
print(failureM.eventlevel ,"--------------------------------目前该维修单 状态是啥")
if failureM.eventlevel == 1:
        # submit cost price
result = "等待技术人员检测"
return render(request ,"repairANDbuyer/onerepair.html" ,{"title":"维修申请结果" ,"failureM":failureM ,"result":result})
elif failureM.eventlevel == 2:
        # inspection finished; show the inspector's findings
result = "等待部门经理报价"
co = cost.objects.filter(faultID=newpath)
return render(request ,"repairANDbuyer/onerepair.html" ,{"title":"维修申请结果" ,"failureM":failureM ,"result":result ,"co":co})
elif failureM.eventlevel == 0 :
        # process terminated; even then, show at which step it was terminated
result = "已终止进程"
co = cost.objects.filter(faultID=newpath)
quot = quote.objects.filter(faultID=newpath)
        # if there is no inspection record
if not co:
return render(request ,"repairANDbuyer/onerepair.html" ,{"title":"维修申请结果" ,"failureM":failureM ,"result":result})
else:
if not quot:
return render(request ,"repairANDbuyer/onerepair.html" ,{"title":"维修申请结果" ,"failureM":failureM ,"result":result ,"co":co})
else:
return render(request, "repairANDbuyer/onerepair.html",
{"title": "维修申请结果", "failureM": failureM, "result": result, "co": co ,"quot":quot})
elif failureM.eventlevel == 100 :
        # repair succeeded; show the inspection record and the manager's quote
result = "维修成功"
co = cost.objects.filter(faultID=newpath)
quot = quote.objects.filter(faultID=newpath)
return render(request ,"repairANDbuyer/onerepair.html" ,{"title":"维修申请结果" ,"failureM":failureM ,"result":result ,"co":co ,"quot":quot})
from .models import Goods ,purchaseApplyFor ,goodsCost ,managerConsent ,Consignee
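# pcourse appears to encode the purchase workflow state:
# 1 = submitted, 2 = cost added, 3 = manager approved, 100 = goods received (done)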
# procurement
def buyer(request):
    # set defaults
purchaseID= str(int(time.strftime("%m%d%H%M%S"))+int(random.randrange(1 ,1000)))
#str(time.strftime("%m%d%H%M%S") + random.randrange(1, 1000))
print(purchaseID ,"==========================purchaseID")
username = request.session["username"]
print("当前登录人:" ,username ,"----------------")
user = User.objects.get(uname=username)
depname = user.udepname
print("当前登录人所属部门:" ,depname ,"----------------")
return render(request ,'repairANDbuyer/buyer.html' ,{"title":"采购申请页面" ,"purchaseID":purchaseID ,"pdemp":depname ,"username":username})
# procurement: create a purchase order
def addpurchase(request):
if request.method == "POST":
        # get the product info
purchaseID = request.POST.get("purchaseID")
        # get the request info
pdemp = request.POST.get("pdemp")
pname = request.POST.get("pname")
pdate = request.POST.get("pdate")
pcomment = request.POST.get("pcomment")
        # pcourse: status
pcourse = 1
        # gnameList: names of multiple items; gamountList: quantities of multiple items
gnameList = request.POST.getlist("name1", [])
gamountList = request.POST.getlist("name2", [])
count = len(gnameList)
print(len(gnameList), "多个物品名称是多少个呢????????????")
for item in range(count):
goods = Goods.creategoods(purchaseID, gnameList[item], gamountList[item])
goods.save()
# goods = Goods.creategoods(purchaseID ,gname ,gamount)
# goods.save()
paf = purchaseApplyFor.createpurchaseApplyFor(purchaseID ,pdemp ,pname ,pdate ,pcomment ,pcourse)
paf.save()
return redirect('/repairANDbuyer/addpurchaseok/')
def addpurchaseok(request):
redirect = "/repairANDbuyer/buyer"
return render(request, 'addok.html', {"redirect": redirect})
# procurement: purchasing staff 唐涛 submits the cost price (role 1); the department manager approves and adjusts the quote (role 2)
def costprice(request ,pig):
username = request.session["username"]
user = User.objects.get(uname=username)
    # first, show the request page
print(request.path ,"这是路径,需要剪切-----------------")
#/repairANDbuyer/2201246091
path = request.path
newpath = path[16:25]
print(newpath ,"这是剪切后的网址-----------------------")
paf = purchaseApplyFor.objects.get(purchaseID=newpath)
goods = Goods.objects.get(purchaseID=newpath)
    # role 1: show the purchase request; purchasing submits the cost
if user.role == 1 :
return render(request, 'repairANDbuyer/costprice1.html', {"paf":paf , "goods":goods})
    # role 2: show the purchase request and cost; submit approval
if user.role == 2 :
gc = goodsCost.objects.get(purchaseID=newpath)
return render(request, 'repairANDbuyer/costprice2.html', {"paf": paf, "goods": goods ,"gc":gc})
    # role 0: show the purchase request, cost, and approval; submit the receipt date
if user.role == 0 :
paf = purchaseApplyFor.objects.get(purchaseID=newpath)
gc = goodsCost.objects.get(purchaseID=newpath)
mc = managerConsent.objects.get(purchaseID=newpath)
cgdate = time.strftime("%Y-%m-%d/%H:%M", time.localtime(time.time()))
print(cgdate ,"========================cgdate===============================")
return render(request, 'repairANDbuyer/costprice3.html', {"paf": paf, "goods": goods, "gc": gc ,"mc":mc ,"cgdate":cgdate})
# procurement: add cost
def addgoodscost(request):
if request.method == "POST":
purchaseID = request.POST.get("purchaseID")
gcgprice = request.POST.get("cnt4")
suggestprice = request.POST.get("cnt5")
goodsprofit = int(suggestprice) - int(gcgprice)
gcdate = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
print(gcdate ,"当前时间,格式是否咩问题呢")
pcomment = request.POST.get("pcomment")
print(purchaseID , gcgprice, suggestprice ,goodsprofit ,gcdate ,pcomment)
gc = goodsCost.creategoodsCost(purchaseID , gcgprice, suggestprice ,goodsprofit ,gcdate ,pcomment)
gc.save()
        # once saved, advance the stage; this role no longer sees it, the department manager does
paf = purchaseApplyFor.objects.get(purchaseID=purchaseID)
paf.pcourse = 2
paf.save()
return redirect('/index')
# procurement: add department manager approval
def addmanageryes(request):
if request.method == "POST":
purchaseID = request.POST.get("purchaseID")
affirmprice = request.POST.get("affirmprice")
mcdate = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
pcomment = request.POST.get("pcomment")
print(purchaseID ,affirmprice ,mcdate ,pcomment)
mc = managerConsent.createmanagerConsent(purchaseID ,affirmprice ,mcdate ,pcomment)
mc.save()
        # saved; advance the stage
paf = purchaseApplyFor.objects.get(purchaseID=purchaseID)
paf.pcourse = 3
paf.save()
return redirect('/index')
# procurement: add the requester's receipt date
def addconsignee(request):
if request.method == "POST":
purchaseID = request.POST.get("purchaseID")
cgdate = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
pcomment = request.POST.get("pcomment")
cg = Consignee.createconsignee(purchaseID ,cgdate ,pcomment)
cg.save()
paf = purchaseApplyFor.objects.get(purchaseID=purchaseID)
        paf.pcourse = 100  # means success
paf.save()
return redirect('/index')
# procurement: the requester checks progress
def mypurchase(request ,pageid):
username = request.session["username"]
print(username ,"当前登录用户------------")
user = User.objects.get(uname=username)
    pafList = purchaseApplyFor.objects.filter(pname=username)  # fetch all purchase orders I submitted
paginator = Paginator(pafList ,5)
page = paginator.page(pageid)
return render(request ,"repairANDbuyer/mypurchase.html" ,{"title":"我的采购申请" ,"pafList":page})
def onepurchase(request ,pageid):
print(request.path ,"-------------------------------------")#/repairANDbuyer/mypurchase/220161824/
path = request.path
newpath = path[27:36] #id
print(newpath ,"-------------------------------------")
paf = purchaseApplyFor.objects.get(purchaseID=newpath)
goodsList = Goods.objects.filter(purchaseID=newpath)
    if paf.pcourse == 1:  # only the request form and item info
return render(request, "repairANDbuyer/onepurchase.html" ,{"title":"具体进程" ,"paf":paf ,"goodsList":goodsList})
    elif paf.pcourse == 2:  # cost info
gc = goodsCost.objects.get(purchaseID=newpath)
return render(request, "repairANDbuyer/onepurchase.html" ,{"title":"具体进程" ,"paf":paf ,"goodsList":goodsList ,"gc":gc})
    elif paf.pcourse == 3:  # department manager approved
gc = goodsCost.objects.get(purchaseID=newpath)
mc = managerConsent.objects.get(purchaseID=newpath)
return render(request, "repairANDbuyer/onepurchase.html",
{"title": "具体进程", "paf": paf, "goodsList": goodsList, "gc": gc ,"mc":mc})
elif paf.pcourse == 100:
gc = goodsCost.objects.get(purchaseID=newpath)
mc = managerConsent.objects.get(purchaseID=newpath)
cg = Consignee.objects.get(purchaseID=newpath)
return render(request ,"repairANDbuyer/onepurchase.html" ,{"title":"具体进程" ,"paf":paf ,"goodsList":goodsList
,"gc":gc ,"mc":mc ,"cg":cg})
|
[
"153630535@qq.com"
] |
153630535@qq.com
|
blob_id: 25726eb229282248139e4ee3fdfe34b1fb699967 | directory_id: b28dec49edce6943e8a15cfe198cc6fc902e8e15 | content_id: e221c02315552fa9e94a93cc20811527544a7785
repo_name: ddiyoo/vanilla_gan | path: /main.py | branch_name: refs/heads/main
snapshot_id: a673df2580145ff27e3605792b066c1f48f4cbe2 | revision_id: 5a31b709ec96c8fc04d93d8bd30e794eb205a56b
visit_date: 2023-01-11T14:44:53.790348 | revision_date: 2020-11-12T11:22:42 | committer_date: 2020-11-12T11:22:42
detected_licenses: [] | license_type: no_license | github_id: 306,255,881 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,218 | extension: py
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers
# def generator():
#
# model = tf.keras.Sequential()
# ## 25*25*128: connect 25*25*128 nodes to the Sequential model through a Dense layer (?), no bias, input data shape (100,)
# model.add(layers.Dense(25 * 25 * 128, use_bias=False,input_shape=(100,)))
# model.add(layers.BatchNormalization())
# model.add(layers.ReLU())
#
# model.add(layers.Reshape((25, 25, 128)))
# ## raises an assertion error if output_shape is not (25, 25, 128)
# assert model.output_shape == (None, 25, 25, 128)
#
# model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
# assert model.output_shape == (None, 25, 25, 128)
# model.add(layers.BatchNormalization())
# model.add(layers.ReLU())
#
# model.add(layers.Conv2DTranspose(64, (5,5), strides=(2,2), padding='same', use_bias=False))
# assert model.output_shape == (None, 50, 50, 64)
# model.add(layers.BatchNormalization())
# model.add(layers.ReLU())
#
# ## upsample so that the final image comes out 50*50*64
#
# return model
latent_depth = 100
lr = 0.0001
def generator():
model = tf.keras.Sequential()
model.add(layers.Dense(25*25*128, use_bias=False, input_shape=(100,)))
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
assert model.output_shape == (None, 25,25,128)
    ## number of filters, kernel (filter) size; padding='same' makes output_size the same as input_size
model.add(layers.Conv2DTranspose(128, (5,5), strides=(1,1), padding='same', use_bias=False))
assert model.output_shape == (None, 25, 25, 128)
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
model.add(layers.Conv2DTranspose(64, (5,5),strides=(2,2),padding='same', use_bias=False ))
assert model.output_shape == (None, 50, 50, 64)
model.add(layers.BatchNormalization())
model.add(layers.ReLU())
model.add(layers.Conv2DTranspose(3, (5,5), strides=(2,2), padding='same', use_bias=False, activation='sigmoid'))
assert model.output_shape == (None, 100, 100, 3)
model.summary()
return model
# g_test = generator()
# test = np.random.randint(0,225, size=100)
# test = tf.random.normal([1,100])
# g_test(test)
generator = generator()
z_vector = tf.random.normal([1, latent_depth])
generated_image = generator(z_vector, training=True)
def discriminator():
model = tf.keras.Sequential()
model.add(layers.Conv2D(64, (5,5), strides=(2,2), padding='same', input_shape=[100,100,3]))
    model.add(layers.LeakyReLU(0.2))  # 0.2 is a leak slope, so LeakyReLU is presumably intended; ReLU's first argument is max_value, which would cap activations at 0.2
model.add(layers.Dropout(0.3))
model.add(layers.Conv2D(128, (5,5), strides=(2,2), padding='same'))
model.add(layers.ReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Flatten())
model.add(layers.Dense(1))
model.summary()
return model
##dcgan project and reshape layer?
discriminator = discriminator()
decision = discriminator(generated_image)
generator_optimizer = tf.keras.optimizers.Adam(lr=lr)
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
|
[
"ddiyoo@gmail.com"
] |
ddiyoo@gmail.com
|
blob_id: 4f9ba3ef8f3d66873ccc13f5f8d1521dfb124945 | directory_id: 5d7798f4bf6bce76099af9245ea375bd17b0e104 | content_id: fbf3595c0866c288c7736656cb64fed0d5d2bb69
repo_name: shivam11509330/sippython | path: /prime odd even.py | branch_name: refs/heads/master
snapshot_id: 4c02d6410bd8ae2110d9c7ede9fba7e9854a550a | revision_id: 4b558cbdf7886ad97dc886e672c7735943af4c9f
visit_date: 2020-06-03T07:39:39.862789 | revision_date: 2019-07-24T06:18:35 | committer_date: 2019-07-24T06:18:35
detected_licenses: [] | license_type: no_license | github_id: 191,498,602 | star_events_count: 2 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 308 | extension: py
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 21 11:43:18 2019
@author: aa
"""
i=260
for i in range(260,280,2):
print(i,end=' ')
if i==270:
print('exit')
break
number=[i*1 for i in range(20,35)]
number  # bare expression; only echoes the list in an interactive session
for i in number: print(i, "\t", i%2 > 0 , end = "\n")
|
[
"noreply@github.com"
] |
shivam11509330.noreply@github.com
|
blob_id: f8703c2a2ac66515991dee68bb07669901a4be36 | directory_id: ba36ef3c5c3885ba8b0994dc379e45d66d13d329 | content_id: b81a9359555236d03faae1ffab486bd2fd752f58
repo_name: kritiksoman/CV-and-Neural-Nets-Basic | path: /experimental/Multiple-Instance-Retrieval/gen_color_db.py | branch_name: refs/heads/master
snapshot_id: 74532ed78a7a09b556dd89704061e3259c172077 | revision_id: ef9b8f0c68b0db57c7b1d6a56e93799f9022b127
visit_date: 2022-09-23T00:48:00.969570 | revision_date: 2022-09-03T08:12:43 | committer_date: 2022-09-03T08:12:43
detected_licenses: [] | license_type: no_license | github_id: 178,643,211 | star_events_count: 2 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,833 | extension: py
import cv2
import numpy as np
import scipy
from scipy.misc import imread
import cPickle as pickle
import random
import os
import matplotlib.pyplot as plt
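# NOTE: this file is Python 2 (cPickle, print statements, dict.iteritems,
# and the "except Exception, e" syntax below)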
trainPath = '/train new'
dirPathList=[f for f in os.listdir(os.getcwd()+trainPath)]
dirPathList.sort()
# Feature extractor
def extract_features(image_path, vector_size=32):
image = imread(image_path, mode="RGB")
try:
hist = cv2.calcHist(images=[image], channels=[0, 1, 2], mask=None,
histSize=[16, 16, 16], ranges=[0, 256] * 3)
dsc = hist.flatten().astype(np.uint8)
needed_size = (vector_size * 64)
if dsc.size < needed_size:
# if we have less the 32 descriptors then just adding zeros at the
# end of our feature vector
dsc = np.concatenate([dsc, np.zeros(needed_size - dsc.size)])
except cv2.error as e:
print 'Error: ', e
return None
return dsc
def batch_extractor(pickled_db_path="features.pck"):
imgIndex = 0
result = {}
for dirindex in dirPathList:
fileLoc = os.getcwd() + trainPath + '/' + dirindex
imagePathList = [f for f in os.listdir(fileLoc)]
imagePathList.sort()
# N += len(imagePathList)
for imagePath in imagePathList:
try:
f=os.path.join(fileLoc, imagePath)
print 'Extracting features from image %s' % f
name = dirindex + '_' +imagePath
result[name] = extract_features(f)
except Exception, e:
print e
print imagePath
with open(pickled_db_path, 'w') as fp:
pickle.dump(result, fp)
class Matcher(object):
def __init__(self, pickled_db_path="features.pck"):
with open(pickled_db_path) as fp:
self.data = pickle.load(fp)
self.names = []
self.matrix = []
for k, v in self.data.iteritems():
self.names.append(k)
self.matrix.append(v)
self.matrix = np.array(self.matrix)
self.names = np.array(self.names)
def cos_cdist(self, vector):
# getting cosine distance between search image and images database
v = vector.reshape(1, -1)
return scipy.spatial.distance.cdist(self.matrix, v, 'cosine').reshape(-1)
def match(self, image_path, topn=5):
features = extract_features(image_path)
img_distances = self.cos_cdist(features)
# getting top 5 records
nearest_ids = np.argsort(img_distances)[:topn].tolist()
nearest_img_paths = self.names[nearest_ids].tolist()
return nearest_img_paths, img_distances[nearest_ids].tolist()
def show_img(path):
img = imread(path, mode="RGB")
plt.imshow(img)
plt.show()
batch_extractor()
print('Color histogram database generated.')
|
[
"noreply@github.com"
] |
kritiksoman.noreply@github.com
|
blob_id: 9e1578b4167309ebd7988b94c37febefd051a916 | directory_id: 2be83155e50126aae93ec17e94492ee5f0691e30 | content_id: f0985acaa5c2131bb8e378e94e35ff56564aee4e
repo_name: manics/python-configurable-http-proxy | path: /configurable_http_proxy_test/test_proxy.py | branch_name: refs/heads/main
snapshot_id: 7be9cb453ba47c82673176e42a366fdaede014c8 | revision_id: d24458f1868fb0de6b4f5cb4d310301433cc0db4
visit_date: 2023-09-02T12:53:23.156803 | revision_date: 2021-09-01T07:13:41 | committer_date: 2021-09-01T07:13:41
detected_licenses: ["MIT"] | license_type: permissive | github_id: 415,345,446 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 18,984 | extension: py
import datetime
import json
import os
import pytest
from tornado.httpclient import HTTPClientError, HTTPRequest
from tornado.httpserver import HTTPServer
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, get_async_test_timeout, gen_test
from tornado.web import Application, RequestHandler
from tornado.websocket import WebSocketHandler, websocket_connect
from configurable_http_proxy.configproxy import PythonProxy
from configurable_http_proxy_test.testutil import RESOURCES_PATH, pytest_regex
class TargetHandler(WebSocketHandler):
def initialize(self, target=None, path=None, **kwargs):
super().initialize(**kwargs)
self.target = target
self.path = path
async def get(self, path=None):
if self.request.headers.get("Upgrade", "").lower() == "websocket":
await WebSocketHandler.get(self, path)
return
reply = {
"target": self.target,
"path": self.path,
"url": self.request.uri,
"headers": dict(self.request.headers.get_all()),
}
self.set_status(200)
self.set_header("Content-Type", "application/json")
if self.get_argument("with_set_cookie"):
# Values that set-cookie can take:
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie
values = {
"Secure": "",
"HttpOnly": "",
"SameSite": "None",
"Path": "/",
"Domain": "example.com",
"Max-Age": "999999",
"Expires": "Fri, 01 Oct 2020 06:12:16 GMT", # .strftime('%a, %d %b %Y %H:%M:%S %Z')
}
self.add_header("Set-Cookie", "key=val")
for name, val in values.items():
self.add_header("Set-Cookie", f"{name}_key=val; {name}={val}")
combined = "; ".join((f"{name}={val}" for name, val in values.items()))
self.add_header("Set-Cookie", f"combined_key=val; {combined}")
self.write(json.dumps(reply))
self.finish()
def open(self, path=None):
self.write_message("connected")
def on_message(self, message):
reply = {
"target": self.target,
"path": self.path,
"message": message,
}
self.write_message(json.dumps(reply))
class RedirectingTargetHandler(RequestHandler):
def initialize(self, target=None, path=None, redirect_to=None, **kwargs):
super().initialize(**kwargs)
self.target = target
self.path = path
self.redirect_to = redirect_to
def get(self, path=None):
self.set_header("Location", self.redirect_to)
self.set_status(301)
self.finish()
class ErrorTargetHandler(RequestHandler):
def initialize(self, target=None, path=None, **kwargs):
super().initialize(**kwargs)
def get(self, path=None):
self.set_header("Content-Type", "text/plain")
self.write(self.get_query_argument("url"))
self.finish()
class TestProxy(AsyncHTTPTestCase):
def _add_server(self, server):
servers = getattr(self, "_created_http_servers", [])
servers.append(server)
self._created_http_servers = servers
def _add_target_route(self, path, target_path="", handler=TargetHandler, **kwargs):
sock, port = bind_unused_port()
target = f"http://127.0.0.1:{port}" + target_path
app = Application(
[
(r"/(.*)", handler, {"target": target, "path": path, **kwargs}),
]
)
http_server = HTTPServer(app)
http_server.add_sockets([sock])
self._add_server(http_server)
self.proxy.add_route(path, {"target": target})
# routes are created with an activity timestamp artificially shifted into the past
# so that activity can more easily be measured
self.proxy._routes.update("/", {"last_activity": self.start_time})
return target
def tearDown(self):
for server in self._created_http_servers:
server.stop()
self.io_loop.run_sync(server.close_all_connections, timeout=get_async_test_timeout())
return super().tearDown()
def get_app(self):
self.proxy = PythonProxy()
self.start_time = datetime.datetime.now() - datetime.timedelta(hours=1)
self._add_target_route(path="/")
return self.proxy.proxy_app
def fetch(self, path, raise_error=True, **kwargs):
return super().fetch(path, raise_error=raise_error, **kwargs)
def test_basic_http_request(self):
now = datetime.datetime.now()
last_hour = now - datetime.timedelta(hours=1)
self.proxy._routes.update("/", {"last_activity": last_hour})
resp = self.fetch("/")
reply = json.loads(resp.body)
assert reply["path"] == "/"
# check last_activity was updated
route = self.proxy.get_route("/")
assert route["last_activity"] > now
# check the other HTTP methods too
resp = self.fetch("/", method="HEAD", raise_error=False)
assert resp.code == 405
resp = self.fetch("/", method="OPTIONS", raise_error=False)
assert resp.code == 405
resp = self.fetch("/", method="POST", body="", raise_error=False)
assert resp.code == 405
resp = self.fetch("/", method="DELETE", raise_error=False)
assert resp.code == 405
resp = self.fetch("/", method="PATCH", body="", raise_error=False)
assert resp.code == 405
resp = self.fetch("/", method="PUT", body="", raise_error=False)
assert resp.code == 405
@gen_test
def test_basic_websocket_request(self):
now = datetime.datetime.now()
route = self.proxy.get_route("/")
assert route["last_activity"] <= now
ws_client = yield websocket_connect(self.get_url("/").replace("http:", "ws:"))
ws_client.write_message("hi")
response = yield ws_client.read_message()
assert response == "connected"
response = yield ws_client.read_message()
reply = json.loads(response)
assert reply["path"] == "/"
assert reply["message"] == "hi"
# check last_activity was updated
route = self.proxy.get_route("/")
assert route["last_activity"] > now
def test_sending_headers(self):
resp = self.fetch("/", headers={"testing": "OK"})
reply = json.loads(resp.body)
assert reply["path"] == "/"
assert reply["headers"].get("Testing") == "OK"
def test_proxy_request_event_can_modify_header(self):
pytest.skip("proxy_request event is not supported")
# it("proxyRequest event can modify headers", function (done) {
# var called = {};
# proxy.on("proxyRequest", function (req, res) {
# req.headers.testing = "Test Passed";
# called.proxyRequest = true;
# });
# r(proxyUrl)
# .then(function (body) {
# body = JSON.parse(body);
# expect(called.proxyRequest).toBe(true);
# expect(body).toEqual(
# jasmine.objectContaining({
# path: "/",
# })
# );
# expect(body.headers).toEqual(
# jasmine.objectContaining({
# testing: "Test Passed",
# })
# );
# })
# .then(done);
# });
def test_target_path_is_prepended_by_default(self):
self._add_target_route(path="/bar", target_path="/foo")
resp = self.fetch("/bar/rest/of/it")
reply = json.loads(resp.body)
assert reply["path"] == "/bar"
assert reply["url"] == "/foo/bar/rest/of/it"
def test_handle_path_with_querystring(self):
self._add_target_route(path="/bar", target_path="/foo")
resp = self.fetch("/bar?query=foo")
reply = json.loads(resp.body)
assert reply["path"] == "/bar"
assert reply["url"] == "/foo/bar?query=foo"
assert reply["target"] == pytest_regex(r"http://127.0.0.1:\d+/foo")
def test_handle_path_with_uri_encoding(self):
self._add_target_route(path="/b@r/b r", target_path="/foo")
resp = self.fetch("/b%40r/b%20r/rest/of/it")
reply = json.loads(resp.body)
assert reply["path"] == "/b@r/b r"
assert reply["url"] == "/foo/b%40r/b%20r/rest/of/it"
def test_handle_path_with_uri_encoding_partial(self):
self._add_target_route(path="/b@r/b r", target_path="/foo")
resp = self.fetch("/b@r/b%20r/rest/of/it")
reply = json.loads(resp.body)
assert reply["path"] == "/b@r/b r"
assert reply["url"] == "/foo/b%40r/b%20r/rest/of/it"
def test_target_without_prepend_path(self):
self.proxy.prepend_path = False
self._add_target_route(path="/bar", target_path="/foo")
resp = self.fetch("/bar/rest/of/it")
reply = json.loads(resp.body)
assert reply["path"] == "/bar"
assert reply["url"] == "/bar/rest/of/it"
def test_target_without_include_prefix(self):
self.proxy.include_prefix = False
self._add_target_route(path="/bar", target_path="/foo")
resp = self.fetch("/bar/rest/of/it")
reply = json.loads(resp.body)
assert reply["path"] == "/bar"
assert reply["url"] == "/foo/rest/of/it"
def test_default_target_config(self):
proxy = PythonProxy({"default_target": "http://127.0.0.1:12345"})
route = proxy.get_route("/")
assert route["target"] == "http://127.0.0.1:12345"
def test_storage_backend_config_invalid(self):
with pytest.raises(AssertionError, match="Unknown backend provided 'invalid_storage'"):
PythonProxy({"storage_backend": "invalid_storage"})
def test_storage_backend_config(self):
# With a importable string
proxy = PythonProxy(
{"storage_backend": "configurable_http_proxy_test.dummy_store.PlugableDummyStore"}
)
assert type(proxy._routes).__name__ == "PlugableDummyStore"
# With a class
from configurable_http_proxy_test.dummy_store import PlugableDummyStore
proxy = PythonProxy({"storage_backend": PlugableDummyStore})
assert type(proxy._routes).__name__ == "PlugableDummyStore"
def test_without_include_prefix_and_without_prepend_path(self):
self.proxy.include_prefix = False
self.proxy.prepend_path = False
self._add_target_route(path="/bar", target_path="/foo")
resp = self.fetch("/bar/rest/of/it")
reply = json.loads(resp.body)
assert reply["path"] == "/bar"
assert reply["url"] == "/rest/of/it"
@pytest.mark.xfail(reason="host_routing doesnt work")
def test_host_routing_config(self):
self.proxy.host_routing = True
host = "test.localhost.org"
target_url = self._add_target_route(path="/" + host)
resp = self.fetch(f"http://{host}:{self.get_http_port()}/some/path")
reply = json.loads(resp.body)
assert reply["target"] == target_url # "http://127.0.0.1:" + testPort,
assert reply["url"] == "/some/path"
def test_last_activity_not_updated_on_errors(self):
now = datetime.datetime.now()
self.proxy.remove_route("/")
self.proxy.add_route("/missing", {"target": "https://127.0.0.1:12345"})
self.proxy._routes.update("/missing", {"last_activity": now})
# fail a http activity
resp = self.fetch("/missing/prefix", raise_error=False)
assert resp.code == 503 # This should be 503 ??
assert self.proxy.get_route("/missing")["last_activity"] == now
@gen_test
def test_last_activity_not_updated_on_errors_websocket(self):
now = datetime.datetime.now()
self.proxy.remove_route("/")
self.proxy.add_route("/missing", {"target": "https://127.0.0.1:12345"})
self.proxy._routes.update("/missing", {"last_activity": now})
# fail a websocket activity
with pytest.raises(HTTPClientError, match="HTTP 503: Service Unavailable"):
yield websocket_connect(self.get_url("/missing/ws").replace("http:", "ws:"))
# expect an error, since there is no websocket handler - check last_activity was not updated
route = self.proxy.get_route("/missing")
assert route["last_activity"] == now
def test_custom_error_target(self):
sock, port = bind_unused_port()
app = Application([(r"/(.*)", ErrorTargetHandler)])
http_server = HTTPServer(app)
http_server.add_sockets([sock])
self._add_server(http_server)
self.proxy.error_target = f"http://127.0.0.1:{port}"
self.proxy.remove_route("/")
resp = self.fetch("/foo/bar", raise_error=False)
assert resp.code == 404
assert resp.headers["content-type"] == "text/plain"
assert resp.body == b"/foo/bar"
def test_custom_error_path(self):
self.proxy.error_path = os.path.join(RESOURCES_PATH, "errors")
self.proxy.remove_route("/")
self.proxy.add_route("/missing", {"target": "http://127.0.0.1:54321"})
resp = self.fetch("/nope", raise_error=False)
assert resp.code == 404
assert resp.headers["content-type"] == "text/html"
assert b"<b>404'D!</b>" in resp.body
resp = self.fetch("/missing/prefix", raise_error=False)
assert resp.code == 503
assert resp.headers["content-type"] == "text/html"
assert b"<b>UNKNOWN ERROR</b>" in resp.body
def test_default_error_html(self):
self.proxy.remove_route("/")
self.proxy.add_route("/missing", {"target": "http://127.0.0.1:54321"})
resp = self.fetch("/nope", raise_error=False)
assert resp.code == 404
assert "text/html" in resp.headers["content-type"]
assert b"<title>404: Not Found</title>" in resp.body
resp = self.fetch("/missing/prefix", raise_error=False)
assert resp.code == 503
assert "text/html" in resp.headers["content-type"]
assert b"<title>503: Service Unavailable</title>" in resp.body
def test_redirect_location_untouched_without_rewrite_option(self):
redirect_to = "http://foo.com:12345/whatever"
target_url = self._add_target_route(
"/external/urlpath",
target_path="/internal/urlpath/",
handler=RedirectingTargetHandler,
redirect_to=redirect_to,
)
resp = self.fetch("/external/urlpath/rest/of/it", follow_redirects=False, raise_error=False)
assert resp.code == 301
assert resp.headers["Location"] == redirect_to
def test_redirect_location_with_rewriting(self):
pytest.xfail(reason="rewrite not supported")
# it("Redirect location with rewriting", function (done) {
# var proxyPort = 55556;
# var options = {
# protocolRewrite: "https",
# autoRewrite: true,
# };
# // where the backend server redirects us.
# // Note that http-proxy requires (logically) the redirection to be to the same (internal) host.
# var redirectTo = "https://127.0.0.1:" + testPort + "/whatever";
# var expectedRedirect = "https://127.0.0.1:" + proxyPort + "/whatever";
# util
# .setupProxy(proxyPort, options, [])
# .then((proxy) =>
# util.addTargetRedirecting(
# proxy,
# "/external/urlpath/",
# testPort,
# "/internal/urlpath/",
# redirectTo
# )
# )
# .then(() => r("http://127.0.0.1:" + proxyPort + "/external/urlpath/"))
# .then((body) => done.fail("Expected 301"))
# .catch((err) => {
# expect(err.statusCode).toEqual(301);
# expect(err.response.headers.location).toEqual(expectedRedirect);
# })
# .then(done);
# });
def test_health_check_request(self):
resp = self.fetch("/_chp_healthz")
reply = json.loads(resp.body)
assert reply == {"status": "OK"}
def test_target_not_found(self):
self.proxy.remove_route("/")
resp = self.fetch("/unknown", raise_error=False)
assert resp.code == 404
assert "text/html" in resp.headers["content-type"]
assert b"<title>404: Not Found</title>" in resp.body
@gen_test
def test_target_not_found_websocket(self):
self.proxy.remove_route("/")
with pytest.raises(HTTPClientError, match="HTTP 404: Not Found"):
yield websocket_connect(self.get_url("/unknown").replace("http:", "ws:"))
@gen_test
def test_websocket_failure_due_to_request(self):
# The tornado websocket internally checks for: header[ORIGIN] == header[HOST] if both the headers are present.
# This test checks that we close the ws_client correctly in case of such errors
with pytest.raises(HTTPClientError, match="HTTP 403: Forbidden"):
req = HTTPRequest(
self.get_url("/").replace("http:", "ws:"),
headers={
"Origin": "http://origin.com",
"Host": "http://host.com",
},
)
ws_client = yield websocket_connect(req)
def test_custom_headers(self):
self.proxy.custom_headers = {"testing_from_custom": "OK"}
resp = self.fetch("/", headers={"testing_from_request": "OK"})
reply = json.loads(resp.body)
assert reply["path"] == "/"
assert reply["headers"].get("Testing_from_request") == "OK"
assert reply["headers"].get("Testing_from_custom") == "OK"
def test_custom_headers_higher_priority(self):
self.proxy.custom_headers = {"testing": "from_custom"}
resp = self.fetch("/", headers={"testing": "from_request"})
reply = json.loads(resp.body)
assert reply["path"] == "/"
assert reply["headers"].get("Testing") == "from_custom"
def test_receiving_headers_setcookie(self):
# When the same header has multiple values - it needs to be handled correctly.
resp = self.fetch("/?with_set_cookie=1")
headers = list(resp.headers.get_all())
cookies = {}
for header_name, header in headers:
            if header_name.lower() != 'set-cookie':
continue
key, val = header.split("=", 1)
cookies[key] = val
assert "key" in cookies
assert cookies['key'] == 'val'
assert "combined_key" in cookies
assert cookies['combined_key'] == 'val; Secure=; HttpOnly=; SameSite=None; Path=/; Domain=example.com; Max-Age=999999; Expires=Fri, 01 Oct 2020 06:12:16 GMT'
for prefix in ["Secure", "HttpOnly", "SameSite", "Path", "Domain", "Max-Age", "Expires"]:
assert prefix + "_key" in cookies
|
[
"abdealikothari@gmail.com"
] |
abdealikothari@gmail.com
|
blob_id: 7bdfa6e952210f3f7b21f8dfcca7de529db2bec7 | directory_id: 565ccbe9e8d038858caa76fc27190e0db4fceeda | content_id: a435e7250ef2d9c48d5cd43c4f118ccb41e04d6b
repo_name: Grigs-b/proggit | path: /204/lines.py | branch_name: refs/heads/master
visit_date: 2020-06-04T15:05:49.601613 | revision_date: 2015-03-15T02:26:52 | committer_date: 2015-03-15T02:26:52
snapshot_id: 62ab7341d29d3607f6a1243a845a0049d322510d | revision_id: f52b7b75ae847b2c106384fe93a37b62301cb1e7
detected_licenses: [] | license_type: no_license | github_id: 29,889,522 | star_events_count: 1 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2015-02-05T21:18:29 | gha_created_at: 2015-01-27T00:31:55 | gha_language: Go
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 642 | extension: py
if __name__ == "__main__":
find = raw_input("Enter line to find in Macbeth: ")
lines = []
with open('204/macbeth.txt', 'r') as f:
lines = f.readlines()
# we're popping from the list, so reverse it
lines.reverse()
toparse = []
building = ""
while lines:
current = lines.pop()
while lines and current.startswith(" "):
building = "".join([building, current.lstrip(" ")])
current = lines.pop()
toparse.append(building)
building = ""
results = [block for block in toparse if find in block]
for result in results:
print(result)
|
[
"rgrigsby@inovasolutions.com"
] |
rgrigsby@inovasolutions.com
|
blob_id: 550c95dbcf2fdd7a9a620c53dd2c5d70185e544e | directory_id: 9cacd9c15a958fd5cdd0f618df6534d2686c2000 | content_id: 035530784b2353c12c5912b891b93fcc3d5190bd
repo_name: 1pkg/neura | path: /source/utils/corruptor.py | branch_name: refs/heads/master
snapshot_id: e274f6e09765659613264b88d0b1706fbb5b83a9 | revision_id: b5ac79d2141a556f9b488b6ae07cc89f8b0cbccd
visit_date: 2020-05-17T16:32:16.186559 | revision_date: 2019-04-27T22:05:01 | committer_date: 2020-04-26T17:03:10
detected_licenses: ["MIT"] | license_type: permissive | github_id: 183,821,881 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 3,943 | extension: py
from os import path, listdir, makedirs
from shutil import rmtree
from PIL import Image, ImageFile, ImageFilter
ImageFile.LOAD_TRUNCATED_IMAGES = True
class Corruptor:
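    # Degrades images (downscale, blur, low JPEG quality, optional center crop)
    # to produce a corrupted *_mod copy of each *_src train/test directory.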
def __init__(
self,
bundle,
image_quality=25,
image_blur=1,
image_size_factor=4,
with_crop=False,
crop_height=640,
crop_width=360,
verbose=True,
):
path_base = path.join(
path.dirname(__file__),
'..',
'..',
'dump',
)
self.__path_train = path.join(path_base, 'train', bundle)
self.__path_test = path.join(path_base, 'test', bundle)
self.__verbose = verbose
self.__image_quality = image_quality
self.__image_blur = image_blur
self.__image_size_factor = image_size_factor
self.__with_crop = with_crop
self.__crop_height = crop_height
self.__crop_width = crop_width
def run_walk(self):
data_path_src = f'{self.__path_train}_src'
data_path_mod = f'{self.__path_train}_mod'
self.__run_walk_internal(data_path_src, data_path_mod)
data_path_src = f'{self.__path_test}_src'
data_path_mod = f'{self.__path_test}_mod'
self.__run_walk_internal(data_path_src, data_path_mod)
def __run_walk_internal(self, path_src, path_dst):
if not path.exists(path_src):
return
if path.exists(path_dst):
rmtree(path_dst)
makedirs(path_dst)
index = 0
path_list = listdir(path_src)
for file_name in path_list:
file_name_src = path.join(path_src, file_name)
file_name_dst = path.join(path_dst, file_name)
if (path.isfile(file_name_src)):
index += 1
if self.__with_crop:
self.__run_single_crop(file_name_src)
self.__run_single_internal(file_name_src, file_name_dst)
if self.__verbose and index % 500 == 0:
msg = f'corruptor processing {path_src} | done {index} of {len(path_list)}'
print(msg)
def __run_single_internal(self, path_src, path_dst):
with Image.open(path_src) as image:
image = image.convert('RGB')
if self.__image_size_factor:
image = image.resize((
int(image.size[0] / self.__image_size_factor),
int(image.size[1] / self.__image_size_factor),
), Image.NEAREST)
blur = ImageFilter.GaussianBlur(radius=self.__image_blur)
image = image.filter(blur)
image.save(
path_dst,
'JPEG',
quality=self.__image_quality,
optimize=True,
progressive=True,
)
def __run_single_crop(self, path_src):
with Image.open(path_src) as image:
image = image.convert('RGB')
width, height = image.size
desire_height = self.__crop_height
desire_width = self.__crop_width
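            # scale by the larger ratio so the resized image fully covers the
            # desired box, then center-crop to crop_width x crop_height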
ratio_width = desire_width / width
ratio_height = desire_height / height
if ratio_width > ratio_height:
new_size = (desire_width, int(height * ratio_width))
else:
new_size = (int(width * ratio_height), desire_height)
image = image.resize(new_size, Image.NEAREST)
left = int((new_size[0] - desire_width) / 2.0)
top = int((new_size[1] - desire_height) / 2.0)
right = int((new_size[0] + desire_width) / 2.0)
bottom = int((new_size[1] + desire_height) / 2.0)
image = image.crop((left, top, right, bottom))
image.save(
path_src,
'JPEG',
optimize=True,
progressive=True,
)
|
[
"1pkg@protonmail.com"
] |
1pkg@protonmail.com
|
blob_id: 9ddd34f1084a35a7a317424f6ad7f05bc65c780e | directory_id: cbd9938756f5717adb27929481e2ff3176958e4f | content_id: 372fb3b9493eb6cfeddab4cdc9d014755185302e
repo_name: yaojiach/red-panda | path: /tests/integ/aws/test_athena.py | branch_name: refs/heads/master
snapshot_id: 3a8c16dabff6884697e8dd47ec1a597c2415de7c | revision_id: 40e625e9b983e6930d5556ceba1778286b0e9c3d
visit_date: 2022-10-08T13:30:44.760183 | revision_date: 2020-09-14T21:50:19 | committer_date: 2020-09-14T21:50:19
detected_licenses: ["MIT"] | license_type: permissive | github_id: 339,915,930 | star_events_count: 2 | fork_events_count: 0
gha_license_id: MIT | gha_event_created_at: 2021-02-18T02:40:55 | gha_created_at: 2021-02-18T02:40:55 | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 1,354 | extension: py
import pytest
from red_panda.aws.athena import AthenaUtils
import logging
LOGGER = logging.getLogger(__name__)
@pytest.fixture
def athena_utils(aws_config, athena_result_location, aws_region):
return AthenaUtils(
aws_config, athena_result_location, region_name=aws_region, work_group="primary"
)
def test_athena_run_query_return_df(athena_utils, glue_data, glue_db, glue_table_name):
sql = f"select * from {glue_db}.{glue_table_name}"
assert athena_utils.run_query(sql, as_df=True).equals(glue_data)
def test_athena_run_query_return_list(
athena_utils, glue_data, glue_db, glue_table_name
):
sql = f"select * from {glue_db}.{glue_table_name}"
assert all(
[
x == y
for x, y in zip(
athena_utils.run_query(sql, as_df=False), glue_data.to_dict("records")
)
]
)
def test_athena_run_query_use_cache(athena_utils, glue_db, glue_table_name):
# TODO: more robust test
import random
import string
sql = f"""select col0 as {''.join(random.choices(string.ascii_lowercase, k=10))}
from {glue_db}.{glue_table_name}"""
athena_utils.run_query(sql)
query_id_1 = athena_utils.cursor.query_id
athena_utils.run_query(sql, use_cache=True)
query_id_2 = athena_utils.cursor.query_id
assert query_id_1 == query_id_2
|
[
"jiachen.yao@outlook.com"
] |
jiachen.yao@outlook.com
|
blob_id: bc7eb55f96a4bb69c658419fd7c4a6141e27d36f | directory_id: 662f828a589996e5efab566c5dad122394951595 | content_id: 6c4d2e46ef961c7874ee576f9c48aee8939b3d7a
repo_name: BytesRobotics/br-travis-builds | path: /development_build_252_arm64_eloquent/install/launch_files/share/launch_files/launch/br.launch.py | branch_name: refs/heads/master
snapshot_id: 74334bd3c6c39bcbda9167b8028afe93602e24c3 | revision_id: 7fb57e265e9c6016768715ac80be49c7d070a450
visit_date: 2021-04-10T03:55:39.638150 | revision_date: 2020-06-01T18:14:34 | committer_date: 2020-06-01T18:14:34
detected_licenses: [] | license_type: no_license | github_id: 248,908,080 | star_events_count: 0 | fork_events_count: 1
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 85 | extension: py
/home/travis/build/BytesRobotics/br-core/ros2_ws/src/launch_files/launch/br.launch.py
authors: ["travis@travis-ci.org"] | author_id: travis@travis-ci.org
blob_id: 9e8d8780f15120d8c03a5558cd473678d4984077 | directory_id: da738cb496c189c880e09c812874db9a48574df1 | content_id: bd93d5355250cb1434ec62b486ee243614d17068
repo_name: SNVC1/mipt-fullstack2019 | path: /back/app/bets/migrations/0005_auto_20191203_2336.py | branch_name: refs/heads/master
snapshot_id: 2972ed20e146394005fa41a29a25fc85674b2271 | revision_id: 5dd6d5e61a8312917d68d863f6dca5876fad352a
visit_date: 2023-01-08T10:40:58.535068 | revision_date: 2020-11-10T17:59:19 | committer_date: 2020-11-10T17:59:19
detected_licenses: [] | license_type: no_license | github_id: 208,422,125 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2022-12-12T18:12:35 | gha_created_at: 2019-09-14T10:06:21 | gha_language: TypeScript
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 494 | extension: py
# Generated by Django 2.2.7 on 2019-12-03 20:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('bets', '0004_auto_20191203_2328'),
]
operations = [
migrations.RenameField(
model_name='bet',
old_name='game_id',
new_name='game',
),
migrations.RenameField(
model_name='bet',
old_name='player_id',
new_name='player',
),
]
|
[
"iggy99@mail.ru"
] |
iggy99@mail.ru
|
blob_id: 49849b563515cc94c624f7216ba44dc295d65586 | directory_id: d34c8fafcd4299c6e7514bc9fcb24f9330579220 | content_id: 488d2892d68e6ce9491406fa0dbabcabd1cc39fc
repo_name: MSeal/typeguard | path: /tests/test_typeguard_py36.py | branch_name: refs/heads/master
snapshot_id: 5d1b1d78078945bce38f56cfa3dc78d3a1bfd90e | revision_id: 40d78cb7c09bcd2ab08989053816619db713d8c2
visit_date: 2020-07-12T11:33:31.858356 | revision_date: 2019-08-26T20:59:17 | committer_date: 2019-08-26T20:59:17
detected_licenses: ["MIT"] | license_type: permissive | github_id: 204,808,760 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 2,152 | extension: py
from typing import AsyncGenerator
import pytest
from typeguard import TypeChecker, typechecked
class TestTypeChecked:
def test_async_generator(self):
async def run_generator():
@typechecked
async def genfunc() -> AsyncGenerator[int, str]:
values.append((yield 2))
values.append((yield 3))
values.append((yield 4))
gen = genfunc()
value = await gen.asend(None)
with pytest.raises(StopAsyncIteration):
while True:
value = await gen.asend(str(value))
assert isinstance(value, int)
values = []
coro = run_generator()
try:
for elem in coro.__await__():
print(elem)
except StopAsyncIteration as exc:
values = exc.value
assert values == ['2', '3', '4']
def test_async_generator_bad_yield(self):
@typechecked
async def genfunc() -> AsyncGenerator[int, str]:
yield 'foo'
gen = genfunc()
with pytest.raises(TypeError) as exc:
next(gen.__anext__().__await__())
exc.match('type of value yielded from generator must be int; got str instead')
def test_async_generator_bad_send(self):
@typechecked
async def genfunc() -> AsyncGenerator[int, str]:
yield 1
yield 2
gen = genfunc()
pytest.raises(StopIteration, next, gen.__anext__().__await__())
with pytest.raises(TypeError) as exc:
next(gen.asend(2).__await__())
exc.match('type of value sent to generator must be str; got int instead')
class TestTypeChecker:
@staticmethod
async def asyncgenfunc() -> AsyncGenerator[int, None]:
yield 1
@pytest.fixture
def checker(self):
return TypeChecker(__name__)
def test_async_generator(self, checker):
"""Make sure that the type checker does not complain about the None return value."""
with checker, pytest.warns(None) as record:
self.asyncgenfunc()
assert len(record) == 0
|
[
"alex.gronholm@nextday.fi"
] |
alex.gronholm@nextday.fi
|
blob_id: f07db021bf7da80d15a8e6950b5aff6072963ca9 | directory_id: bfbfe1cc7b285c85cc5a59690b837887bb198dd8 | content_id: 9e5830e2c1abf23c41aaba24252e9da641eaaccc
repo_name: pavoli/django_restapi | path: /api/api/urls.py | branch_name: refs/heads/master
snapshot_id: cca4c5387e46eb95cd7d65d8c80a9fa9d746fe03 | revision_id: f5627142ded4f4f3e81afcf77ef2490237ed8ccc
visit_date: 2021-09-25T18:19:20.545142 | revision_date: 2021-06-10T21:25:43 | committer_date: 2021-06-10T21:25:43
detected_licenses: [] | license_type: no_license | github_id: 247,872,644 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2021-09-22T18:45:45 | gha_created_at: 2020-03-17T03:41:36 | gha_language: JavaScript
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 906 | extension: py
"""api URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.views.generic import TemplateView
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('restapi.urls')),
path('', TemplateView.as_view(template_name='index.html'))
]
|
[
"pavel.olifer@gmail.com"
] |
pavel.olifer@gmail.com
|
blob_id: f626e4c5e87f1174bd5d5a600cd96e4fa1457a78 | directory_id: 7d97ab8643536fbec2a41e3f3171d4813b066351 | content_id: 5abbf5f7ca32a2e45f9ca1e97451a38a3a577720
repo_name: yoyojacky/rpico | path: /eetree_ext_pico_breath_screen_light_main.py | branch_name: refs/heads/main
snapshot_id: a05dc33c8a26cfe66db38136fbeec49221b888ac | revision_id: d1fb7ba126e50148d8507635702f526bb97f9611
visit_date: 2023-04-22T00:30:03.207182 | revision_date: 2021-04-23T06:42:41 | committer_date: 2021-04-23T06:42:41
detected_licenses: [] | license_type: no_license | github_id: 357,458,048 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: null | gha_created_at: null | gha_language: null
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 265 | extension: py
from machine import Pin, PWM
from time import sleep
pwm = PWM(Pin(15))
pwm.freq(1000)
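# duty_u16() takes a 16-bit duty value (0-65535); sweeping it up and then back
# down fades whatever is attached to GP15 in and out ("breathing" effect)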
while True:
for duty in range(65025):
pwm.duty_u16(duty)
sleep(0.0001)
for duty in range(65025, 0, -1):
pwm.duty_u16(duty)
sleep(0.0001)
|
[
"yoyojacky2009@gmail.com"
] |
yoyojacky2009@gmail.com
|
blob_id: f9d7a76a549884b1446f5afa5435bb211642a89a | directory_id: 784c18ff7441b117abf33290ba4c25270fc1ba34 | content_id: 64d6e0d104a5138da93f550282b9a952cf1ffe47
repo_name: kshitijvr93/Django-Work-Library | path: /projects/maw/marshal1/env/Scripts/runxlrd.py | branch_name: refs/heads/master
snapshot_id: a1b4dd0fd2936c3303cdce9b508bd9b63c4957cc | revision_id: dd173422d1347f8e464facd2dc137e439df8a34f
visit_date: 2022-12-12T08:23:08.032627 | revision_date: 2018-12-30T06:59:18 | committer_date: 2018-12-30T06:59:18
detected_licenses: [] | license_type: no_license | github_id: 152,334,131 | star_events_count: 0 | fork_events_count: 0
gha_license_id: null | gha_event_created_at: 2022-12-07T23:52:46 | gha_created_at: 2018-10-09T23:21:56 | gha_language: Python
src_encoding: UTF-8 | language: Python | is_vendor: false | is_generated: false | length_bytes: 16,334 | extension: py
#!c:\users\kshit\downloads\library_maw_project\marshal\projects\maw\marshal1\env\scripts\python.exe
# Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd
# This script is part of the xlrd package, which is released under a
# BSD-style licence.
from __future__ import print_function
cmd_doc = """
Commands:
2rows Print the contents of first and last row in each sheet
3rows Print the contents of first, second and last row in each sheet
bench Same as "show", but doesn't print -- for profiling
biff_count[1] Print a count of each type of BIFF record in the file
biff_dump[1] Print a dump (char and hex) of the BIFF records in the file
fonts hdr + print a dump of all font objects
hdr Mini-overview of file (no per-sheet information)
hotshot Do a hotshot profile run e.g. ... -f1 hotshot bench bigfile*.xls
labels Dump of sheet.col_label_ranges and ...row... for each sheet
name_dump Dump of each object in book.name_obj_list
names Print brief information for each NAME record
ov Overview of file
profile Like "hotshot", but uses cProfile
show Print the contents of all rows in each sheet
version[0] Print versions of xlrd and Python and exit
xfc Print "XF counts" and cell-type counts -- see code for details
[0] means no file arg
[1] means only one file arg i.e. no glob.glob pattern
"""
options = None
if __name__ == "__main__":
PSYCO = 0
import xlrd
import sys, time, glob, traceback, gc
from xlrd.timemachine import xrange, REPR
class LogHandler(object):
def __init__(self, logfileobj):
self.logfileobj = logfileobj
self.fileheading = None
self.shown = 0
def setfileheading(self, fileheading):
self.fileheading = fileheading
self.shown = 0
def write(self, text):
if self.fileheading and not self.shown:
self.logfileobj.write(self.fileheading)
self.shown = 1
self.logfileobj.write(text)
null_cell = xlrd.empty_cell
def show_row(bk, sh, rowx, colrange, printit):
if bk.ragged_rows:
colrange = range(sh.row_len(rowx))
if not colrange: return
if printit: print()
if bk.formatting_info:
for colx, ty, val, cxfx in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r, xfx: %s"
% (xlrd.colname(colx), rowx+1, ty, val, cxfx))
else:
for colx, ty, val, _unused in get_row_data(bk, sh, rowx, colrange):
if printit:
print("cell %s%d: type=%d, data: %r" % (xlrd.colname(colx), rowx+1, ty, val))
def get_row_data(bk, sh, rowx, colrange):
result = []
dmode = bk.datemode
ctys = sh.row_types(rowx)
cvals = sh.row_values(rowx)
for colx in colrange:
cty = ctys[colx]
cval = cvals[colx]
if bk.formatting_info:
cxfx = str(sh.cell_xf_index(rowx, colx))
else:
cxfx = ''
if cty == xlrd.XL_CELL_DATE:
try:
showval = xlrd.xldate_as_tuple(cval, dmode)
except xlrd.XLDateError as e:
showval = "%s:%s" % (type(e).__name__, e)
cty = xlrd.XL_CELL_ERROR
elif cty == xlrd.XL_CELL_ERROR:
showval = xlrd.error_text_from_code.get(cval, '<Unknown error code 0x%02x>' % cval)
else:
showval = cval
result.append((colx, cty, showval, cxfx))
return result
def bk_header(bk):
print()
print("BIFF version: %s; datemode: %s"
% (xlrd.biff_text_from_num[bk.biff_version], bk.datemode))
print("codepage: %r (encoding: %s); countries: %r"
% (bk.codepage, bk.encoding, bk.countries))
print("Last saved by: %r" % bk.user_name)
print("Number of data sheets: %d" % bk.nsheets)
print("Use mmap: %d; Formatting: %d; On demand: %d"
% (bk.use_mmap, bk.formatting_info, bk.on_demand))
print("Ragged rows: %d" % bk.ragged_rows)
if bk.formatting_info:
print("FORMATs: %d, FONTs: %d, XFs: %d"
% (len(bk.format_list), len(bk.font_list), len(bk.xf_list)))
if not options.suppress_timing:
print("Load time: %.2f seconds (stage 1) %.2f seconds (stage 2)"
% (bk.load_time_stage_1, bk.load_time_stage_2))
print()
def show_fonts(bk):
print("Fonts:")
for x in xrange(len(bk.font_list)):
font = bk.font_list[x]
font.dump(header='== Index %d ==' % x, indent=4)
def show_names(bk, dump=0):
bk_header(bk)
if bk.biff_version < 50:
print("Names not extracted in this BIFF version")
return
nlist = bk.name_obj_list
print("Name list: %d entries" % len(nlist))
for nobj in nlist:
if dump:
nobj.dump(sys.stdout,
header="\n=== Dump of name_obj_list[%d] ===" % nobj.name_index)
else:
print("[%d]\tName:%r macro:%r scope:%d\n\tresult:%r\n"
% (nobj.name_index, nobj.name, nobj.macro, nobj.scope, nobj.result))
def print_labels(sh, labs, title):
if not labs:return
for rlo, rhi, clo, chi in labs:
print("%s label range %s:%s contains:"
% (title, xlrd.cellname(rlo, clo), xlrd.cellname(rhi-1, chi-1)))
for rx in xrange(rlo, rhi):
for cx in xrange(clo, chi):
print(" %s: %r" % (xlrd.cellname(rx, cx), sh.cell_value(rx, cx)))
def show_labels(bk):
# bk_header(bk)
hdr = 0
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
clabs = sh.col_label_ranges
rlabs = sh.row_label_ranges
if clabs or rlabs:
if not hdr:
bk_header(bk)
hdr = 1
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
print_labels(sh, clabs, 'Col')
print_labels(sh, rlabs, 'Row')
if bk.on_demand: bk.unload_sheet(shx)
def show(bk, nshow=65535, printit=1):
bk_header(bk)
if 0:
rclist = xlrd.sheet.rc_stats.items()
rclist = sorted(rclist)
print("rc stats")
for k, v in rclist:
print("0x%04x %7d" % (k, v))
if options.onesheet:
try:
shx = int(options.onesheet)
except ValueError:
shx = bk.sheet_by_name(options.onesheet).number
shxrange = [shx]
else:
shxrange = range(bk.nsheets)
# print("shxrange", list(shxrange))
for shx in shxrange:
sh = bk.sheet_by_index(shx)
nrows, ncols = sh.nrows, sh.ncols
colrange = range(ncols)
anshow = min(nshow, nrows)
print("sheet %d: name = %s; nrows = %d; ncols = %d" %
(shx, REPR(sh.name), sh.nrows, sh.ncols))
if nrows and ncols:
# Beat the bounds
for rowx in xrange(nrows):
nc = sh.row_len(rowx)
if nc:
_junk = sh.row_types(rowx)[nc-1]
_junk = sh.row_values(rowx)[nc-1]
_junk = sh.cell(rowx, nc-1)
for rowx in xrange(anshow-1):
if not printit and rowx % 10000 == 1 and rowx > 1:
print("done %d rows" % (rowx-1,))
show_row(bk, sh, rowx, colrange, printit)
if anshow and nrows:
show_row(bk, sh, nrows-1, colrange, printit)
print()
if bk.on_demand: bk.unload_sheet(shx)
def count_xfs(bk):
bk_header(bk)
for shx in range(bk.nsheets):
sh = bk.sheet_by_index(shx)
nrows, ncols = sh.nrows, sh.ncols
print("sheet %d: name = %r; nrows = %d; ncols = %d" %
(shx, sh.name, sh.nrows, sh.ncols))
# Access all xfindexes to force gathering stats
type_stats = [0, 0, 0, 0, 0, 0, 0]
for rowx in xrange(nrows):
for colx in xrange(sh.row_len(rowx)):
xfx = sh.cell_xf_index(rowx, colx)
assert xfx >= 0
cty = sh.cell_type(rowx, colx)
type_stats[cty] += 1
print("XF stats", sh._xf_index_stats)
print("type stats", type_stats)
print()
if bk.on_demand: bk.unload_sheet(shx)
def main(cmd_args):
import optparse
global options, PSYCO
usage = "\n%prog [options] command [input-file-patterns]\n" + cmd_doc
oparser = optparse.OptionParser(usage)
oparser.add_option(
"-l", "--logfilename",
default="",
help="contains error messages")
oparser.add_option(
"-v", "--verbosity",
type="int", default=0,
help="level of information and diagnostics provided")
oparser.add_option(
"-m", "--mmap",
type="int", default=-1,
help="1: use mmap; 0: don't use mmap; -1: accept heuristic")
oparser.add_option(
"-e", "--encoding",
default="",
help="encoding override")
oparser.add_option(
"-f", "--formatting",
type="int", default=0,
help="0 (default): no fmt info\n"
"1: fmt info (all cells)\n"
)
oparser.add_option(
"-g", "--gc",
type="int", default=0,
help="0: auto gc enabled; 1: auto gc disabled, manual collect after each file; 2: no gc")
oparser.add_option(
"-s", "--onesheet",
default="",
help="restrict output to this sheet (name or index)")
oparser.add_option(
"-u", "--unnumbered",
action="store_true", default=0,
help="omit line numbers or offsets in biff_dump")
oparser.add_option(
"-d", "--on-demand",
action="store_true", default=0,
help="load sheets on demand instead of all at once")
oparser.add_option(
"-t", "--suppress-timing",
action="store_true", default=0,
help="don't print timings (diffs are less messy)")
oparser.add_option(
"-r", "--ragged-rows",
action="store_true", default=0,
help="open_workbook(..., ragged_rows=True)")
options, args = oparser.parse_args(cmd_args)
if len(args) == 1 and args[0] in ("version", ):
pass
elif len(args) < 2:
oparser.error("Expected at least 2 args, found %d" % len(args))
cmd = args[0]
xlrd_version = getattr(xlrd, "__VERSION__", "unknown; before 0.5")
if cmd == 'biff_dump':
xlrd.dump(args[1], unnumbered=options.unnumbered)
sys.exit(0)
if cmd == 'biff_count':
xlrd.count_records(args[1])
sys.exit(0)
if cmd == 'version':
print("xlrd: %s, from %s" % (xlrd_version, xlrd.__file__))
print("Python:", sys.version)
sys.exit(0)
if options.logfilename:
logfile = LogHandler(open(options.logfilename, 'w'))
else:
logfile = sys.stdout
mmap_opt = options.mmap
mmap_arg = xlrd.USE_MMAP
if mmap_opt in (1, 0):
mmap_arg = mmap_opt
elif mmap_opt != -1:
print('Unexpected value (%r) for mmap option -- assuming default' % mmap_opt)
fmt_opt = options.formatting | (cmd in ('xfc', ))
gc_mode = options.gc
if gc_mode:
gc.disable()
for pattern in args[1:]:
for fname in glob.glob(pattern):
print("\n=== File: %s ===" % fname)
if logfile != sys.stdout:
logfile.setfileheading("\n=== File: %s ===\n" % fname)
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC before open:", n_unreachable, "unreachable objects")
if PSYCO:
import psyco
psyco.full()
PSYCO = 0
try:
t0 = time.time()
bk = xlrd.open_workbook(fname,
verbosity=options.verbosity, logfile=logfile,
use_mmap=mmap_arg,
encoding_override=options.encoding,
formatting_info=fmt_opt,
on_demand=options.on_demand,
ragged_rows=options.ragged_rows,
)
t1 = time.time()
if not options.suppress_timing:
print("Open took %.2f seconds" % (t1-t0,))
except xlrd.XLRDError as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
continue
except KeyboardInterrupt:
print("*** KeyboardInterrupt ***")
traceback.print_exc(file=sys.stdout)
sys.exit(1)
except BaseException as e:
print("*** Open failed: %s: %s" % (type(e).__name__, e))
traceback.print_exc(file=sys.stdout)
continue
t0 = time.time()
if cmd == 'hdr':
bk_header(bk)
elif cmd == 'ov': # OverView
show(bk, 0)
elif cmd == 'show': # all rows
show(bk)
elif cmd == '2rows': # first row and last row
show(bk, 2)
elif cmd == '3rows': # first row, 2nd row and last row
show(bk, 3)
elif cmd == 'bench':
show(bk, printit=0)
elif cmd == 'fonts':
bk_header(bk)
show_fonts(bk)
elif cmd == 'names': # named reference list
show_names(bk)
elif cmd == 'name_dump': # named reference list
show_names(bk, dump=1)
elif cmd == 'labels':
show_labels(bk)
elif cmd == 'xfc':
count_xfs(bk)
else:
print("*** Unknown command <%s>" % cmd)
sys.exit(1)
del bk
if gc_mode == 1:
n_unreachable = gc.collect()
if n_unreachable:
print("GC post cmd:", fname, "->", n_unreachable, "unreachable objects")
if not options.suppress_timing:
t1 = time.time()
print("\ncommand took %.2f seconds\n" % (t1-t0,))
return None
av = sys.argv[1:]
if not av:
main(av)
firstarg = av[0].lower()
if firstarg == "hotshot":
import hotshot, hotshot.stats
av = av[1:]
prof_log_name = "XXXX.prof"
prof = hotshot.Profile(prof_log_name)
# benchtime, result = prof.runcall(main, *av)
result = prof.runcall(main, *(av, ))
print("result", repr(result))
prof.close()
stats = hotshot.stats.load(prof_log_name)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
elif firstarg == "profile":
import cProfile
av = av[1:]
cProfile.run('main(av)', 'YYYY.prof')
import pstats
p = pstats.Stats('YYYY.prof')
p.strip_dirs().sort_stats('cumulative').print_stats(30)
elif firstarg == "psyco":
PSYCO = 1
main(av[1:])
else:
main(av)
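# Usage sketch (assuming this script is saved as runxlrd.py; the command
# names are the ones dispatched in main() above):
#
#   python runxlrd.py ov mybook.xls         # header plus per-sheet overview
#   python runxlrd.py -f 1 show mybook.xls  # dump every cell with its XF index
#   python runxlrd.py version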
# === File: /test/distributed/test_store.py (repo: standbyme/pytorch) ===
# Owner(s): ["oncall: distributed"]
import os
import random
import sys
import tempfile
import time
from datetime import timedelta
from sys import platform
import torch
import torch.distributed as dist
import torch.distributed.rpc as rpc
if not dist.is_available():
print("torch.distributed not available, skipping tests", file=sys.stderr)
sys.exit(0)
import torch.testing._internal.common_utils as common
from torch._six import string_classes
from torch.testing._internal.common_distributed import (
skip_if_win32,
create_tcp_store
)
from torch.testing._internal.common_utils import (
TestCase,
load_tests,
run_tests,
retry_on_connect_failures,
ADDRESS_IN_USE,
CONNECT_TIMEOUT,
)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
if platform == "darwin":
LOOPBACK = "lo0"
else:
LOOPBACK = "lo"
DEFAULT_HOSTNAME = "localhost"
torch.backends.cuda.matmul.allow_tf32 = False
def gpus_for_rank(world_size):
"""Multigpu tests are designed to simulate the multi nodes with multi
GPUs on each node. Nccl backend requires equal #GPUs in each process.
On a single node, all visible GPUs are evenly
divided to subsets, each process only uses a subset.
"""
visible_devices = list(range(torch.cuda.device_count()))
gpus_per_process = torch.cuda.device_count() // world_size
gpus_for_rank = []
for rank in range(world_size):
gpus_for_rank.append(
visible_devices[rank * gpus_per_process: (rank + 1) * gpus_per_process]
)
return gpus_for_rank
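# Illustration (hypothetical numbers): with 8 visible GPUs and world_size=2,
# gpus_for_rank(2) returns [[0, 1, 2, 3], [4, 5, 6, 7]].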
class StoreTestBase(object):
def _create_store(self, i):
raise RuntimeError("not implemented")
def _test_set_get(self, fs):
fs.add("key", 1)
fs.add("key", 2)
fs.add("key", 3)
fs.set("key0", "value0")
fs.add("key3", 1)
fs.set("key1", "value1")
fs.add("key3", 2)
fs.set("key2", "value2")
fs.add("key3", 3)
fs.add("key3", 4)
fs.add("key3", 5)
fs.add("key3", 6)
self.assertEqual(fs.num_keys(), self.num_keys_total)
self.assertEqual(b"6", fs.get("key"))
self.assertEqual(b"value0", fs.get("key0"))
self.assertEqual(b"value1", fs.get("key1"))
self.assertEqual(b"value2", fs.get("key2"))
self.assertEqual(b"21", fs.get("key3"))
def test_set_get(self):
self._test_set_get(self._create_store())
def _test_compare_set(self, store):
missing_key_result = store.compare_set("cs_key0", "wrong_old_value", "new_value0")
self.assertEqual(b"wrong_old_value", missing_key_result)
store.set("cs_key0", "value0")
self.assertEqual(b"value0", store.get("cs_key0"))
old_value_result = store.compare_set("cs_key0", "wrong_old_value", "new_value0")
self.assertEqual(b"value0", old_value_result)
self.assertEqual(b"value0", store.get("cs_key0"))
new_value_result = store.compare_set("cs_key0", "value0", "new_value0")
self.assertEqual(b"new_value0", new_value_result)
self.assertEqual(b"new_value0", store.get("cs_key0"))
empty_old_value_result = store.compare_set("cs_key1", "", "new_value1")
self.assertEqual(b"new_value1", empty_old_value_result)
self.assertEqual(b"new_value1", store.get("cs_key1"))
def test_compare_set(self):
self._test_compare_set(self._create_store())
# This is the number of keys used in test_set_get. Adding this as a class
# property instead of hardcoding in the test since some Store
    # implementations will have a differing number of keys. In the base case,
# there will be 5 keys: key, key0, key1, key2, key3.
@property
def num_keys_total(self):
return 5
class FileStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(FileStoreTest, self).setUp()
self.file = tempfile.NamedTemporaryFile(delete=False)
def _create_store(self):
store = dist.FileStore(self.file.name, 1)
store.set_timeout(timedelta(seconds=300))
return store
@skip_if_win32()
class HashStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(HashStoreTest, self).setUp()
def _create_store(self):
store = dist.HashStore()
store.set_timeout(timedelta(seconds=300))
return store
class PrefixFileStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(PrefixFileStoreTest, self).setUp()
self.file = tempfile.NamedTemporaryFile(delete=False)
self.filestore = dist.FileStore(self.file.name, 1)
self.prefix = "test_prefix"
self.filestore.set_timeout(timedelta(seconds=300))
def _create_store(self):
return dist.PrefixStore(self.prefix, self.filestore)
class TCPStoreTest(TestCase, StoreTestBase):
def _create_store(self):
store = create_tcp_store()
store.set_timeout(timedelta(seconds=300))
return store
def test_address_already_in_use(self):
if sys.platform == "win32":
err_msg_reg = "Only one usage of each socket address*"
else:
err_msg_reg = "^Address already in use$"
with self.assertRaisesRegex(RuntimeError, err_msg_reg):
addr = DEFAULT_HOSTNAME
port = common.find_free_port()
# Use noqa to silence flake8.
# Need to store in an unused variable here to ensure the first
# object is not destroyed before the second object is created.
store1 = dist.TCPStore(addr, port, 1, True) # noqa: F841
store2 = dist.TCPStore(addr, port, 1, True) # noqa: F841
def test_multitenancy(self):
addr = DEFAULT_HOSTNAME
port = common.find_free_port()
# Use noqa to silence flake8.
# Need to store in an unused variable here to ensure the first
# object is not destroyed before the second object is created.
store1 = dist.TCPStore(addr, port, 1, True, multi_tenant=True) # type: ignore[call-arg] # noqa: F841
store2 = dist.TCPStore(addr, port, 1, True, multi_tenant=True) # type: ignore[call-arg] # noqa: F841
@skip_if_win32()
def test_init_pg_and_rpc_with_same_socket(self):
addr = DEFAULT_HOSTNAME
port = common.find_free_port()
os.environ["MASTER_ADDR"] = addr
os.environ["MASTER_PORT"] = str(port)
# We internally use a multi-tenant TCP store. Both PG and RPC should successfully
# initialize even when using the same socket address.
dist.init_process_group(
backend="gloo",
init_method="env://",
rank=0,
world_size=1,
)
backend_opts = rpc.TensorPipeRpcBackendOptions(
init_method=f"tcp://{addr}:{port}"
)
rpc.init_rpc(
name="worker0",
rank=0,
world_size=1,
rpc_backend_options=backend_opts,
)
rpc.shutdown()
# The TCPStore has 6 keys in test_set_get. It contains the 5 keys added by
    # the user and one additional key used to coordinate all the workers.
@property
def num_keys_total(self):
return 6
def _test_numkeys_delkeys(self, fs):
# We start off with one init key in the store to coordinate workers
self.assertEqual(fs.num_keys(), 1)
fs.add("key", 1)
fs.add("key", 2)
fs.add("key", 3)
fs.set("key0", "value0")
fs.add("key3", 1)
fs.set("key1", "value1")
self.assertEqual(fs.num_keys(), 5)
fs.delete_key("key")
self.assertEqual(fs.num_keys(), 4)
fs.set_timeout(timedelta(seconds=2))
with self.assertRaises(RuntimeError):
fs.get("key")
fs.delete_key("key0")
fs.delete_key("key3")
self.assertEqual(fs.num_keys(), 2)
fs.set("key4", "value2")
self.assertEqual(fs.num_keys(), 3)
self.assertEqual(b"value1", fs.get("key1"))
self.assertEqual(b"value2", fs.get("key4"))
def test_numkeys_delkeys(self):
self._test_numkeys_delkeys(self._create_store())
def _create_client(self, index, addr, port, world_size):
client_store = dist.TCPStore(addr, port, world_size, timeout=timedelta(seconds=10))
self.assertEqual("value".encode(), client_store.get("key"))
client_store.set(f"new_key{index}", f"new_value{index}")
self.assertEqual(f"next_value{index}".encode(),
client_store.compare_set(f"new_key{index}", f"new_value{index}", f"next_value{index}"))
def _multi_worker_helper(self, world_size):
addr = DEFAULT_HOSTNAME
server_store = create_tcp_store(addr, world_size, wait_for_workers=False)
server_store.set("key", "value")
port = server_store.port
world_size = random.randint(5, 10) if world_size == -1 else world_size
for i in range(world_size):
self._create_client(i, addr, port, world_size)
def test_multi_worker_with_fixed_world_size(self):
self._multi_worker_helper(5)
def test_multi_worker_with_nonfixed_world_size(self):
self._multi_worker_helper(-1)
class PrefixTCPStoreTest(TestCase, StoreTestBase):
def setUp(self):
super(PrefixTCPStoreTest, self).setUp()
self.tcpstore = create_tcp_store()
self.prefix = "test_prefix"
self.tcpstore.set_timeout(timedelta(seconds=300))
def _create_store(self):
return dist.PrefixStore(self.prefix, self.tcpstore)
# The PrefixTCPStore has 6 keys in test_set_get. It contains the 5 keys
    # added by the user and one additional key used to coordinate all the
# workers.
@property
def num_keys_total(self):
return 6
class MyPythonStore(dist.Store):
def __init__(self):
super(MyPythonStore, self).__init__()
self.store = dict()
def set(self, key, value):
if not isinstance(key, string_classes):
raise AssertionError("Expected set to be called with string key")
if type(value) is not bytes:
raise AssertionError("Expected set to be called with bytes value")
self.store[key] = value
def get(self, key):
value = self.store.get(key, b"")
if type(value) is not bytes:
raise AssertionError("Expected get to return bytes value")
return value
def add(self, key, value):
new = int(self.store.get(key, 0)) + value
self.set(key, bytes(str(new).encode("utf-8")))
return new
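# Direct-usage sketch of the Store subclass above (illustrative comments
# only; the real coverage goes through the C++ trampoline exercised in
# PythonStoreTest below):
#
#   store = MyPythonStore()
#   store.set("k", b"1")
#   assert store.get("k") == b"1"
#   assert store.add("k", 2) == 3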
class PythonStoreTest(TestCase):
def setUp(self):
super(PythonStoreTest, self).setUp()
def test_set_get(self):
# If we were to inherit from StoreTestBase and try to use
# its test_set_get function, we would exercise the Python
# API directly, instead of going through the C++ trampoline.
# We care about testing the C++ trampoline, so run the
# equivalent of StoreTestBase.test_set_get from C++.
# See `torch/csrc/distributed/c10d/init.cpp` for the definition
# of this test function.
dist._test_python_store(MyPythonStore())
class RendezvousTest(TestCase):
def test_unknown_handler(self):
with self.assertRaisesRegex(RuntimeError, "^No rendezvous handler"):
dist.rendezvous("invalid://")
class RendezvousEnvTest(TestCase):
@retry_on_connect_failures
def test_nominal(self):
os.environ["WORLD_SIZE"] = "1"
os.environ["MASTER_ADDR"] = "127.0.0.1"
os.environ["MASTER_PORT"] = str(common.find_free_port())
# Single rank
os.environ["RANK"] = "0"
gen0 = dist.rendezvous("env://")
store0, rank0, size0 = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(1, size0)
store0.set("key0", "value0")
# check with get
self.assertEqual(b"value0", store0.get("key0"))
class RendezvousFileTest(TestCase):
def test_common_errors(self):
with self.assertRaisesRegex(ValueError, "path missing"):
gen = dist.rendezvous("file://?rank=0&world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "rank parameter missing"):
gen = dist.rendezvous("file:///tmp/foo?world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "size parameter missing"):
gen = dist.rendezvous("file:///tmp/foo?rank=0")
next(gen)
def test_nominal(self):
with tempfile.NamedTemporaryFile(delete=False) as file:
url = f'file:///{file.name.replace(os.path.sep, "/")}?world_size=2'
gen0 = dist.rendezvous(url + "&rank=0")
store0, rank0, size0 = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(2, size0)
gen1 = dist.rendezvous(url + "&rank=1")
store1, rank1, size1 = next(gen1)
self.assertEqual(1, rank1)
self.assertEqual(2, size1)
# Set value on both stores
store0.set("key0", "value0")
store1.set("key1", "value1")
# Cross check with get
self.assertEqual(b"value0", store1.get("key0"))
self.assertEqual(b"value1", store0.get("key1"))
@skip_if_win32()
class RendezvousTCPTest(TestCase):
def create_tcp_url(self):
addr = DEFAULT_HOSTNAME
port = common.find_free_port()
url = "tcp://%s:%d?world_size=%d" % (addr, port, 1)
return url
def test_common_errors(self):
with self.assertRaisesRegex(ValueError, "port number missing"):
gen = dist.rendezvous("tcp://127.0.0.1?rank=0&world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "rank parameter missing"):
gen = dist.rendezvous("tcp://127.0.0.1:23456?world_size=1")
next(gen)
with self.assertRaisesRegex(ValueError, "size parameter missing"):
gen = dist.rendezvous("tcp://127.0.0.1:23456?rank=0")
next(gen)
@retry_on_connect_failures
def test_nominal(self):
url = self.create_tcp_url()
gen0 = dist.rendezvous(url + "&rank=0")
store0, rank0, size0 = next(gen0)
self.assertEqual(0, rank0)
self.assertEqual(1, size0)
# Set value on the single store
store0.set("key0", "value0")
# check with get
self.assertEqual(b"value0", store0.get("key0"))
@retry_on_connect_failures(connect_errors=(CONNECT_TIMEOUT, ADDRESS_IN_USE))
def test_tcp_store_timeout_set(self):
url = self.create_tcp_url()
test_store_timeout = timedelta(seconds=10)
gen0 = dist.rendezvous(url + "&rank=0", timeout=test_store_timeout)
store0, rank0, size0 = next(gen0)
# this should time out in 10s. If the timeout passed into rendezvous was
# not respected, it will take much longer to timeout.
start = time.time()
with self.assertRaisesRegex(RuntimeError, "Timeout"):
store0.get("nonexistant key")
end = time.time()
time_diff = end - start
self.assertGreater(test_store_timeout.seconds * 10, time_diff)
if __name__ == "__main__":
assert (
not torch.cuda._initialized
), "test_distributed must not have initialized CUDA context on main process"
run_tests()
# === File: /practice_problems/strings_arrays/string_rotation.py (repo: YazzyYaz/codinginterviews) ===
def is_rotation(s1, s2):
if len(s1) != len(s2):
return False
return is_substring(s1+s1, s2)
def is_substring(s1, s2):
    # Check every alignment of s2 inside s1 (range needs + 1, not - 1, or the
    # last alignments are skipped).
    for x in range(len(s1) - len(s2) + 1):
is_substring_flag = True
for y in range(len(s2)):
if s1[x+y] != s2[y]:
is_substring_flag = False
break
if is_substring_flag:
return True
return False
s1 = "waterbottle"
s2 = "erbottlewat"
print(is_rotation(s1, s2))
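# Equivalent check under the same doubling trick (a sketch added for
# comparison, not part of the original exercise): every rotation of s1 is a
# length-matched substring of s1 + s1, so Python's `in` can replace the
# manual scan above.
def is_rotation_builtin(s1, s2):
    return len(s1) == len(s2) and s2 in (s1 + s1)
print(is_rotation_builtin(s1, s2))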
# === File: /CS2-master/CS2/migrations/versions/e5638e55e8f8_initial_migration.py (repo: hafizhsyafiqh/CS2) ===
"""Initial migration.
Revision ID: e5638e55e8f8
Revises: aab60456f86f
Create Date: 2020-04-07 15:39:06.817559
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
# revision identifiers, used by Alembic.
revision = 'e5638e55e8f8'
down_revision = 'aab60456f86f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('alamat', sa.String(length=20), nullable=True))
op.add_column('user', sa.Column('kelas', sa.String(length=20), nullable=True))
op.add_column('user', sa.Column('nama', sa.String(length=20), nullable=True))
op.add_column('user', sa.Column('nim', sa.String(length=10), nullable=True))
op.drop_index('email', table_name='user')
op.drop_index('username', table_name='user')
op.create_unique_constraint(None, 'user', ['nama'])
op.create_unique_constraint(None, 'user', ['alamat'])
op.create_unique_constraint(None, 'user', ['kelas'])
op.create_unique_constraint(None, 'user', ['nim'])
op.drop_column('user', 'username')
op.drop_column('user', 'email')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('email', mysql.VARCHAR(length=120), nullable=True))
op.add_column('user', sa.Column('username', mysql.VARCHAR(length=80), nullable=True))
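    # Alembic's drop_constraint requires a constraint name and raises
    # ValueError when given None; the autogenerated placeholders below should
    # be replaced with the real constraint names before running a downgrade.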
op.drop_constraint(None, 'user', type_='unique')
op.drop_constraint(None, 'user', type_='unique')
op.drop_constraint(None, 'user', type_='unique')
op.drop_constraint(None, 'user', type_='unique')
op.create_index('username', 'user', ['username'], unique=True)
op.create_index('email', 'user', ['email'], unique=True)
op.drop_column('user', 'nim')
op.drop_column('user', 'nama')
op.drop_column('user', 'kelas')
op.drop_column('user', 'alamat')
# ### end Alembic commands ###
# === File: /tests/test_credstore.py (repo: borland667/pyvmomi-tools) ===
# Copyright (c) 2014 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Minimal functionality to read and use passwords from vSphere Credential Store XML file
'''
from __future__ import print_function
import os
import shutil
import unittest
import sys
from pyvmomi_tools.extensions.credstore import VICredStore, NoCredentialsFileFound, HostNotFoundException, PasswordEntry
from sys import platform as _platform
try:
# Python 3.x compatibility workaround
from cStringIO import StringIO
except ImportError:
from io import StringIO
__author__ = 'Osvaldo Demo'
class VICredStoreTests(unittest.TestCase):
def _create_credentialsfile(self, file):
target = open(file, 'w')
target.write('<?xml version="1.0" encoding="UTF-8"?>')
target.write("\n")
target.write(' <viCredentials>')
target.write("\n")
target.write(' <version>1.0</version>')
target.write("\n")
target.write(' <passwordEntry>')
target.write("\n")
target.write(' <host>mytesthost</host>')
target.write("\n")
target.write(' <username>testuser</username>')
target.write("\n")
target.write(' <password>NyYwNzMiMDA0LDEnQwY6EwoWFHsINgUwdCV1cg1wDyUtJBssG3cicRE7MQcVKxp1FhsOHBMrdSASNwoJCXM2cjUaOy0JJXsIFXN2EgAsKzUmeiU6EzIvcisrBAEIdg87IQs7JRI3DRwQMRsAMwIGJw8CAXQuDjslJRERKnEmB0M=</password>')
target.write("\n")
target.write(' </passwordEntry>')
target.write("\n")
target.write('</viCredentials>')
target.write("\n")
target.close()
def setUp(self):
self.test_path = "mycredentials.xml"
self._create_credentialsfile(self.test_path)
self.path = None
if _platform == "linux" or _platform == "linux2":
self.path = os.environ['HOME'] + VICredStore.FILE_PATH_UNIX
elif _platform == "win32":
self.path = os.environ['APPDATA'] + VICredStore.FILE_PATH_WIN
if self.path is not None:
if os.path.exists(self.path):
shutil.copy(self.path,self.path+'.bak')
shutil.copy(self.test_path,self.path)
else:
if not os.path.exists(os.path.dirname(self.path)):
os.makedirs(os.path.dirname(self.path))
shutil.copy(self.test_path,self.path)
def tearDown(self):
os.remove('mycredentials.xml')
if self.path is not None:
if os.path.exists(self.path+'.bak'):
shutil.copy(self.path+'.bak',self.path)
os.remove(self.path+'.bak')
else:
shutil.rmtree(os.path.dirname(self.path))
def test_get_userpwd(self):
os.environ['VI_CREDSTORE'] = self.test_path
store = VICredStore(os.environ['VI_CREDSTORE'])
self.assertEqual(store.get_userpwd('mytesthost'),('testuser','testpassword'))
def test_get_userpwd_2(self):
store = VICredStore()
self.assertEqual(store.get_userpwd('mytesthost'),('testuser','testpassword'))
def test_get_userpwd_3(self):
os.environ.pop('VI_CREDSTORE',None)
if self.path is not None:
if not os.path.exists(os.path.dirname(self.path)):
os.makedirs(os.path.dirname(self.path))
self._create_credentialsfile(self.path)
store = VICredStore()
self.assertEqual(store.get_userpwd('mytesthost'),('testuser','testpassword'))
def test_VICredStore_NoCredentialsFileFound(self):
self.assertRaises(NoCredentialsFileFound,VICredStore,'anyfile.xml')
def test_get_userpwd_HostNotFoundException(self):
os.environ['VI_CREDSTORE'] = self.test_path
store = VICredStore(os.environ['VI_CREDSTORE'])
self.assertRaises(HostNotFoundException,store.get_userpwd,'notexistanthost')
def test_get_pwd_entry_list(self):
os.environ['VI_CREDSTORE'] = self.test_path
store = VICredStore(os.environ['VI_CREDSTORE'])
pwdentry = PasswordEntry('mytesthost','testuser','NyYwNzMiMDA0LDEnQwY6EwoWFHsINgUwdCV1cg1wDyUtJBssG3cicRE7MQcVKxp1FhsOHBMrdSASNwoJCXM2cjUaOy0JJXsIFXN2EgAsKzUmeiU6EzIvcisrBAEIdg87IQs7JRI3DRwQMRsAMwIGJw8CAXQuDjslJRERKnEmB0M=')
pwdlist = store._get_pwd_entry_list()
self.assertEqual(len(pwdlist),1)
self.assertEqual(pwdentry,pwdlist[0])
def test_list_entries(self):
self.held, sys.stdout = sys.stdout, StringIO()
os.environ['VI_CREDSTORE'] = self.test_path
store = VICredStore(os.environ['VI_CREDSTORE'])
store.list_entries()
self.assertEqual(sys.stdout.getvalue(),'mytesthost\n')
sys.stdout = self.held
# === File: /models.py (repo: LaRiffle/ariann) ===
import os
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
class Network1(nn.Module):
def __init__(self, dataset, out_features):
super(Network1, self).__init__()
self.fc1 = nn.Linear(784, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, out_features)
def forward(self, x):
x = x.reshape(-1, 784)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
x = F.relu(x)
return x
class Network2(nn.Module):
def __init__(self, dataset, out_features):
super(Network2, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=0)
self.conv2 = nn.Conv2d(16, 16, kernel_size=5, stride=1, padding=0)
self.fc1 = nn.Linear(256, 100)
self.fc2 = nn.Linear(100, out_features)
def forward(self, x):
x = self.conv1(x)
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = F.relu(x) ## inverted!
x = self.conv2(x)
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = F.relu(x) ## inverted!
x = x.reshape(-1, 256)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
return x
class LeNet(nn.Module):
def __init__(self, dataset, out_features=10):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 20, kernel_size=5, stride=1, padding=0)
self.conv2 = nn.Conv2d(20, 50, kernel_size=5, stride=1, padding=0)
self.fc1 = nn.Linear(800, 500)
self.fc2 = nn.Linear(500, out_features)
def forward(self, x):
x = self.conv1(x)
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = F.relu(x) # switched!!
x = self.conv2(x)
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = F.relu(x) # switched!!
x = x.reshape(-1, 800)
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
x = F.relu(x)
return x
class AlexNet_CIFAR10(nn.Module):
def __init__(self, out_features=10):
super(AlexNet_CIFAR10, self).__init__()
self.conv_base = nn.Sequential(
nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=10),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.ReLU(inplace=True), ## inverted!
nn.BatchNorm2d(96),
nn.Conv2d(96, 256, kernel_size=5, stride=1, padding=1),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.ReLU(inplace=True), ## inverted!
nn.BatchNorm2d(256),
nn.Conv2d(256, 384, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 384, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, stride=1, padding=1),
nn.ReLU(inplace=True),
)
self.fc_base = nn.Sequential(
nn.Linear(256, 256),
nn.ReLU(inplace=True),
nn.Linear(256, 256),
nn.ReLU(inplace=True),
nn.Linear(256, out_features),
nn.ReLU(inplace=True),
)
def forward(self, x):
x = self.conv_base(x)
x = torch.flatten(x, 1)
x = self.fc_base(x)
return x
class AlexNet_FALCON(nn.Module):
"""
This is the AlexNet version used in FALCON, which is not the standard
of PyTorch
"""
def __init__(self, out_features=10):
super(AlexNet_FALCON, self).__init__()
self.conv_base = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=7, stride=1, padding=3),
nn.Conv2d(64, 64, kernel_size=5, stride=1, padding=2),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.ReLU(inplace=True), ## inverted!
nn.BatchNorm2d(64),
nn.Conv2d(64, 128, kernel_size=5, stride=1, padding=2),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.ReLU(inplace=True), ## inverted!
nn.BatchNorm2d(128),
nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.ReLU(inplace=True),
)
self.fc_base = nn.Sequential(
nn.Linear(16384, 1024), # 7*7*256
nn.ReLU(inplace=True),
nn.Linear(1024, 1024),
nn.ReLU(inplace=True),
nn.Linear(1024, 200),
nn.ReLU(inplace=True),
)
def forward(self, x):
x = self.conv_base(x)
x = torch.flatten(x, 1)
x = self.fc_base(x)
return x
def alexnet(dataset, out_features):
if dataset == "cifar10":
model = AlexNet_CIFAR10(out_features)
return model
elif dataset == "tiny-imagenet":
model = models.alexnet(pretrained=True)
class Empty(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x
model.avgpool = Empty()
model.classifier = nn.Sequential(
Empty(), # nn.Dropout(),
nn.Linear(256, 1024),
nn.ReLU(True),
Empty(), # nn.Dropout(),
nn.Linear(1024, 1024),
nn.ReLU(True),
nn.Linear(1024, out_features),
)
# Invert ReLU and MaxPool2d
for i, module in enumerate(model.features[:-1]):
next_module = model.features[i + 1]
if isinstance(module, nn.ReLU) and isinstance(next_module, nn.MaxPool2d):
model.features[i + 1] = module
model.features[i] = next_module
return model
else:
raise ValueError("VGG16 can't be built for this dataset, maybe modify it?")
def vgg16(dataset, out_features):
model = models.vgg16()
# Invert ReLU <-> Maxpool
for i, module in enumerate(model.features[:-1]):
next_module = model.features[i + 1]
if isinstance(module, nn.ReLU) and isinstance(next_module, nn.MaxPool2d):
model.features[i + 1] = module
model.features[i] = next_module
class Empty(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x
model.avgpool = Empty()
if dataset == "cifar10":
first_linear = nn.Linear(512, 4096)
elif dataset == "tiny-imagenet":
first_linear = nn.Linear(512 * 2 * 2, 4096)
else:
raise ValueError("VGG16 can't be built for this dataset, maybe modify it?")
model.classifier = nn.Sequential(
first_linear,
nn.ReLU(True),
Empty(), # nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
Empty(), # nn.Dropout(),
nn.Linear(4096, out_features),
)
return model
def resnet18(dataset, out_features):
model = models.resnet18()
model.maxpool, model.relu = model.relu, model.maxpool
model.fc = nn.Linear(in_features=512, out_features=out_features)
return model
model_zoo = {
"network1": Network1,
"network2": Network2,
"lenet": LeNet,
"alexnet": alexnet,
"vgg16": vgg16,
"resnet18": resnet18,
}
def get_model(model_name, dataset, out_features):
return model_zoo[model_name](dataset, out_features)
online_models = {
"lenet_mnist": {
"id": "1WWh_POWmgcBEDxk87t50DEZmTik9NRkg",
"file_name": "lenet_mnist_baseline_99.27.pt",
},
"alexnet_cifar10": {
"id": "1-M8SaF19EFSI1Zqmnr9KL5aQG2AEqWND",
"file_name": "alexnet_cifar10_baseline_70.23.pt",
},
"alexnet_tiny-imagenet": {
"id": "1Nygb3K8dbSBYMls3U6rngYIAYrRsLwR0",
"file_name": "alexnet_tiny-imagenet_baseline_37.8.pt",
},
"resnet18_hymenoptera": {
"id": "1bNHE91Fn32AGPNyk_hmGZuQdpnVmyOtR",
"file_name": "resnet18_hymenoptera_95.pt",
},
}
too_big_models = {
"vgg16_cifar10": "17k1nKItmp-4E1r5GFqfs8oH1Uhmp5e_0",
"vgg16_tiny-imagenet": "1uBiLpPi34Z3NywW3zwilMZpmb964oU8q",
}
def load_state_dict(model, model_name, dataset):
MODEL_PATH = "pretrained_models/"
base_name = f"{model_name}_{dataset}"
file_name = None
for file in os.scandir(MODEL_PATH):
if re.match(fr"^{base_name}", file.name):
file_name = file.name
if file_name is None:
if base_name in online_models:
id = online_models[base_name]["id"]
file_name = online_models[base_name]["file_name"]
print(f"Downloading model {file_name}... ")
os.system(
f"wget --no-check-certificate "
f"'https://docs.google.com/uc?export=download&id={id}' -O {MODEL_PATH+file_name}"
)
else:
if base_name in too_big_models:
id = too_big_models[base_name]
print(
f"Model {base_name} has to be downloaded manually :( \n\n"
f"https://docs.google.com/uc?export=download&id={id}\n"
)
raise FileNotFoundError(f"No pretrained model for {model_name} {dataset} was found!")
model.load_state_dict(torch.load(MODEL_PATH + file_name, map_location=torch.device("cpu")))
print(f"Pre-trained model loaded from {file_name}")
# === File: /matlab-with-python/MatlabExample/mySpeechRecognizer.py (repo: PeterJochem/Chrono_Simulations) ===
# Python speech recognition code using CMU PocketSphinx
# Source: https://pypi.python.org/pypi/SpeechRecognition/
import speech_recognition as sr
# Takes an audio signal (in uint8 format), along with its sampling frequency and channel width, and outputs detected text and success flag.
def audioToText(audio,freq,width):
# Create an AudioData object from the input signal
audioData = sr.AudioData(bytes(audio),freq,width)
# Create a recognizer instance and attempt to recognize text
myRec = sr.Recognizer()
try:
recText = myRec.recognize_sphinx(audioData)
successFlag = True
    except (sr.UnknownValueError, sr.RequestError):
recText = ""
successFlag = False
# Return recognized text and success flag
return [recText,successFlag]
# Directly listens to the microphone and returns detected text.
def speechToText():
# First, get the audio data from the microphone
myRec = sr.Recognizer()
myRec.energy_threshold = 1000 # Increase to reduce noise
with sr.Microphone() as source:
audioData = myRec.listen(source)
try:
# Call speech recognizer
return myRec.recognize_sphinx(audioData)
    except (sr.UnknownValueError, sr.RequestError):
return ""
# === File: /plugin.py (repo: TBemme/galaxy-integration-psp) ===
import asyncio
import json
import os
import subprocess
import sys
import re
import config
from galaxy.api.plugin import Plugin, create_and_run_plugin
from galaxy.api.consts import Platform, LicenseType, LocalGameState
from galaxy.api.types import Authentication, Game, LocalGame, LicenseInfo
from version import __version__
from bs4 import BeautifulSoup
titleregex = re.compile("([^\(]*) \(.*")
datregex = re.compile("^[xz0-9]+[0-9]+[0-9]+[0-9]+\s+-")
fileformat = '.iso'
class PlayStationPortablePlugin(Plugin):
def __init__(self, reader, writer, token):
super().__init__(Platform.PlayStationPortable, __version__, reader, writer, token)
self.games = []
self.local_games_cache = self.local_games_list()
async def authenticate(self, stored_credentials=None):
usercred = {}
username = config.username
usercred["username"] = config.username
self.store_credentials(usercred)
return Authentication("PSPuserId", usercred["username"])
async def launch_game(self, game_id):
args = [ config.emu_path ]
if config.emu_args != []:
args.extend(config.emu_args)
for game in self.games:
if str(game[1]) == game_id:
args.append(game[0])
subprocess.Popen(args)
break
return
# async def install_game(self, game_id):
# pass
# async def uninstall_game(self, game_id):
# pass
def shutdown(self):
pass
def local_games_list(self):
local_games = []
for game in self.games:
if os.path.exists(game[0]):
local_games.append(
LocalGame(
str(game[1]),
LocalGameState.Installed
)
)
return local_games
def tick(self):
async def update_local_games():
loop = asyncio.get_running_loop()
new_local_games_list = await loop.run_in_executor(None, self.local_games_list)
notify_list = get_state_changes(self.local_games_cache, new_local_games_list)
self.local_games_cache = new_local_games_list
for local_game_notify in notify_list:
self.update_local_game_status(local_game_notify)
asyncio.create_task(update_local_games())
async def get_owned_games(self):
self.games = get_games()
owned_games = []
for game in self.games:
owned_games.append(
Game(
str(game[1]),
game[2],
None,
LicenseInfo(LicenseType.SinglePurchase, None)
)
)
return owned_games
async def get_local_games(self):
return self.local_games_cache
def get_games():
games = []
results = []
try:
with open(game_list) as f:
for line in f:
games.append(line.rstrip())
except UnicodeDecodeError:
with open(game_list, encoding='utf-8-sig') as f:
for line in f:
games.append(line.rstrip())
with open(game_dat) as fp:
soup = BeautifulSoup(fp, "xml")
for game in games:
try:
pspgamedata = soup.find("rom", {"name":game + '.iso'})
path = os.path.join(*roms_path, game + fileformat)
title_id = pspgamedata['serial']
            # datregex distinguishes numbered dat entries from the rest, and
            # titleregex strips the trailing region info to produce game_title
if datregex.match(game):
results.append(
[path, title_id, titleregex.match(pspgamedata['name'][7:])[1]]
)
else:
results.append(
[path, title_id, titleregex.match(pspgamedata['name'])[1]]
)
        except Exception:
print('Error processing', game)
return results
def get_state_changes(old_list, new_list):
old_dict = {x.game_id: x.local_game_state for x in old_list}
new_dict = {x.game_id: x.local_game_state for x in new_list}
result = []
# removed games
result.extend(LocalGame(id, LocalGameState.None_) for id in old_dict.keys() - new_dict.keys())
# added games
result.extend(local_game for local_game in new_list if local_game.game_id in new_dict.keys() - old_dict.keys())
# state changed
result.extend(LocalGame(id, new_dict[id]) for id in new_dict.keys() & old_dict.keys() if new_dict[id] != old_dict[id])
return result
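# Illustration (hypothetical ids): if old_list contains games {"1", "2"} and
# new_list contains {"2", "3"}, the result reports "1" as removed (None_),
# "3" as added, and "2" only if its LocalGameState changed.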
default_game_list = os.environ['localappdata'] + '\\GOG.com\\Galaxy\\plugins\\installed\\psp_cea94ced-c6cd-44cf-940c-a6c9253827a9\\PSP-list.txt'
if os.path.exists(config.game_list) and os.path.isfile(config.game_list):
game_list = config.game_list
else:
game_list = default_game_list
default_game_dat = os.environ['localappdata'] + '\\GOG.com\\Galaxy\\plugins\\installed\\psp_cea94ced-c6cd-44cf-940c-a6c9253827a9\\PSP.dat'
if os.path.exists(config.game_dat) and os.path.isfile(config.game_dat):
game_dat = config.game_dat
else:
game_dat = default_game_dat
roms_path = os.path.split(config.roms_path)
def main():
create_and_run_plugin(PlayStationPortablePlugin, sys.argv)
# run plugin event loop
if __name__ == "__main__":
main()
# === File: /mkt/developers/views_payments.py (repo: caseybecking/zamboni) ===
from django import http
from django.shortcuts import get_object_or_404, redirect
import commonware
import jingo
from tower import ugettext as _
import amo
from amo import messages
from amo.decorators import json_view, post_required, write
from lib.pay_server import client
from mkt.constants import DEVICE_LOOKUP
from mkt.developers.decorators import dev_required
from . import forms, models
log = commonware.log.getLogger('z.devhub')
@dev_required
@post_required
def disable_payments(request, addon_id, addon):
addon.update(wants_contributions=False)
return redirect(addon.get_dev_url('payments'))
@dev_required(owner_for_post=True, webapp=True)
def payments(request, addon_id, addon, webapp=False):
premium_form = forms.PremiumForm(
request.POST or None, request=request, addon=addon,
user=request.amo_user)
upsell_form = forms.UpsellForm(
request.POST or None, addon=addon, user=request.amo_user)
bango_account_list_form = forms.BangoAccountListForm(
request.POST or None, addon=addon, user=request.amo_user)
if request.method == 'POST':
success = all(form.is_valid() for form in
[premium_form, upsell_form, bango_account_list_form])
if success:
toggling = premium_form.is_toggling()
try:
premium_form.save()
except client.Error as err:
success = False
log.error('Error setting payment information (%s)' % err)
messages.error(
request, _(u'We encountered a problem connecting to the '
u'payment server.'))
is_now_paid = addon.premium_type in amo.ADDON_PREMIUMS
# If we haven't changed to a free app, check the upsell.
if is_now_paid and success:
try:
upsell_form.save()
bango_account_list_form.save()
except client.Error as err:
log.error('Error saving payment information (%s)' % err)
messages.error(
request, _(u'We encountered a problem connecting to '
u'the payment server.'))
success = False
# Test again in case a call to Solitude failed.
if is_now_paid and success:
# Update the product's price if we need to.
try:
apa = models.AddonPaymentAccount.objects.get(addon=addon)
apa.update_price(addon.premium.price.price)
except models.AddonPaymentAccount.DoesNotExist:
pass
except client.Error:
log.error('Error updating AddonPaymentAccount (%s) price' %
apa.pk)
messages.error(
request, _(u'We encountered a problem while updating '
u'the payment server.'))
success = False
# If everything happened successfully, give the user a pat on the back.
if success:
messages.success(request, _('Changes successfully saved.'))
return redirect(addon.get_dev_url('payments'))
# TODO: This needs to be updated as more platforms support payments.
cannot_be_paid = (
addon.premium_type == amo.ADDON_FREE and
any(premium_form.device_data['free-%s' % x] == y for x, y in
[('phone', True), ('tablet', True), ('desktop', True),
('os', False)]))
return jingo.render(
request, 'developers/payments/premium.html',
{'addon': addon, 'webapp': webapp, 'premium': addon.premium,
'form': premium_form, 'upsell_form': upsell_form,
'DEVICE_LOOKUP': DEVICE_LOOKUP,
'is_paid': addon.premium_type in amo.ADDON_PREMIUMS,
'no_paid': cannot_be_paid,
'is_incomplete': addon.status == amo.STATUS_NULL,
# Bango values
'bango_account_form': forms.BangoPaymentAccountForm(),
'bango_account_list_form': bango_account_list_form, })
def payments_accounts(request):
bango_account_form = forms.BangoAccountListForm(
user=request.amo_user, addon=None)
return jingo.render(
request, 'developers/payments/includes/bango_accounts.html',
{'bango_account_list_form': bango_account_form})
@write
@post_required
def payments_accounts_add(request):
form = forms.BangoPaymentAccountForm(request.POST)
if not form.is_valid():
return http.HttpResponse(form.happy_errors, status=400)
try:
models.PaymentAccount.create_bango(
request.amo_user, form.cleaned_data)
except client.Error as e:
log.error('Error creating Bango payment account; %s' % e)
return http.HttpResponse(
_(u'Could not connect to payment server.'), status=400)
return redirect('mkt.developers.bango.payment_accounts')
@write
@post_required
def payments_accounts_delete(request, id):
get_object_or_404(models.PaymentAccount, pk=id).cancel()
return http.HttpResponse('success')
# === File: /28.py (repo: tomasolodun/kolokvium) ===
'''Find the number of even elements in a one-dimensional array.'''
import random
array = [random.randint(10, 100) for i in range(10)]
print('Given array:\n', array)
counter = 0
for j in array:
if j % 2 == 0:
counter += 1
print(f'The number of even elements in the array is {counter}')
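# Equivalent one-liner (a sketch): print(sum(1 for j in array if j % 2 == 0))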
# === File: /core/collections/search.py (repo: jwnasambu/oclapi2) ===
from elasticsearch_dsl import TermsFacet
from core.collections.models import Collection
from core.common.search import CommonSearch
class CollectionSearch(CommonSearch):
index = 'collections'
doc_types = [Collection]
fields = ['collection_type', 'locale', 'owner', 'owner_type', 'is_active', 'version']
facets = {
'collectionType': TermsFacet(field='collection_type'),
'locale': TermsFacet(field='locale'),
'owner': TermsFacet(field='owner'),
'ownerType': TermsFacet(field='owner_type'),
'is_active': TermsFacet(field='is_active'),
'version': TermsFacet(field='version'),
}
# === File: /recommendsystem/assessment.py (repo: CxwAlex/recommendsystem_new) ===
import math
import time
from pandas import Series, DataFrame
from recommendsystem.engine_cf import ItemSimilarityCF
# Summarize the recommendation results.
# train is the original training-set DataFrame, recommend is the
# recommendation-result DataFrame, and test is the test set as a DataFrame or
# a list of tuples.
def Summary(train, recommend, test, item_similarity=None):
    t0 = time.perf_counter()
    recall = Recall(recommend, test)
    t1 = time.perf_counter()
    time_recall = t1 - t0
    print("recall:", recall, " time_recall:", time_recall)
    precision = Precision(recommend, test)
    t2 = time.perf_counter()
    time_precision = t2 - t1
    print("precision:", precision, " time_precision:", time_precision)
    coverage = Coverage(train, recommend)
    t3 = time.perf_counter()
    time_coverage = t3 - t2
    print("coverage:", coverage, " time_coverage:", time_coverage)
    popularity = Popularity(train, recommend)
    t4 = time.perf_counter()
    time_popularity = t4 - t3
    print("popularity:", popularity, " time_popularity:", time_popularity)
    novelty = Novelty(train, recommend)
    t5 = time.perf_counter()
    time_novelty = t5 - t4
    print("novelty:", novelty, " time_novelty:", time_novelty)
    if not isinstance(item_similarity, DataFrame):
        item_similarity = ItemSimilarityCF(train)
    diversity = Diversity(recommend, item_similarity)
    t6 = time.perf_counter()
    time_diversity = t6 - t5
    print("diversity:", diversity, " time_diversity:", time_diversity)
'''
index_all = ['recall', 'precision', 'coverage', 'popularity', 'novelty', 'diversity']
result = Series(index=index_all)
result['recall'] = recall
result['precision'] = precision
result['coverage'] = coverage
result['popularity'] = popularity
result['novelty'] = novelty
result['diversity'] = diversity
'''
time_all = t6 - t0
result = {'recall':recall, 'precision':precision, 'coverage':coverage, 'popularity':popularity, 'novelty':novelty, 'diversity':diversity}
spend_time = {'time_recall':time_recall, 'time_precision':time_precision, 'time_coverage':time_coverage, 'time_popularity':time_popularity, 'time_novelty':time_novelty, 'time_diversity':time_diversity, 'time_all':time_all}
return result, spend_time
# Compute precision and recall
def Recall(recommend, test):
    all = 0
    # Handle both raw-tuple and DataFrame test sets
if isinstance(test, DataFrame):
for u in test.columns:
for i in test.index:
if test[u][i] != 0:
all += 1
else:
all = len(test)
hit = Hit(recommend, test)
return hit / all
def Precision(recommend, test):
all = 0
N = len(recommend.index)
for user in recommend.columns:
all += N
    hit = Hit(recommend, test)
return hit / all
def Hit(recommend, test):
hit = 0
    # Handle both raw-tuple and DataFrame test sets
if isinstance(test, DataFrame):
for user in test.columns:
for item in test.index:
if test[user][item] != 0:
if user in recommend.columns and item in recommend[user].values:
hit += 1
else:
for i in test:
if i[0] in recommend.columns and i[1] in recommend[i[0]].values:
hit += 1
return hit
# Compute coverage
def Coverage(train, recommend):
recommend_items = set()
all_items = set()
for item in train.index:
all_items.add(item)
for user in recommend.columns:
for item in recommend[user].values:
recommend_items.add(item)
return len(recommend_items) / len(all_items)
# Compute novelty (via popularity).
# If every recommended item is very popular, the value is high and novelty is
# low; conversely, if every item is a long-tail item, the value is close to 1.
def Popularity(train, recommend):
popularity = 0
n = 0
    # First compute each item's popularity
item_popularity = Series(0.0, index=recommend.index)
for user in train.columns:
for item in train.index:
if train[user][item] != 0:
if item not in item_popularity:
item_popularity[item] = 1
item_popularity[item] += 1
    # Then accumulate the popularity of the recommended items
for user in recommend.columns:
for item in recommend[user].values:
if item in item_popularity.index:
popularity += math.log(1 + item_popularity[item])
else:
popularity += math.log(1)
n += 1
popularity /= n
return popularity
def Novelty(train, recommend):
novelty = 1 / Popularity(train, recommend)
return novelty
# Use the similarity measure to compute the diversity of the recommendation list.
# High pairwise similarity among the recommended items means low diversity;
# low similarity means high diversity.
def Diversity(recommend, item_similarity):
similarity = 0
n = 0
recommend_items = set()
for user in recommend.columns:
for item in recommend[user].values:
recommend_items.add(item)
for i in recommend_items:
for j in recommend_items:
if i == j:
continue
if i in item_similarity.columns and j in item_similarity.index:
similarity += item_similarity[i][j]
n += 1
return 1 - math.sqrt(similarity/n)
# Compute RMSE and MAE
def RMSE(records):
'''
print(sum([(rui-pui)*(rui-pui) for u,i,rui,pui in records]) )
print(float(len(records)))
print(math.sqrt(\
sum([(rui-pui)*(rui-pui) for u,i,rui,pui in records]) / float(len(records))))
'''
return math.sqrt( \
sum([(rui - pui) * (rui - pui) for u, i, rui, pui in records]) / float(len(records)))
def MAE(records):
return sum([abs(rui - pui) for u, i, rui, pui in records]) / float(len(records))
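# Sanity-check sketch for the two error metrics above (made-up records in the
# assumed (user, item, actual, predicted) layout):
#
#   records = [("u1", "i1", 4.0, 3.5), ("u1", "i2", 2.0, 2.5)]
#   RMSE(records)  # -> 0.5
#   MAE(records)   # -> 0.5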
# Compute precision and recall
def PrecisionRecall(test, N):
hit = 0
n_recall = 0
n_precision = 0
for user, items, rank in test:
hit += sum(map(lambda x, y: 1 if x == y else 0, items, rank))
n_recall += len(items)
n_precision += N
return [hit / (1.0 * n_recall), hit / (1.0 * n_precision)]
'''
def PrecisionRecall(test, N):
    # Original version; rank is the result set produced by Recommend
hit = 0
n_recall = 0
n_precision = 0
for user, items in test.items():
rank = Recommend(user, N)
hit += len(rank & items)
n_recall += len(items)
n_precision += N
return [hit / (1.0 * n_recall), hit / (1.0 * n_precision)]
# Compute the Gini index
def GiniIndex(p):
j = 1
n = len(p)
G = 0
print(p)
print(sorted(p, key=operator.itemgetter(1)))
for item, weight in sorted(p, key=operator.itemgetter(1)):
G += (2 * j - n - 1) * weight
return G / float(n - 1)
'''
# === File: /day5_2.py (repo: sweavo/code-advent-2020) ===
import day5_1
""" Insights:
The pass IDs are just binary numbers: if B and R are 1 and F and L are
0 then you are just evaluating a 10-bit binary integer to derive the
pass ID.
We know that the whole plane is full, though the integers do not
necessarily start at 0 nor extend to MAX_10BIT_UINT. We know that the
number we're seeking is not at either end of the range. So we have two
contiguous blocks of integers separated by a single missing int, but
presented to us shuffled.
What's a good way to find a discontinuity within a dataset that is
otherwise contiguous?
sort it, and find the first number out of sequence.
"""
def find_the_gap1( values ):
""" return what would be next in the first group of sorted contiguous values
>>> find_the_gap1( [ 5,6,7,9,10 ] )
8
"""
offset = values[0]
for index, value in enumerate(values):
if value != index + offset:
return index + offset
def day5_2():
"""
>>> day5_2()
522
"""
sorted_ints = day5_1.sorted_pass_ids(day5_1.day5input.BOARDING_PASSES)
return find_the_gap1( sorted_ints )
|
[
"steve.carter@etas.com"
] |
steve.carter@etas.com
|
9d474a6d8f21485f09d743bb0dc4a45fe24fc8b6
|
8a8b5fca6845d61e80f1c7fcafd896174009c36f
|
/w8e3_add.py
|
011b528316955921a57250e9bf1c0c20a3c1a962
|
[
"Apache-2.0"
] |
permissive
|
ibyt32/pynet_test
|
e9217afd765cbf0b5f076df812528d18d4df215b
|
3b05d907fa0077d793b47dae36466a56d4ee3ef0
|
refs/heads/master
| 2020-05-22T01:16:42.635933
| 2016-11-28T00:50:48
| 2016-11-28T00:50:48
| 65,155,209
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 996
|
py
|
#!/usr/bin/env python
'''
Python and Ansible for Network Engineers
Week 8, Exercise 3
Create two new test NetworkDevices in the database. Use both direct object
creation and the .get_or_create() method to create the devices.
'''
import django
from net_system.models import NetworkDevice
def main():
'''
Create two new test NetworkDevices in the database. Use both direct object
creation and the .get_or_create() method to create the devices.
'''
django.setup()
devices = NetworkDevice.objects.all()
Kelli1 = NetworkDevice(
device_name='Kelli1',
device_type='cisco_ios',
ip_address='10.10.10.10',
port=22,
)
Kelli1.save()
Kelli2 = NetworkDevice.objects.get_or_create(
device_name='Kelli2',
device_type='cisco_ios',
ip_address='11.11.11.11',
port=22,
)
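# Note (added comment, not in the original): get_or_create returns an
# (object, created) tuple, so Kelli2 here is a tuple rather than a
# NetworkDevice instance.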
for a_device in devices:
print a_device.device_name, a_device.ip_address
if __name__ == "__main__":
main()
|
[
"ibyt32@gmail.com"
] |
ibyt32@gmail.com
|
e5f7d995c46381db8b00cfd344bcfb4d9222e613
|
d1eff6a5ed14ff33e87155c6e50500503458295d
|
/anaconda_project/version.py
|
dab2504eee1ebd6a2247f2577b8b446cbebc817f
|
[
"BSD-3-Clause"
] |
permissive
|
alymanap/anaconda-project
|
b49ee534eab935fb969206b68c86ef7cda64dd8f
|
7e3e518b2f0a71cf5f6c9a2aa73adbafddc5b8d8
|
refs/heads/master
| 2021-07-07T22:39:44.617666
| 2017-10-04T03:04:16
| 2017-10-04T03:04:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 525
|
py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
"""Version of anaconda-project library."""
VERSION_INFO = (0, 8, 2, 'dev0')
__version__ = '.'.join(map(str, VERSION_INFO))
version = __version__
|
[
"goanpeca@gmail.com"
] |
goanpeca@gmail.com
|
85d1daf64a3d7937820bc5e3bf5908989efffb16
|
1bcf0ceba6c49a23718769695298ccf5f7052b32
|
/compreseguros_com/compreseguros_com/items.py
|
2dec6c16f0e6580aad22807cb441354537852570
|
[
"MIT"
] |
permissive
|
hanjihun/Car
|
eb54b97bd0707cc650b8bb6730a4dda6a4297138
|
546348a1886efdef18a804a03c9862f88a6b1809
|
refs/heads/master
| 2021-01-20T12:56:51.264382
| 2017-03-02T20:06:11
| 2017-03-02T20:06:11
| 82,671,985
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 295
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class CompresegurosComItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
pass
|
[
"bbanzzakji@gmail.com"
] |
bbanzzakji@gmail.com
|
fc0c34c42e17b28ac6a02a6bcfb40ba5edff18d3
|
6b7a78af0cc06a583026e3e448d5d5c5adf7c861
|
/blurTest4.py
|
fd66e6c91d3326cebf8b9a3abbcc7ec392065965
|
[] |
no_license
|
zhangjiwei-japan/Image-Segmentation-Tools
|
9c6263b06745ce3da6d21cb1528b7c1014dc4af1
|
151d0a97248ccab3a5f38d7d521b1d38e76011f4
|
refs/heads/master
| 2021-06-01T19:03:53.241586
| 2015-01-31T06:05:46
| 2015-01-31T06:05:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,854
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 24 00:36:10 2014
@author: Avilash
"""
#A port of a MATLAB retinal segmentation alogorithm
#Using Dijkstra's algorithm to find the edges in an OCT image of the retina
#import cv2
import numpy as np
#impletmentation of Dijkstra's algorithm. Runs in O(m log n), m is the number
#of edges, and n the number of nodes
import dijkstra as dj
from scipy import misc, ndimage
from PIL import Image
#Changes the sparse array into a dict of dicts
#which can be read in by dijkstra.py
import sparseToDict as sD
#from scipy import ndimage
import matplotlib.pyplot as plt
#creates the filter for the blurring step
#in the same way (almost) that MATLAB does it
def matlab_style_gauss2D(shape=(5,20),sigma=3):
"""
2D gaussian mask - should give the same result as MATLAB's
fspecial('gaussian',[shape],[sigma])
"""
m,n = [(ss-1.)/2. for ss in shape]
y,x = np.ogrid[-m:m+1,-n:n+1]
h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )
h[ h < np.finfo(h.dtype).eps*h.max() ] = 0
sumh = h.sum()
if sumh != 0:
h /= sumh
return h
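# Sanity check (illustration only): like MATLAB's fspecial('gaussian'), the
# returned kernel is normalized to sum to 1, e.g.
# assert abs(matlab_style_gauss2D((5, 20), 3).sum() - 1.0) < 1e-9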
kernel = matlab_style_gauss2D()
img = misc.imread('AVG_Gray_avi2.tif')
#Blur the image (gaussian blue)
img1 = ndimage.filters.correlate(img, kernel, mode='constant')
#img = cv2.GaussianBlur(img, (5,21), 3)
img = Image.fromarray(img1)
#resize the image, with antialiasing. Otherwise will not be the same as
#MATLAB's resize
img = img.resize( (145 , 200), Image.ANTIALIAS) #indices seem to be reversed?
img = np.array(img)
szImg = img.shape
#add a column of zeros on both sides
imgNew = np.lib.pad(img, ((0,0),(1,1)), 'constant')
szImgNew = imgNew.shape
gradImg = np.zeros(szImgNew)
for i in range(0,(int(szImgNew[1]))):
gradImg[:,i] = -1*np.gradient((imgNew[:,i]), 2)
#vertical gradient
gradImg = (gradImg - np.amin(gradImg))/(np.amax(gradImg) - np.amin(gradImg))
#'inverts' the image
gradImgMinus = gradImg*(-1) + 1
#Adjacency matrix
minWeight = 1.0*10**(-5)
#weights
adjMW = np.empty((((int(szImgNew[0]))*(int(szImgNew[1]))),8))
#neg weights
adjMmW = np.empty((((int(szImgNew[0]))*(int(szImgNew[1]))),8))
#point A
adjMX = np.empty((((int(szImgNew[0]))*(int(szImgNew[1]))),8))
#point B
adjMY = np.empty((((int(szImgNew[0]))*(int(szImgNew[1]))),8))
adjMW[:] = np.NAN
adjMmW[:] = np.NAN
adjMX[:] = np.NAN
adjMY[:] = np.NAN
neighborIter = np.array([[1, 1, 1, 0, 0, -1, -1, -1],
[1, 0, -1, 1, -1, 1, 0, -1]])
szadjMW = adjMmW.shape
ind = 0
indR = 0
while ind != ((int(szadjMW[0]))*(int(szadjMW[1]))-1):
#order='F' means fortran style, or column-major (like MATLAB)
(i,j) = np.unravel_index(ind,szadjMW, order='F')
(iX,iY) = np.unravel_index(i,szImgNew, order='F')
(jX,jY) = ((iX + neighborIter[0,j]), (iY + neighborIter[1,j]))
if jX >= 0 and jX <= (int(szImgNew[0])-1) and jY >=0 and jY <= (int(szImgNew[1])-1):
if jY == 0 or jY == (int(szImgNew[1]) - 1):
adjMW[i,j] = minWeight
adjMmW[i,j] = minWeight
else:
adjMW[i,j] = 2 - gradImg[iX,iY] - gradImg[jX, jY] + minWeight
adjMmW[i,j] = 2 - gradImgMinus[iX,iY] - gradImgMinus[jX, jY] + minWeight
#save subscripts
adjMX[i,j] = np.ravel_multi_index((iX,iY), szImgNew, order='F')
adjMY[i,j] = np.ravel_multi_index((jX,jY), szImgNew, order='F')
ind = ind + 1
#ASSEMBLE
a = np.logical_and(np.ravel(~np.isnan(adjMW[:]), order='F'), np.ravel(~np.isnan(adjMX[:]), order='F'))
b = np.logical_and(np.ravel(~np.isnan(adjMY[:]), order='F'), np.ravel(~np.isnan(adjMmW[:]), order='F'))
keepInd = np.logical_and(a,b)
newLen = 0
for p in range (0, (keepInd.size)) :
if keepInd[p]:
newLen = newLen + 1
RealadjMW = np.zeros(newLen)
RealadjMmW = np.zeros(newLen)
RealadjMX = np.zeros(newLen)
RealadjMY = np.zeros(newLen)
q = 0
for r in range (0, (keepInd.size)):
if keepInd[r]:
RealadjMW[q] = adjMW[np.unravel_index(r,szadjMW, order='F')]
RealadjMmW[q] = adjMmW[np.unravel_index(r,szadjMW, order='F')]
RealadjMX[q] = adjMX[np.unravel_index(r,szadjMW, order='F')]
RealadjMY[q] = adjMY[np.unravel_index(r,szadjMW, order='F')]
q = q + 1
#finding the 'shortest path'from dark to light
wGraph = sD.sparseToDict(RealadjMX, RealadjMY, RealadjMW)
#finding the 'shortest path' from light to dark
mwGraph = sD.sparseToDict(RealadjMX, RealadjMY, RealadjMmW)
path1 = dj.shortest_path(wGraph, str(np.amin(RealadjMX)), str(np.amax(RealadjMX)))
path2 = dj.shortest_path(mwGraph, str(np.amin(RealadjMX)), str(np.amax(RealadjMX)))
#sparse matrices, 29400 * 29400
#adjMatrixW = sA.sparser(RealadjMX, RealadjMY, RealadjMW, imgNew.size, imgNew.size)
#adjMatrixMW = sA.sparser(RealadjMX, RealadjMY, RealadjMmW, imgNew.size, imgNew.size)
#plt.imshow(img, cmap=plt.cm.gray)
#plt.show
#print (gradImgMinus)
|
[
"avilash.cramer@gmail.com"
] |
avilash.cramer@gmail.com
|
bfb7111f04289b636cf7c185600a29a68f997888
|
c0b4a6786d501178eac1748883668751eff9f1ff
|
/data_pipelines/County-Data/ohio-scrape.py
|
4553664c20fc8d52a279fe0f84b0349228b335c8
|
[] |
no_license
|
GaloisInc/covid-19
|
399c675fa848408252dd81fcc78da78705d050bc
|
a997943fdca89d69d9cedb914e1e3fb8da1253a0
|
refs/heads/master
| 2023-03-18T22:53:43.805023
| 2021-02-08T20:41:24
| 2021-02-08T20:41:24
| 250,390,514
| 6
| 0
| null | 2021-03-03T05:17:21
| 2020-03-26T22:59:39
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,211
|
py
|
import requests
import pandas as pd
import os, errno
from datetime import datetime
from bs4 import BeautifulSoup
import re
today = datetime.date(datetime.now()).strftime("%Y-%m-%d")
directoryName = 'ohio_county_scrapes'
fileName = directoryName + "/" + directoryName + "_" + today + ".csv"
URL = "https://coronavirus.ohio.gov/wps/portal/gov/covid-19/"
r = requests.get(URL)
r.content
soup = BeautifulSoup(r.content, 'html.parser')
infectedDiv = soup.findAll("div", {"class": "odh-ads__super-script-item"})[0]
deathDiv = soup.findAll("div", {"class": "odh-ads__super-script-item"})[1]
infectedCounties = infectedDiv.text.split(":")[1].strip().split(',')
deathCounties = deathDiv.text.split("**")[1].strip().split(',')
ohioCounties = pd.read_csv('ohio_county_list.csv')
ohioCounties.columns = ['County', 'Coordinates', 'Population']
ohioCounties['County'] = ohioCounties['County'].apply(lambda x: x.split()[0].strip())
confirmedCases = dict()
for county in infectedCounties:
info = county.split()
info[0] = info[0].strip()
info[1] = int(re.match(r"\((\d+)\)", info[1])[1])
confirmedCases[info[0]] = info[1]
confirmedDF = pd.DataFrame.from_dict(confirmedCases, orient='index').reset_index()
confirmedDF.columns = ["County", "Confirmed"]
deaths = dict()
for county in deathCounties:
info = county.split()
info[0] = info[0].strip()
info[1] = int(re.match(r"\((\d+)\)", info[1])[1])
deaths[info[0]] = info[1]
deathDF = pd.DataFrame.from_dict(deaths, orient='index').reset_index()
deathDF.columns = ["County", "Deaths"]
ohioCounties = ohioCounties.join(confirmedDF.set_index('County'), on='County')
ohioCounties = ohioCounties.join(deathDF.set_index('County'), on='County')
ohioCounties['Latitude'] = ohioCounties['Coordinates'].apply(lambda x: (re.match(r'Point\(([^()]+)\)', x)[1].split())[1])
ohioCounties['Longitude'] = ohioCounties['Coordinates'].apply(lambda x: (re.match(r'Point\(([^()]+)\)', x)[1].split())[0])
ohioCounties = ohioCounties.drop(['Coordinates'], axis=1)
ohioCounties = ohioCounties.fillna(0.0)
ohioCounties['Date'] = today
try:
os.mkdir(directoryName)
except OSError as e:
if e.errno != errno.EEXIST:
raise
ohioCounties.to_csv(fileName)
|
[
"zealabyrinth@gmail.com"
] |
zealabyrinth@gmail.com
|
cc862ff783a8c62ac336374739356cc0db6f1d62
|
bd41fc77c258a4004d51341610bb0c6f2da991b6
|
/pytext/models/output_layers/word_tagging_output_layer.py
|
32ee0b5388388e98ca88be439b65b4e3985d3e26
|
[
"BSD-3-Clause"
] |
permissive
|
jain-m/pytext
|
9850cb4b409661af77627dc204364196351ec5cb
|
f7e1ff48e0f5cca82bd7aa4b973e24beee093fec
|
refs/heads/master
| 2021-01-02T11:08:49.617885
| 2020-02-07T23:30:17
| 2020-02-07T23:42:50
| 239,595,197
| 0
| 0
|
NOASSERTION
| 2020-02-10T19:29:32
| 2020-02-10T19:29:31
| null |
UTF-8
|
Python
| false
| false
| 12,097
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.jit as jit
import torch.nn as nn
import torch.nn.functional as F
from caffe2.python import core
from pytext.common import Padding
from pytext.config.component import create_loss
from pytext.config.serialize import MissingValueError
from pytext.data.utils import Vocabulary
from pytext.loss import (
AUCPRHingeLoss,
BinaryCrossEntropyLoss,
CrossEntropyLoss,
KLDivergenceBCELoss,
KLDivergenceCELoss,
LabelSmoothedCrossEntropyLoss,
)
from pytext.models.crf import CRF
from pytext.utils.label import get_label_weights
from .output_layer_base import OutputLayerBase
from .utils import OutputLayerUtils
class WordTaggingScores(nn.Module):
classes: List[str]
def __init__(self, classes):
super().__init__()
self.classes = classes
def forward(
self, logits: torch.Tensor, context: Optional[Dict[str, torch.Tensor]] = None
) -> List[List[Dict[str, float]]]:
scores: torch.Tensor = F.log_softmax(logits, 2)
return _get_prediction_from_scores(scores, self.classes)
class CRFWordTaggingScores(WordTaggingScores):
def __init__(self, classes: List[str], crf):
super().__init__(classes)
self.crf = crf
self.crf.eval()
def forward(
self, logits: torch.Tensor, context: Dict[str, torch.Tensor]
) -> List[List[Dict[str, float]]]:
# We need seq_lengths for CRF decode
assert "seq_lens" in context
pred = self.crf.decode(logits, context["seq_lens"])
logits_rearranged = _rearrange_output(logits, pred)
scores: torch.Tensor = F.log_softmax(logits_rearranged, 2)
return _get_prediction_from_scores(scores, self.classes)
class WordTaggingOutputLayer(OutputLayerBase):
"""
Output layer for word tagging models. It supports `CrossEntropyLoss` per word.
Args:
loss_fn (CrossEntropyLoss): Cross-entropy loss component. Defaults to None.
Attributes:
loss_fn: Cross-entropy loss component.
"""
class Config(OutputLayerBase.Config):
loss: Union[
CrossEntropyLoss.Config,
BinaryCrossEntropyLoss.Config,
AUCPRHingeLoss.Config,
KLDivergenceBCELoss.Config,
KLDivergenceCELoss.Config,
LabelSmoothedCrossEntropyLoss.Config,
] = CrossEntropyLoss.Config()
label_weights: Dict[str, float] = {}
ignore_pad_in_loss: Optional[bool] = True
@classmethod
def from_config(cls, config: Config, labels: Vocabulary):
vocab = list(labels)
vocab_dict = labels.idx
pad_token_idx = labels.idx.get(labels.pad_token, Padding.DEFAULT_LABEL_PAD_IDX)
label_weights = (
get_label_weights(vocab_dict, config.label_weights)
if config.label_weights
else None
)
return cls(
vocab,
create_loss(
config.loss,
weight=label_weights,
ignore_index=pad_token_idx if config.ignore_pad_in_loss else -1,
),
)
def get_loss(
self,
logit: torch.Tensor,
target: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]],
context: Dict[str, Any],
reduce: bool = True,
) -> torch.Tensor:
"""Compute word tagging loss by comparing prediction of each word in the
sentence with its true label/target.
Args:
logit (torch.Tensor): Logit returned by
:class:`~pytext.models.word_model.WordTaggingModel`.
targets (torch.Tensor): True document label/target.
context (Dict[str, Any]): Context is a dictionary of items
that's passed as additional metadata. Defaults to None.
reduce (bool): Whether to reduce loss over the batch. Defaults to True.
Returns:
torch.Tensor: Word tagging loss for all words in the sentence.
"""
# flatten the logit from [batch_size, seq_lens, dim] to
# [batch_size * seq_lens, dim]
flattened_logit = logit.view(-1, logit.size()[-1])
if isinstance(target, tuple):
hard_target, _, soft_target = target
target = (
hard_target.view(-1),
None,
soft_target.view(-1, soft_target.size()[-1]),
)
return self.loss_fn(flattened_logit, target, reduce)
return self.loss_fn(flattened_logit, target.view(-1), reduce)
def get_pred(
self, logit: torch.Tensor, *args, **kwargs
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute and return prediction and scores from the model.
Prediction is computed using argmax over the word label/target space.
Scores are softmax scores over the model logits.
Args:
logit (torch.Tensor): Logits returned by
:class:`~pytext.models.word_model.WordTaggingModel`.
Returns:
Tuple[torch.Tensor, torch.Tensor]: Model prediction and scores.
"""
preds = torch.max(logit, 2)[1]
scores = F.log_softmax(logit, 2)
return preds, scores
def export_to_caffe2(
self,
workspace: core.workspace,
init_net: core.Net,
predict_net: core.Net,
model_out: torch.Tensor,
output_name: str,
) -> List[core.BlobReference]:
"""Exports the word tagging output layer to Caffe2."""
probability_out = predict_net.Softmax(output_name, axis=model_out.dim() - 1)
return OutputLayerUtils.gen_additional_blobs(
predict_net, probability_out, model_out, output_name, self.target_names
)
def torchscript_predictions(self):
return jit.script(WordTaggingScores(self.target_names))
class CRFOutputLayer(OutputLayerBase):
"""
Output layer for word tagging models that use Conditional Random Field.
Args:
num_tags (int): Total number of possible word tags.
Attributes:
num_tags: Total number of possible word tags.
"""
__EXPANSIBLE__ = True
@classmethod
def from_config(cls, config: OutputLayerBase.Config, labels: Vocabulary):
vocab_size = len(labels)
return cls(vocab_size, labels)
def __init__(self, num_tags, labels: Vocabulary, *args) -> None:
super().__init__(list(labels), *args)
self.crf = CRF(
num_tags=num_tags,
ignore_index=labels.get_pad_index(Padding.DEFAULT_LABEL_PAD_IDX),
default_label_pad_index=Padding.DEFAULT_LABEL_PAD_IDX,
)
def get_loss(
self,
logit: torch.Tensor,
target: torch.Tensor,
context: Dict[str, Any],
reduce=True,
):
"""Compute word tagging loss by using CRF.
Args:
logit (torch.Tensor): Logit returned by
:class:`~pytext.models.WordTaggingModel`.
targets (torch.Tensor): True document label/target.
context (Dict[str, Any]): Context is a dictionary of items
that's passed as additional metadata. Defaults to None.
reduce (bool): Whether to reduce loss over the batch. Defaults to True.
Returns:
torch.Tensor: Word tagging loss computed via CRF.
"""
loss = -1 * self.crf(logit, target, reduce=False)
return loss.mean() if reduce else loss
def get_pred(
self,
logit: torch.Tensor,
target: Optional[torch.Tensor] = None,
context: Optional[Dict[str, Any]] = None,
):
"""Compute and return prediction and scores from the model.
Prediction is computed using CRF decoding.
Scores are softmax scores over the model logits where the logits are
computed by rearranging the word logits such that the decoded word tag has
the highest-valued logits. This is done because with CRF, the highest-valued
tag for a given word may not be part of the overall decoded tag sequence. In
order for argmax to work, we rearrange the logit values.
Args:
logit (torch.Tensor): Logits returned by
:class:`~pytext.models.WordTaggingModel`.
target (torch.Tensor): Not applicable. Defaults to None.
context (Optional[Dict[str, Any]]): Context is a dictionary of items
that's passed as additional metadata. Defaults to None.
Returns:
Tuple[torch.Tensor, torch.Tensor]: Model prediction and scores.
"""
if not context:
raise MissingValueError("Expected non-None context but got None.")
pred = self.crf.decode(logit, context["seq_lens"])
logit_rearranged = _rearrange_output(logit, pred)
scores = F.log_softmax(logit_rearranged, 2)
return pred, scores
def export_to_caffe2(
self,
workspace: core.workspace,
init_net: core.Net,
predict_net: core.Net,
model_out: torch.Tensor,
output_name: str,
) -> List[core.BlobReference]:
"""
Exports the CRF output layer to Caffe2.
See `OutputLayerBase.export_to_caffe2()` for details.
"""
output_score = self.crf.export_to_caffe2(
workspace, init_net, predict_net, output_name
)
probability_out = predict_net.Softmax(output_score, axis=model_out.dim() - 1)
return OutputLayerUtils.gen_additional_blobs(
predict_net, probability_out, model_out, output_name, self.target_names
)
def torchscript_predictions(self):
return jit.script(CRFWordTaggingScores(self.target_names, jit.script(self.crf)))
@jit.script
def _rearrange_output(logit, pred):
"""
Rearrange the word logits so that the decoded word has the highest valued
logits by swapping the indices predicted with those with maximum logits.
"""
max_logits, max_logit_indices = torch.max(logit, 2, keepdim=True)
pred_indices = pred.unsqueeze(2)
pred_logits = torch.gather(logit, 2, pred_indices)
logit_rearranged = logit.scatter(2, pred_indices, max_logits)
logit_rearranged.scatter_(2, max_logit_indices, pred_logits)
return logit_rearranged
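# Illustration (assumed values, not from the original source): for a logit row
# [0.1, 0.9, 0.4] whose argmax is index 1, if the CRF decodes index 2 instead,
# _rearrange_output swaps the two values so the row becomes [0.1, 0.4, 0.9]
# and a plain argmax now agrees with the CRF-decoded path.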
@jit.script
def _get_prediction_from_scores(
scores: torch.Tensor, classes: List[str]
) -> List[List[Dict[str, float]]]:
"""
Given scores for a batch, get the prediction for each word in the form of a
List[List[Dict[str, float]]] for callers of the torchscript model to consume.
The outer list iterates over batches of sentences and the inner iterates
over each token in the sentence. The dictionary consists of
`label:score` for each word.
Example:
Assuming slot labels are [No-Label, Number, Name]
Utterances: [[call john please], [Brightness 25]]
Output could look like:
[
[
{ No-Label: -0.1, Number: -1.5, Name: -9.01},
{ No-Label: -2.1, Number: -1.5, Name: -0.01},
{ No-Label: -0.1, Number: -1.5, Name: -2.01},
],
[
{ No-Label: -0.1, Number: -1.5, Name: -9.01},
{ No-Label: -2.1, Number: -0.5, Name: -7.01},
{ No-Label: -0.1, Number: -1.5, Name: -2.01},
]
]
"""
results: List[List[Dict[str, float]]] = []
# Extra verbosity because jit doesn't support zip
for sentence_scores in scores.chunk(len(scores)):
sentence_scores = sentence_scores.squeeze(0)
sentence_response: List[Dict[str, float]] = []
for word_scores in sentence_scores.chunk(len(sentence_scores)):
word_scores = word_scores.squeeze(0)
word_response: Dict[str, float] = {}
for i in range(len(classes)):
word_response[classes[i]] = float(word_scores[i].item())
sentence_response.append(word_response)
results.append(sentence_response)
return results
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
5b6118211603a963a596f50431c0e911b8588a6d
|
85a9ffeccb64f6159adbd164ff98edf4ac315e33
|
/pysnmp/HM2-PLATFORM-QOS-MIB.py
|
266ddf674fd55da0a84500717ba606c6ffe577b8
|
[
"Apache-2.0"
] |
permissive
|
agustinhenze/mibs.snmplabs.com
|
5d7d5d4da84424c5f5a1ed2752f5043ae00019fb
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
refs/heads/master
| 2020-12-26T12:41:41.132395
| 2019-08-16T15:51:41
| 2019-08-16T15:53:57
| 237,512,469
| 0
| 0
|
Apache-2.0
| 2020-01-31T20:41:36
| 2020-01-31T20:41:35
| null |
UTF-8
|
Python
| false
| false
| 2,001
|
py
|
#
# PySNMP MIB module HM2-PLATFORM-QOS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HM2-PLATFORM-QOS-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:19:21 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
hm2PlatformMibs, = mibBuilder.importSymbols("HM2-TC-MIB", "hm2PlatformMibs")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ModuleIdentity, Counter64, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, Integer32, IpAddress, Unsigned32, TimeTicks, Counter32, MibIdentifier, Bits, NotificationType, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Counter64", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "Integer32", "IpAddress", "Unsigned32", "TimeTicks", "Counter32", "MibIdentifier", "Bits", "NotificationType", "ObjectIdentity")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
hm2PlatformQoS = ModuleIdentity((1, 3, 6, 1, 4, 1, 248, 12, 3))
hm2PlatformQoS.setRevisions(('2011-10-28 00:00',))
if mibBuilder.loadTexts: hm2PlatformQoS.setLastUpdated('201110280000Z')
if mibBuilder.loadTexts: hm2PlatformQoS.setOrganization('Hirschmann Automation and Control GmbH')
mibBuilder.exportSymbols("HM2-PLATFORM-QOS-MIB", hm2PlatformQoS=hm2PlatformQoS, PYSNMP_MODULE_ID=hm2PlatformQoS)
|
[
"dcwangmit01@gmail.com"
] |
dcwangmit01@gmail.com
|
a46d27a473a1738f47729fe208035b789ee303df
|
ef8287e4000b539e6e7f60fba37c6f8d0e230d64
|
/tart/tart/urls.py
|
6ceb8bab657cf1cebdc2120858d881274ab1188e
|
[] |
no_license
|
ImInnocent/2021_DEV_MEMBER_2
|
4a85d7d2bc4df41294798e93d06af1464e8389c2
|
7be8cef1a83b5929ba8b69fac701584d51b64134
|
refs/heads/master
| 2023-02-26T14:13:39.833720
| 2021-02-02T07:03:05
| 2021-02-02T07:03:05
| 334,938,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
"""tart URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import include
urlpatterns = [
path('admin/', admin.site.urls),
path('chat/', include('chat.urls')),
]
|
[
"ssje2006@naver.com"
] |
ssje2006@naver.com
|
4d4b0e3d8d3f2f32be2c213462bbff33e8e26cc5
|
c17bc41b85a79f42b2fbcdcb31a34da314208fad
|
/analysis/Twitterer_in_Evac.py
|
891bce80e6914b533dfe889dc2156cff799fb14d
|
[] |
no_license
|
jenningsanderson/GIS3-Sandy-Project
|
8980c673fd1f191edfb12a219dce968726522823
|
c5494d0ec4fabb52349343ef432c5afd6bc86f5d
|
refs/heads/master
| 2020-05-17T14:48:14.669171
| 2014-04-27T19:19:31
| 2014-04-27T19:19:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,428
|
py
|
# -*- coding: utf-8 -*-
"""##################################
Created on Sat Apr 19 11:06:13 2014
###################################"""
import arcpy
from arcpy import env
env.workspace = r"D:\Project"
env.overwriteOutput = 1
import csv
users = []
with open('ImportToArcmap.csv' , 'rb') as csvfile:
reader = csv.reader(csvfile)
reader.next()
for i in range(0,1065):
this_user = {}
before_row = reader.next()
during_row = reader.next()
after_row = reader.next()
this_user['Name'] = before_row[0]
before_point = arcpy.Point()
before_point.Y = float(before_row[2])
before_point.X = float(before_row[3])
this_user['Before'] = (before_point)
during_point = arcpy.Point()
during_point.Y = float(during_row[2])
during_point.X = float(during_row[3])
this_user['During'] = (during_point)
after_point = arcpy.Point()
after_point.Y = float(after_row[2])
after_point.X = float(after_row[3])
this_user['After'] = (after_point)
users.append(this_user)
sr = arcpy.SpatialReference(4326)
arcpy.CreateFeatureclass_management(env.workspace, 'beforePoints.shp','POINT', spatial_reference = sr )
arcpy.CreateFeatureclass_management(env.workspace, 'duringPoints.shp','POINT', spatial_reference = sr)
arcpy.CreateFeatureclass_management(env.workspace, 'afterPoints.shp','POINT', spatial_reference = sr)
arcpy.AddField_management('beforePoints.shp', 'Name', 'STRING')
arcpy.AddField_management('duringPoints.shp', 'Name', 'STRING')
arcpy.AddField_management('afterPoints.shp', 'Name', 'STRING')
beforeCur = arcpy.InsertCursor('beforePoints.shp')
duringCur = arcpy.InsertCursor('duringPoints.shp')
afterCur = arcpy.InsertCursor('afterPoints.shp')
for user in users:
beforeRow = beforeCur.newRow()
beforeRow.Name = user['Name']
beforeRow.shape = user['Before']
beforeCur.insertRow(beforeRow)
duringRow = duringCur.newRow()
duringRow.Name = user['Name']
duringRow.shape = user['During']
duringCur.insertRow(duringRow)
afterRow = afterCur.newRow()
afterRow.Name = user['Name']
afterRow.shape = user['After']
afterCur.insertRow(afterRow)
del beforeCur, duringCur, afterCur, beforeRow, duringRow, afterRow
zones = 'NYC_EvacZones.shp'
arcpy.Clip_analysis('beforePoints.shp', zones, 'clipped_before.shp')
arcpy.Clip_analysis('duringPoints.shp', zones, 'clipped_during.shp')
arcpy.Clip_analysis('afterPoints.shp', zones, 'clipped_after.shp')
print 'done'
SerBefore = arcpy.SearchCursor('clipped_before.shp')
SerDuring = arcpy.SearchCursor('clipped_during.shp')
SerAfter = arcpy.SearchCursor('clipped_after.shp')
BeforeList = []
DuringList = []
AfterList = []
for people in SerBefore:
BeforeList.append(people.Name)
del people, SerBefore
for people in SerDuring:
DuringList.append(people.Name)
del people, SerDuring
for people in SerAfter:
AfterList.append(people.Name)
del people, SerAfter
#Sheltered-in-place
print list(set(BeforeList) & set(DuringList) & set(AfterList))
#Evacuation
print list((set(BeforeList) & set(AfterList)) - set(DuringList))
#Did not return home
print list(set(BeforeList) - set(DuringList) - set(AfterList))
|
[
"jennings.anderson@colorado.edu"
] |
jennings.anderson@colorado.edu
|
aa106740057df2a05d23656d73a3253c19c9d56a
|
eb76fc4e1b9e3b4e53e752103260c3970c91ae96
|
/rest_framework_cassandra_engine/tests/test_serializers.py
|
cdf50da2f6e9fa013534824a7856a3fca12399e1
|
[] |
no_license
|
saltyD0g/rest_framework_django_cassandra_engine
|
d7d6093e0368d5f8639bdce16fbf2af3503e6c5f
|
e54f2ac79d9a7e9b5589bc890708b5ee034d87d9
|
refs/heads/master
| 2021-01-10T20:36:30.329460
| 2014-12-30T22:30:34
| 2014-12-30T22:30:34
| 33,625,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,703
|
py
|
from datetime import datetime
from cqlengine.models import Model as cem
from cqlengine import columns as cec
from unittest import TestCase
from rest_framework_django_cassandra_engine.serializers import CassandraEngineModelSerializer
from rest_framework import serializers as s
class Job(cem):
id = cec.TimeUUID(primary_key=True)
title = cec.Text()
status = cec.Text()
notes = cec.Text(required=False)
on = cec.DateTime(default=datetime.utcnow)
weight = cec.Integer(default=0)
class Meta:
ordering = ('on',)
class JobSerializer(CassandraEngineModelSerializer):
id = s.Field()
title = s.CharField()
status = s.ChoiceField(read_only=True, choices=('draft', 'published'))
sort_weight = s.IntegerField(source='weight')
class Meta:
model = Job
fields = ('id', 'title','status', 'sort_weight')
class TestReadonlyRestore(TestCase):
def test_restore_object(self):
job = Job(title='original title', status='draft', notes='secure')
data = {
'title': 'updated title ...',
'status': 'published', # this one is read only
'notes': 'hacked', # this field should not update
'sort_weight': 10 # mapped to a field with a different name
}
serializer = JobSerializer(job, data=data, partial=True)
self.assertTrue(serializer.is_valid())
print (dir(serializer), serializer.data, serializer.get_fields())
obj = serializer#.object
self.assertEqual(data['title'], obj.title)
self.assertEqual('draft', obj.status)
self.assertEqual('secure', obj.notes)
self.assertEqual(10, obj.weight)
|
[
"blacknsquid@yahoo.com"
] |
blacknsquid@yahoo.com
|
852fdf1d449a59a383d4c8caa9866edb6361903b
|
2cfa0cd5e016d81ecdd3f643e95bd6382652f1ab
|
/toTheMoon/leetcode_063_UniquePathsII.py
|
ff682144e0185e9da6e8e0a8c79f6f1e6cf75000
|
[
"MIT"
] |
permissive
|
jercas/offer66-leetcode-newcode
|
b863871840875cc38e0310b1e20ccaa4040ea134
|
a2e5256f27dbfb23fc34119fc857cd9b00e28c03
|
refs/heads/master
| 2020-05-07T17:43:43.326326
| 2019-10-24T12:52:32
| 2019-10-24T12:52:32
| 180,738,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,580
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 29 16:36:33 2019
@author: jercas
"""
"""
leetcode-63: 不同路径 II MEDIUM
'数组' '动态规划'
一个机器人位于一个 m x n 网格的左上角 (起始点在下图中标记为“Start” )。
机器人每次只能向下或者向右移动一步。机器人试图达到网格的右下角(在下图中标记为“Finish”)。
现在考虑网格中有障碍物。那么从左上角到右下角将会有多少条不同的路径?
Tips: 网格中的障碍物和空位置分别用 1 和 0 来表示。
m 和 n 的值均不超过 100。
"""
"""
Thinking:
1.动态规划法:同理考虑在No.62的基础上的变体,状态转移公式同62题,但需要加些额外判断:
(0) 若[0][0]即为1,说明起点就是堵死的,直接返回0即可;
(1) 在第一行[0][~]和第一列[~][0]中,当为0时,状态转移为dp[0][~] = dp[0][~-1]将前面/上面位置替换本位置,切记不可相加,否则在[[0]]的情况下,shape=(1,1)会计算两遍,返回2的错误值!
当为1时,死路置为0,以便不影响下面的状态转移计算;
以上都是在No.62中 dp = [[1]*m for _ in range(n)]上因为多了障碍后的复杂初始化变化,
(2) 在其他正常位置的,状态转移同Np.62,遇1死路置为0,遇0通路状态转移obstacleGrid[i][j] = obstacleGrid[i][j - 1] + obstacleGrid[i - 1][j]
(3) 最后返回终点值dp[-1][-1]即可
"""
class Solution(object):
def uniquePathsWithObstacles(self, obstacleGrid):
"""
:type obstacleGrid: List[List[int]]
:rtype: int
Time complexity: O(m*n), one pass over the m*n grid; 32ms, beats 100.00%
Space complexity: O(1), no extra space used, the input matrix itself serves as the dp array; 11.7MB, beats 46.30%
"""
# (1)
if obstacleGrid[0][0] == 1:
return 0
obstacleGrid[0][0] = 1
for i in range(len(obstacleGrid)):
for j in range(len(obstacleGrid[0])):
if i == 0 and j == 0:
continue
if obstacleGrid[i][j] == 1:
obstacleGrid[i][j] = 0
else:
# (2)
if i == 0:
obstacleGrid[i][j] = obstacleGrid[i][j - 1]
elif j == 0:
obstacleGrid[i][j] = obstacleGrid[i - 1][j]
# (3)
else:
obstacleGrid[i][j] = obstacleGrid[i][j - 1] + obstacleGrid[i - 1][j]
# (4)
return obstacleGrid[-1][-1]
if __name__ == '__main__':
Q = [
[0,0,0],
[0,1,0],
[0,0,0]
]
A = 2
solution = Solution()
if solution.uniquePathsWithObstacles(Q) == A:
print('The number of unique paths through the matrix {0} is {1}'.format(Q, A))
print('ac')
|
[
"jercas0618@163.com"
] |
jercas0618@163.com
|
85cb7838dbd05b606644a14b404c43c95d8ae8b9
|
13446678c3a8cc39ea2fd7dd9a07df60a6be12ac
|
/src/netius/adapters/null.py
|
50cc44fd00a809c644aade5241a3c25984fc49a7
|
[
"Apache-2.0"
] |
permissive
|
maxssage/netius
|
8648d9e40a8144279e669bea95e470d730c8edce
|
7949e336381ded609f97c818c84a6572afefed35
|
refs/heads/master
| 2020-12-25T13:12:52.946000
| 2015-09-22T10:42:42
| 2015-09-22T10:42:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,359
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2015 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2015 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
from . import base
class NullAdapter(base.BaseAdapter):
pass
|
[
"joamag@gmail.com"
] |
joamag@gmail.com
|
f3061149ecc0d07e3f5cf27f33a08f5d5fca99ac
|
884e7614fa607fe37d1fcaa312135a4146604a0c
|
/pythonProject/creat_to_efficent_and_yolo/unzip_classfication_update.py
|
1cc45b12c01dd2eab0c00b8726df502fc33569d6
|
[] |
no_license
|
TaoBowoa180011/WorkingScrip
|
7b1620c37af4d4af5ee6e177b35be0e48b282c9e
|
8b495b1bf4b4f063e1835368cf251910b137203e
|
refs/heads/master
| 2023-04-27T19:04:13.262025
| 2021-05-11T01:32:54
| 2021-05-11T01:32:54
| 366,217,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,082
|
py
|
import os
import cv2
import numpy as np
import random
global dstfolder
from tqdm import tqdm
filename="unzip/"
mse_fa=500
def mse(a,b):
try:
err = np.sum((a.astype("float")-b.astype("float"))**2)
err /= float(a.shape[0] * a.shape[1])
except:
err=0
return err
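# Note (added comment, not in the original script): mse(img, img) == 0, so
# crops that barely change between frames fall under mse_fa (500) and land in
# the "*_same" buckets below, while larger differences go to "*_diff".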
def judgement_class(objlocation,picture_name,output_name):
im = cv2.imread(picture_name)
h, w, _ = im.shape
objlocation = [float(i) for i in objlocation]
x1 = int((float(objlocation[0]) * w))
y1 = int((float(objlocation[1]) * h))
xw = int((float(objlocation[2])) * w / 2)
xh = int((float(objlocation[3])) * h / 2)
crop_img = im[y1 - xh:y1 + xh, x1 - xw:x1 + xw]
cv2.imwrite(output_name, crop_img)
def return_picture(context_2,picture_name):
im = cv2.imread(picture_name)
h, w, _ = im.shape
if type(context_2) ==str:
objlocation = context_2.split(" ")[1:]
else:
objlocation=context_2
objlocation = [float(i) for i in objlocation]
x1 = int((float(objlocation[0]) * w))
y1 = int((float(objlocation[1]) * h))
xw = int((float(objlocation[2])) * w / 2)
xh = int((float(objlocation[3])) * h / 2)
crop_img = im[y1 - xh:y1 + xh, x1 - xw:x1 + xw]
return crop_img, objlocation
def mse_pickup(mse_same,mse_diff,name):
mse_same = list(filter(lambda x:x[3][-4:-1]==name,mse_same))
mse_diff = list(filter(lambda x:x[3][-4:-1]==name,mse_diff))
if mse_diff:
if len(mse_diff) < 80:
if len(mse_same) >80-len(mse_diff):
return random.sample(mse_same,80-len(mse_diff)) + mse_diff
else:
return mse_diff+mse_same
else :
return random.sample(mse_diff,80)
else:
if len(mse_same) > 80:
return random.sample(mse_same,80)
else:
return mse_same
for root,dir,files in os.walk(filename):
for d in tqdm(dir):
if d[-4:]=='data':
continue
dstfolder="location_dataset/"+d.split('_')[3][0]+"/"
# print(dstfolder)
orign_lid=[]
orign_sink=[]
dstfolder_list=[dstfolder,'location_dataset/sink/','location_dataset/sink/picture/','location_dataset/sink/txt/']
for dst in dstfolder_list:
if not os.path.isdir(dst):
os.mkdir(dst)
'''start set orign'''
with open(filename+ d + '/obj_train_data/frame_000000.txt') as orign_txt:
orign_contexts = orign_txt.readlines()
for orign_context in orign_contexts:
if orign_context[0] in ['0','1','3']:
orign_lid.append(return_picture(orign_context,filename+ d + '/obj_train_data/frame_000000.PNG'))
if orign_context[0] in ['2','4']:
orign_sink.append(return_picture(orign_context,filename + d + '/obj_train_data/frame_000000.PNG'))
# print(len(orign_sink))
# print(orign_lid)
"""start all folder"""
# print(orign_sink)
mse_lid_list_same=[]
mse_lid_list_diff=[]
mse_sink_list_same = []
mse_sink_list_diff = []
for r, d1, f in os.walk(filename+ d+'/obj_train_data'):
for f1 in f:
if f1[-1] == 'G':
with open(filename + d + '/obj_train_data/' + f1[:-4] + '.txt') as every_picture:
every_contexts = every_picture.readlines()
fileflag=''
for every_context in every_contexts:
# print(every_context)
if every_context[0] == '0':
fileflag='lidopen/'
if every_context[0] == '1':
fileflag='lidclose/'
if every_context[0] == '3':
fileflag='lidopenwithobj/'
dstfolder_update= dstfolder+fileflag
# print(dstfolder_update)
if not os.path.isdir(dstfolder_update):
os.mkdir(dstfolder_update)
for ds in [dstfolder_update+'picture/',dstfolder_update+'txt/']:
if not os.path.isdir(ds):
os.mkdir(ds)
if orign_sink:
for orign_sink_location in orign_sink:
# print(orign_sink_location)
crop_img,objlocation=return_picture(orign_sink_location[1],
filename+ d+'/obj_train_data/'+f1)
mse_sink_result=mse(orign_sink_location[0],crop_img)
if mse_sink_result <= mse_fa:
mse_sink_list_same.append([mse_sink_result,crop_img,f1])
else:
mse_sink_list_diff.append([mse_sink_result,crop_img,f1])
if orign_lid:
for orign_lid_location in orign_lid:
crop_img,objlocation=return_picture(orign_lid_location[1],
filename + d + '/obj_train_data/' + f1)
mse_lid_result=mse(orign_lid_location[0],crop_img)
if mse_lid_result <= mse_fa:
mse_lid_list_same.append([mse_lid_result,crop_img,f1,dstfolder_update])
else:
mse_lid_list_diff.append([mse_lid_result,crop_img,f1,dstfolder_update])
mse_lid_all_list = []
for name in ['obj','ose','pen']:
mse_lid_all_list += mse_pickup(mse_lid_list_same,mse_lid_list_diff,name)
if len(mse_sink_list_same)>=20:
mse_sink_list_same=random.sample(mse_sink_list_same,20)
if len(mse_sink_list_diff)>=40:
mse_sink_list_diff=random.sample(mse_sink_list_diff,40)
for sink_output in mse_sink_list_same+mse_sink_list_diff:
# print("cp /home/zhen/PycharmProjects/pythonProject/" + filename + d + '/obj_train_data/' + sink_output[2] +
# " /home/zhen/PycharmProjects/pythonProject/location_dataset/sink/picture/" +d+'_'+ sink_output[2])
os.system("cp /home/zhen/PycharmProjects/pythonProject/" + filename + d + '/obj_train_data/' + sink_output[2] +
" /home/zhen/PycharmProjects/pythonProject/location_dataset/sink/picture/" +d+'_'+ sink_output[2])
# print("cp /home/zhen/PycharmProjects/pythonProject/" + filename + d + '/obj_train_data/' + sink_output[2][:-4] + '.txt' +
# " /home/zhen/PycharmProjects/pythonProject/location_dataset/sink/txt/"+ d+'_'+sink_output[2][:-4] + '.txt')
os.system("cp /home/zhen/PycharmProjects/pythonProject/" + filename + d + '/obj_train_data/' + sink_output[2][:-4] + '.txt' +
" /home/zhen/PycharmProjects/pythonProject/location_dataset/sink/txt/" +d+'_'+ sink_output[2][:-4] + '.txt')
for lid_output in mse_lid_all_list:
# print("cp /home/zhen/PycharmProjects/pythonProject/"+filename+d+'/obj_train_data/'+lid_output[2]+
# " /home/zhen/PycharmProjects/pythonProject/"+lid_output[3]+fileflag+'picture/'+d+'_'+lid_output[2])
os.system("cp /home/zhen/PycharmProjects/pythonProject/"+filename+d+'/obj_train_data/'+lid_output[2]+
" /home/zhen/PycharmProjects/pythonProject/"+lid_output[3]+'picture/'+d+'_'+lid_output[2])
# print("cp /home/zhen/PycharmProjects/pythonProject/" + filename + d + '/obj_train_data/' + lid_output[2][:-4]+'.txt' +
# " /home/zhen/PycharmProjects/pythonProject/" + lid_output[3]+ 'txt/'+d +'_'+ lid_output[2][:-4]+'.txt')
os.system("cp /home/zhen/PycharmProjects/pythonProject/" + filename + d + '/obj_train_data/' + lid_output[2][:-4]+'.txt' +
" /home/zhen/PycharmProjects/pythonProject/" + lid_output[3] + 'txt/'+d +'_'+ lid_output[2][:-4]+'.txt')
|
[
"tb167883098@gmail.com"
] |
tb167883098@gmail.com
|
9f314976950e90cf36f6aeced9f303a16984cea5
|
f03df866e1534ccf1ce7bfc6de246bada590e64d
|
/cache/.mako.tmp/index_helper.tmpl.py
|
14e07ba31b2cacd3e0b1c07f57d0c75a1fce58a9
|
[] |
no_license
|
edgecollective/edge-blog.github.io
|
8c71da6ae9f38503a23589d435b108cee817e09a
|
1b5173cd99de16990e6d853e1f7934542bb5744c
|
refs/heads/master
| 2021-06-23T20:47:03.833055
| 2017-09-10T00:46:22
| 2017-09-10T00:46:22
| 100,330,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,397
|
py
|
# -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1505001037.852128
_enable_loop = True
_template_filename = '/home/dwblair/Snakepit/nikola-773/lib/python3.5/site-packages/nikola/data/themes/base/templates/index_helper.tmpl'
_template_uri = 'index_helper.tmpl'
_source_encoding = 'utf-8'
_exports = ['mathjax_script', 'html_pager']
def _mako_get_namespace(context, name):
try:
return context.namespaces[(__name__, name)]
except KeyError:
_mako_generate_namespaces(context)
return context.namespaces[(__name__, name)]
def _mako_generate_namespaces(context):
ns = runtime.TemplateNamespace('math', context._clean_inheritance_tokens(), templateuri='math_helper.tmpl', callables=None, calling_uri=_template_uri)
context.namespaces[(__name__, 'math')] = ns
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
__M_writer = context.writer()
__M_writer('\n')
__M_writer('\n\n')
__M_writer('\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_mathjax_script(context,posts):
__M_caller = context.caller_stack._push_frame()
try:
math = _mako_get_namespace(context, 'math')
__M_writer = context.writer()
__M_writer('\n ')
__M_writer(str(math.math_scripts_ifposts(posts)))
__M_writer('\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_html_pager(context):
__M_caller = context.caller_stack._push_frame()
try:
nextlink = context.get('nextlink', UNDEFINED)
prevlink = context.get('prevlink', UNDEFINED)
messages = context.get('messages', UNDEFINED)
__M_writer = context.writer()
__M_writer('\n')
if prevlink or nextlink:
__M_writer(' <nav class="postindexpager">\n <ul class="pager">\n')
if prevlink:
__M_writer(' <li class="previous">\n <a href="')
__M_writer(str(prevlink))
__M_writer('" rel="prev">')
__M_writer(str(messages("Newer posts")))
__M_writer('</a>\n </li>\n')
if nextlink:
__M_writer(' <li class="next">\n <a href="')
__M_writer(str(nextlink))
__M_writer('" rel="next">')
__M_writer(str(messages("Older posts")))
__M_writer('</a>\n </li>\n')
__M_writer(' </ul>\n </nav>\n')
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"uri": "index_helper.tmpl", "source_encoding": "utf-8", "line_map": {"64": 9, "65": 9, "66": 9, "67": 9, "68": 12, "69": 13, "70": 14, "71": 14, "72": 14, "73": 14, "74": 17, "80": 74, "23": 2, "26": 0, "31": 2, "32": 20, "33": 25, "39": 23, "44": 23, "45": 24, "46": 24, "52": 3, "59": 3, "60": 4, "61": 5, "62": 7, "63": 8}, "filename": "/home/dwblair/Snakepit/nikola-773/lib/python3.5/site-packages/nikola/data/themes/base/templates/index_helper.tmpl"}
__M_END_METADATA
"""
|
[
"donblair@gmail.com"
] |
donblair@gmail.com
|
075b409dac6f9bf6d113a8ef9214b6134f53426d
|
6ccd8d2e6bc85cb73f205e239ad3618782b7e5bf
|
/caseStudyObjectTracking/eyeTracker.py
|
a937da06f062c3bcdf4ce3d551d25606368fcb75
|
[] |
no_license
|
lkhoho/opencvWithCaseStudy
|
745c901b86caaacf5753dbe2938e3e3c51430d3d
|
a12e590a681c45540b7ef04dc1665077ae488c5f
|
refs/heads/master
| 2021-04-15T15:50:33.718403
| 2018-03-26T09:53:21
| 2018-03-26T09:53:21
| 126,807,560
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,069
|
py
|
import argparse
import numpy as np
import cv2
from utils.imutils import resize
from utils.eyetracker import EyeTracker
ap = argparse.ArgumentParser()
ap.add_argument('-f', '--face', required=True, help='Path to where the face cascade resides')
ap.add_argument('-e', '--eye', required=True, help='Path to where the eye cascade resides')
ap.add_argument('-v', '--video', help='Path to the (optional) video file')
args = vars(ap.parse_args())
et = EyeTracker(args['face'], args['eye'])
if not args.get('video', False):
camera = cv2.VideoCapture(0)
else:
camera = cv2.VideoCapture(args['video'])
while True:
(isGrabbed, frame) = camera.read()
if args.get('video') and not isGrabbed:
break
frame = resize(frame, 600)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
rects = et.track(gray)
for rect in rects:
cv2.rectangle(frame, (rect[0], rect[1]), (rect[2], rect[3]), (0, 255, 0), 2)
cv2.imshow('Tracking', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
camera.release()
cv2.destroyAllWindows()
|
[
"keliu@uwm.edu"
] |
keliu@uwm.edu
|
c58507d6fcda5741e7ea9160a8d84c9117300cbb
|
a863483b9fa4e940718a3206340e698d9cdbc341
|
/elpy/rpc-venv/lib/python3.8/site-packages/platformdirs/android.py
|
eda0de737ead7c1262f39092e4b6db523ea89099
|
[
"BSD-2-Clause"
] |
permissive
|
jhsygg/emacs.d
|
ebed42b52d4609e23ad9fa41fa873d2f52980bec
|
addf36f3e2fb3419f547df6180b835036a295542
|
refs/heads/main
| 2023-06-12T07:54:26.863128
| 2023-06-06T07:02:02
| 2023-06-06T07:02:02
| 124,079,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,530
|
py
|
from __future__ import annotations
import os
import re
import sys
from functools import lru_cache
from typing import cast
from .api import PlatformDirsABC
class Android(PlatformDirsABC):
"""
Follows the guidance `from here <https://android.stackexchange.com/a/216132>`_. Makes use of the
`appname <platformdirs.api.PlatformDirsABC.appname>`,
`version <platformdirs.api.PlatformDirsABC.version>`,
`ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.
"""
@property
def user_data_dir(self) -> str:
""":return: data directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/files/<AppName>``"""
return self._append_app_name_and_version(cast(str, _android_folder()), "files")
@property
def site_data_dir(self) -> str:
""":return: data directory shared by users, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_config_dir(self) -> str:
"""
:return: config directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/shared_prefs/<AppName>``
"""
return self._append_app_name_and_version(cast(str, _android_folder()), "shared_prefs")
@property
def site_config_dir(self) -> str:
""":return: config directory shared by the users, same as `user_config_dir`"""
return self.user_config_dir
@property
def user_cache_dir(self) -> str:
""":return: cache directory tied to the user, e.g. e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>``"""
return self._append_app_name_and_version(cast(str, _android_folder()), "cache")
@property
def site_cache_dir(self) -> str:
""":return: cache directory shared by users, same as `user_cache_dir`"""
return self.user_cache_dir
@property
def user_state_dir(self) -> str:
""":return: state directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_log_dir(self) -> str:
"""
:return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it,
e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/log``
"""
path = self.user_cache_dir
if self.opinion:
path = os.path.join(path, "log")
return path
@property
def user_documents_dir(self) -> str:
"""
:return: documents directory tied to the user e.g. ``/storage/emulated/0/Documents``
"""
return _android_documents_folder()
@property
def user_pictures_dir(self) -> str:
"""
:return: pictures directory tied to the user e.g. ``/storage/emulated/0/Pictures``
"""
return _android_pictures_folder()
@property
def user_videos_dir(self) -> str:
"""
:return: videos directory tied to the user e.g. ``/storage/emulated/0/DCIM/Camera``
"""
return _android_videos_folder()
@property
def user_music_dir(self) -> str:
"""
:return: music directory tied to the user e.g. ``/storage/emulated/0/Music``
"""
return _android_music_folder()
@property
def user_runtime_dir(self) -> str:
"""
:return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it,
e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/tmp``
"""
path = self.user_cache_dir
if self.opinion:
path = os.path.join(path, "tmp")
return path
@lru_cache(maxsize=1)
def _android_folder() -> str | None:
""":return: base folder for the Android OS or None if cannot be found"""
try:
# First try to get path to android app via pyjnius
from jnius import autoclass
Context = autoclass("android.content.Context") # noqa: N806
result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath()
except Exception:
# if fails find an android folder looking path on the sys.path
pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
for path in sys.path:
if pattern.match(path):
result = path.split("/files")[0]
break
else:
result = None
return result
@lru_cache(maxsize=1)
def _android_documents_folder() -> str:
""":return: documents folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass
Context = autoclass("android.content.Context") # noqa: N806
Environment = autoclass("android.os.Environment") # noqa: N806
documents_dir: str = Context.getExternalFilesDir(Environment.DIRECTORY_DOCUMENTS).getAbsolutePath()
except Exception:
documents_dir = "/storage/emulated/0/Documents"
return documents_dir
@lru_cache(maxsize=1)
def _android_pictures_folder() -> str:
""":return: pictures folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass
Context = autoclass("android.content.Context") # noqa: N806
Environment = autoclass("android.os.Environment") # noqa: N806
pictures_dir: str = Context.getExternalFilesDir(Environment.DIRECTORY_PICTURES).getAbsolutePath()
except Exception:
pictures_dir = "/storage/emulated/0/Pictures"
return pictures_dir
@lru_cache(maxsize=1)
def _android_videos_folder() -> str:
""":return: videos folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass
Context = autoclass("android.content.Context") # noqa: N806
Environment = autoclass("android.os.Environment") # noqa: N806
videos_dir: str = Context.getExternalFilesDir(Environment.DIRECTORY_DCIM).getAbsolutePath()
except Exception:
videos_dir = "/storage/emulated/0/DCIM/Camera"
return videos_dir
@lru_cache(maxsize=1)
def _android_music_folder() -> str:
""":return: music folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass
Context = autoclass("android.content.Context") # noqa: N806
Environment = autoclass("android.os.Environment") # noqa: N806
music_dir: str = Context.getExternalFilesDir(Environment.DIRECTORY_MUSIC).getAbsolutePath()
except Exception:
music_dir = "/storage/emulated/0/Music"
return music_dir
__all__ = [
"Android",
]
|
[
"jhsygg@126.com"
] |
jhsygg@126.com
|
f95b2d23490efbb9545f6e0bf0d35ee137da73f7
|
cfc1ff383a1156b5a8e13c86a2635db3381c760c
|
/Loris-PAF/mxc_run_GA/mxc_run_ga_utils.py~
|
089b28501b2860bc731131f0dafc05ff55a9586a
|
[] |
no_license
|
romaintinh/paf
|
afb67483aebfada32c316fef5e4dbab6d6e9b755
|
671aca3d86aa1df01f2cc3112cb4a3e4427d8895
|
refs/heads/master
| 2020-03-21T07:50:15.598713
| 2018-06-28T15:25:46
| 2018-06-28T15:25:46
| 138,302,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,209
|
import random
import copy
import packingUtils
import schedulabilityFunctions
import fileUtils
import global_variables
def Print_individual(individual):
size = len(individual)
for i in range(0,size):
print individual[i]
def Correct_individual_matrix(individual,list_aggregated_servers,list_task_to_schedule_for_each_aggregated_server,nb_hi_task):
nb_aggregate_modal_servers = len(list_aggregated_servers)
for i in range(0,nb_aggregate_modal_servers):
nb_task_to_schedule = len(list_task_to_schedule_for_each_aggregated_server[i])
nb_modal_servers = len(list_aggregated_servers[i])
for k in range(0,nb_modal_servers):
index_task_hi = int(list_aggregated_servers[i][k][1:len(list_aggregated_servers[i][k])])
for j in range(0,nb_task_to_schedule):
index_task_lo = int(list_task_to_schedule_for_each_aggregated_server[i][j][1:len(list_task_to_schedule_for_each_aggregated_server[i][j])])- nb_hi_task
individual[index_task_hi][index_task_lo] = 1
def init_Individual(icls,hi_task_list, nb_hi_tasks, lo_tasks_name, nb_lo_tasks):
#print
#print "init_ind"
individual = []
lo_tasks_name_copy = copy.deepcopy(lo_tasks_name)
previous_step_allocated = False
probability_of_allocation = 0.5
for i in range(0,nb_hi_tasks):
# print "lo_tasks_name_copy"
# print lo_tasks_name_copy
modal_server_line = [0 for k in range(0,nb_lo_tasks)]
nb_task_schedulable_by_modal_server_utilization = len(hi_task_list[i].schedulable_with_utilization)
nb_task_schedulable_by_modal_server_sbf = len(hi_task_list[i].schedulable_with_sbf)
allocate_to_this_modal_server = random.random()
if (nb_task_schedulable_by_modal_server_utilization != 0 or nb_task_schedulable_by_modal_server_sbf != 0) and len(lo_tasks_name_copy) > 0 and allocate_to_this_modal_server >= probability_of_allocation:
task_schedulable = []
task_schedulable.extend(hi_task_list[i].schedulable_with_utilization)
task_schedulable.extend(hi_task_list[i].schedulable_with_sbf)
nb_task_schedulable = len(task_schedulable)
index = random.randint(0, nb_task_schedulable-1)
count_try = 0
while task_schedulable[index].name not in lo_tasks_name_copy and count_try < len(lo_tasks_name_copy):
index = random.randint(0, nb_task_schedulable-1)
count_try += 1
if task_schedulable[index].name in lo_tasks_name_copy:
lo_task_number = lo_tasks_name_copy.index(task_schedulable[index].name)
lo_task_position = int(task_schedulable[index].name[1:len(task_schedulable[index].name)])-nb_hi_tasks
modal_server_line[lo_task_position] = 1
if task_schedulable[index].name not in hi_task_list[i].schedulable_with_sbf:
lo_tasks_name_copy.pop(lo_task_number)
previous_step_allocated = True
if previous_step_allocated == False:
probability_of_allocation -= 0.1
else:
probability_of_allocation = 0.5
individual.append(copy.deepcopy(modal_server_line))
return icls(individual)
def init_Individual_with_part_randomness(icls,hi_task_list, nb_hi_tasks, lo_tasks_name, nb_lo_tasks):
individual = []
lo_tasks_name_copy = copy.deepcopy(lo_tasks_name)
previous_step_allocated = False
probability_of_allocation = 0.2
for i in range(0,nb_hi_tasks):
modal_server_line = [0 for k in range(0,nb_lo_tasks)]
nb_task_schedulable_by_modal_server_utilization = len(hi_task_list[i].schedulable_with_utilization)
nb_task_schedulable_by_modal_server_sbf = len(hi_task_list[i].schedulable_with_sbf)
allocate_to_this_modal_server = random.random()
if (nb_task_schedulable_by_modal_server_utilization != 0 or nb_task_schedulable_by_modal_server_sbf != 0) and len(lo_tasks_name_copy) > 0 and allocate_to_this_modal_server >= probability_of_allocation:
task_schedulable = []
task_schedulable.extend(hi_task_list[i].schedulable_with_utilization)
task_schedulable.extend(hi_task_list[i].schedulable_with_sbf)
nb_task_schedulable = len(task_schedulable)
index = random.randint(0, nb_task_schedulable-1)
count_try = 0
while task_schedulable[index].name not in lo_tasks_name_copy and count_try < len(lo_tasks_name_copy):
index = random.randint(0, nb_task_schedulable-1)
count_try += 1
if task_schedulable[index].name in lo_tasks_name_copy:
lo_task_number = lo_tasks_name_copy.index(task_schedulable[index].name)
lo_task_position = int(task_schedulable[index].name[1:len(task_schedulable[index].name)])-nb_hi_tasks
modal_server_line[lo_task_position] = 1
if task_schedulable[index].name not in hi_task_list[i].schedulable_with_sbf:
lo_tasks_name_copy.pop(lo_task_number)
previous_step_allocated = True
if previous_step_allocated == False:
probability_of_allocation -= 0.1
else:
probability_of_allocation = 0.5
individual.append(copy.deepcopy(modal_server_line))
return icls(individual)
def init_Individual_with_sbf_first(icls,hi_task_list, nb_hi_tasks, lo_tasks_name, nb_lo_tasks):
individual = []
lo_tasks_name_copy = copy.deepcopy(lo_tasks_name)
previous_step_allocated = False
probability_of_allocation = 0.2
for i in range(0,nb_hi_tasks):
modal_server_line = [0 for k in range(0,nb_lo_tasks)]
nb_task_schedulable_by_modal_server_utilization = len(hi_task_list[i].schedulable_with_utilization)
nb_task_schedulable_by_modal_server_sbf = len(hi_task_list[i].schedulable_with_sbf)
allocate_to_this_modal_server = random.random()
if (nb_task_schedulable_by_modal_server_utilization != 0 or nb_task_schedulable_by_modal_server_sbf != 0) and len(lo_tasks_name_copy) > 0 and allocate_to_this_modal_server >= probability_of_allocation:
task_schedulable = []
task_schedulable.extend(hi_task_list[i].schedulable_with_utilization)
task_schedulable.extend(hi_task_list[i].schedulable_with_sbf)
nb_task_schedulable = len(task_schedulable)
if len(hi_task_list[i].schedulable_with_sbf) != 0:
index = random.randint(0, len(hi_task_list[i].schedulable_with_sbf)-1)
count_try = 0
while task_schedulable[index].name not in lo_tasks_name_copy and count_try < len(lo_tasks_name_copy):
index = random.randint(0, len(hi_task_list[i].schedulable_with_sbf)-1)
count_try += 1
if task_schedulable[index].name in lo_tasks_name_copy:
lo_task_number = lo_tasks_name_copy.index(task_schedulable[index].name)
lo_task_position = int(task_schedulable[index].name[1:len(task_schedulable[index].name)])-nb_hi_tasks
modal_server_line[lo_task_position] = 1
previous_step_allocated = True
if previous_step_allocated == False:
index = random.randint(0, len(hi_task_list[i].schedulable_with_utilization)-1)
count_try = 0
while task_schedulable[index].name not in lo_tasks_name_copy and count_try < len(lo_tasks_name_copy):
index = random.randint(0, len(hi_task_list[i].schedulable_with_utilization)-1)
count_try += 1
if task_schedulable[index].name in lo_tasks_name_copy:
lo_task_number = lo_tasks_name_copy.index(task_schedulable[index].name)
lo_task_position = int(task_schedulable[index].name[1:len(task_schedulable[index].name)])-nb_hi_tasks
modal_server_line[lo_task_position] = 1
previous_step_allocated = True
if task_schedulable[index].name not in hi_task_list[i].schedulable_with_sbf:
#lo_tasks_name_copy.pop(lo_task_number)
previous_step_allocated = True
individual.append(copy.deepcopy(modal_server_line))
return icls(individual)
def init_Population(pcls,ind_init,nb_individus):#,hi_task_parameters, nb_task_hi, copy_lo_tasks_name, nb_lo_task, nb_possible_allocation):
population = []
for i in range(0,nb_individus):
string ="IND"+str(i)
individual = ind_init()
individual.generation_of_creation = -1
population.append(individual)
return pcls(population)
def evalAlloc(individual,hi_task_list,nb_hi_tasks,lo_task_list,nb_lo_tasks,task_parameters,hyperperiod):
print "#######################"
print "Eval"
Print_individual(individual)
evaluation = 0.
list_aggregated_name,list_task_to_schedule_for_each_aggregated_server_name = schedulabilityFunctions.Identify_aggregated_servers_and_their_tasks(individual, nb_hi_tasks,nb_lo_tasks,task_parameters)
Correct_individual_matrix(individual,list_aggregated_name,list_task_to_schedule_for_each_aggregated_server_name,nb_hi_tasks)
print "Corrected ind"
Print_individual(individual)
nb_aggregate_modal_servers = len(list_aggregated_name)
schedulability = False
for i in range(0,nb_aggregate_modal_servers):
period_schedulability = False
list_aggregated_modal_server = packingUtils.Convert_task_name_list_into_task_list(list_aggregated_name[i],hi_task_list)
list_task = packingUtils.Convert_task_name_list_into_task_list(list_task_to_schedule_for_each_aggregated_server_name[i],lo_task_list)
print "list_aggregated_modal_server"
packingUtils.showTaskList(list_aggregated_modal_server)
print
print "list_task"
packingUtils.showTaskList(list_task)
print
if len(list_aggregated_modal_server) == 1:
schedulability,task_utilization = schedulabilityFunctions.Check_schedulability_utilization(list_aggregated_modal_server, list_task)
if schedulability == True:
evaluation += task_utilization
period_schedulability = True
else:
evaluation -= task_utilization*10000
print "utilization"
print schedulability
if period_schedulability == False:
task_utilization = schedulabilityFunctions.Compute_task_list_utilization_from_task_name_list(list_task)
schedulability = schedulabilityFunctions.Check_schedulability_sbf_dbf(list_task,list_aggregated_modal_server, hyperperiod)
if schedulability == True:
evaluation += task_utilization
else:
evaluation -= task_utilization*10000
if len(list_aggregated_name[i]) > 1:
global_variables.global_nb_aggregated_servers += len(list_aggregated_name[i])
print("AGGREGATED MODAL SERVERS \n%s\t%s\t%s" % (individual, evaluation, individual.generation_of_creation) )
print "sbf/dbf"
print schedulability
print list_aggregated_name[i]
print list_task_to_schedule_for_each_aggregated_server_name[i]
print
print
print
return evaluation,
def Process_results(individual,hi_task_list,lo_task_list,task_parameters,task_file_path,file_path_result):
nb_hi_tasks=len(hi_task_list)
nb_lo_tasks=len(lo_task_list)
nb_tasks = len(task_parameters)
list_aggregated_name,list_task_to_schedule_for_each_aggregated_server_name = schedulabilityFunctions.Identify_aggregated_servers_and_their_tasks(individual, nb_hi_tasks,nb_lo_tasks,task_parameters)
Correct_individual_matrix(individual,list_aggregated_name,list_task_to_schedule_for_each_aggregated_server_name,nb_hi_tasks)
modal_servers_task_list = []
allocated_tasks_list = []
for i in range(0,len(list_aggregated_name)):
modal_servers_task_list.append(packingUtils.Convert_task_name_list_into_task_list(list_aggregated_name[i],task_parameters))
allocated_tasks_list.append(packingUtils.Convert_task_name_list_into_task_list(list_task_to_schedule_for_each_aggregated_server_name[i],task_parameters))
unallocated_lo_tasks = packingUtils.FindLoTaskNotAllocated(allocated_tasks_list,lo_task_list)
unused_modal_server = packingUtils.FindLoTaskNotAllocated(modal_servers_task_list,hi_task_list)
nb_aggregate_modal_server = len(modal_servers_task_list)
utilization_each_packing = []
final_packing = []
for i in range(0,nb_aggregate_modal_server):
nb_modal_server = len(modal_servers_task_list[i])
server = copy.deepcopy(copy.deepcopy(modal_servers_task_list[i][0]))
for j in range(1,nb_modal_server):
server = packingUtils.pack(server,modal_servers_task_list[i][j],2,1)
final_packing.append(copy.deepcopy(server))
final_packing.extend(copy.deepcopy(unallocated_lo_tasks))
final_packing.extend(copy.deepcopy(unused_modal_server))
final_packing = packingUtils.findOnePacking(final_packing,2)
task_file_path_modal_server = task_file_path+"_modal_server_allocation.txt"
fileUtils.WriteTaskSetAndModalServers(task_file_path_modal_server,task_parameters,lo_task_list,hi_task_list,final_packing,modal_servers_task_list,allocated_tasks_list)
utilizationLoTask = packingUtils.ComputeUtilizationOfTaskList(lo_task_list, 1)
utilizationHiTasksInHiMode = packingUtils.ComputeUtilizationOfTaskList(hi_task_list, 2)
utilizationHiTasksInLoMode = packingUtils.ComputeUtilizationOfTaskList(hi_task_list, 1)
utilizationHiMinusLoHi = utilizationHiTasksInHiMode - utilizationHiTasksInLoMode
Ulimit = utilizationLoTask + utilizationHiTasksInHiMode
utilization_mxc_run = 0.
for i in range(0,len(allocated_tasks_list)):
nb_tasks_allocated = len(allocated_tasks_list[i])
for j in range(0,nb_tasks_allocated):
utilization_mxc_run -= allocated_tasks_list[i][j].utilization[0]
utilization_mxc_run += Ulimit
task_file_path_result = file_path_result
fileUtils.WriteExperimentsValueInFile(task_file_path_result,nb_tasks,Ulimit,utilizationHiTasksInHiMode,utilizationLoTask,utilization_mxc_run, task_file_path,task_file_path_modal_server,len(hi_task_list),utilizationHiMinusLoHi)
def Check_best_ind_fitness(fitness):
nb_parameters = len(fitness)
for i in range(0,nb_parameters):
if fitness[i] < 0:
return False
return True
def mut_flip_bit_aggregation(individual):
choice_modal_server = random.randint(0, len(individual)-1)
choice_task = random.randint(0, len(individual[choice_modal_server])-1)
individual[choice_modal_server][choice_task] = 1
return individual,
def mut_flip_bit_desaggregation(individual):
choice_modal_server = random.randint(0, len(individual)-1)
choice_task = random.randint(0, len(individual[choice_modal_server])-1)
individual[choice_modal_server][choice_task] = 0
return individual,
def Compute_limit_for_agg_desagg(ratio_aggregation):
limit = 0.
if ratio_aggregation <= 0.3:
limit = 0.9 - ratio_aggregation * (0.9-0.7)/0.3
elif 0.3 < ratio_aggregation <= 0.7:
limit = 1 - ratio_aggregation
else:
limit = 1 - 2*ratio_aggregation
if limit < 0:
limit = 0.
return limit
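# Worked examples for the piecewise limit above (values follow directly
# from the three branches):
#   Compute_limit_for_agg_desagg(0.15) -> 0.9 - 0.15*(0.9-0.7)/0.3 = 0.8
#   Compute_limit_for_agg_desagg(0.5)  -> 1 - 0.5 = 0.5
#   Compute_limit_for_agg_desagg(0.9)  -> 1 - 2*0.9 < 0, clipped to 0.0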
def Round_proper(float_number,digit_precision):
float_number_str = str(float_number)
for i in range(0, len(float_number_str)):
if float_number_str[i] == ".":
digit_position = copy.deepcopy(i)
break
float_number_str_approx = float_number_str[:digit_position+digit_precision+1]
print
print "Round_proper"
print
print "int(float_number_str[digit_position+digit_precision+2])"
print int(float_number_str[digit_position+digit_precision+1])
print float_number_str_approx
    carry = False  # round up only when the first dropped digit is >= 5
    if int(float_number_str[digit_position+digit_precision+1]) >= 5:
        carry = True
for i in range(len(float_number_str_approx)-1,-1,-1):
print float_number_str_approx[i]
if carry == True and i != digit_position:
int_digit = int(float_number_str_approx[i])+1
print str(int_digit%10)[0]
float_number_str_approx = float_number_str_approx[:i]+str(int_digit%10)
if int_digit >= 10 and i != 0:
carry = True
elif int_digit >= 10 and i == 0:
print "Aggrandissement"
print float_number_str_approx
float_number_str_approx = str(1)+float_number_str_approx
print float_number_str_approx
carry = True
else:
carry = False
return float(float_number_str_approx)
|
[
"loris.millet@telecom-paristech.fr"
] |
loris.millet@telecom-paristech.fr
|
|
296aa57462e3fc225deea7abde33d6589be582f9
|
057b5bf1325dd1a642081069ad6d135b0a076ccc
|
/modules/uwebsockets/client.py
|
57128ed65b394839c51ecddb87f3601defdc40af
|
[] |
no_license
|
hiway/micropython-uasyncio-uwebsockets-modules
|
a919b80ae77e6f02654384faba2579d388c2dd39
|
59740adf87f44d5e0e910e97910437927b3d82c4
|
refs/heads/master
| 2020-12-24T13:52:43.682547
| 2016-12-17T12:55:22
| 2016-12-17T12:55:22
| 76,715,616
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,198
|
py
|
"""
Websockets client for micropython
Based very heavily off
https://github.com/aaugustin/websockets/blob/master/websockets/client.py
"""
import ubinascii as binascii
import urandom as random
from ucollections import namedtuple
import uasyncio as asyncio
import ulogging as logging
from .protocol import Websocket
LOGGER = logging.getLogger(__name__)
class WebsocketClient(Websocket):
is_client = True
def parse_endpoint(endpoint):
endpoint = endpoint.lower()
port_map = {'ws': 80, 'wss': 443}
schema, location = endpoint.split('://')
host = location.split('/')[0]
port = int(host.split(':')[1] if ':' in host else port_map[schema])
host = host if ':' not in location else location.split('/')[0].split(':')[0]
path = '/' + '/'.join(location.split('/')[1:]) if '/' in location else '/'
URI = namedtuple('URI', ('scheme', 'hostname', 'port', 'path'))
return URI(schema, host, port, path)
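# A quick sanity check of the parser (the endpoint is illustrative):
#   parse_endpoint('ws://echo.example.com:8080/chat')
#   -> URI(scheme='ws', hostname='echo.example.com', port=8080, path='/chat')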
class connect:
"""
Connect a websocket.
This can be used as either a context manager or as a function.
"""
def __init__(self, uri):
self.uri = parse_endpoint(uri) # urllib.parse.urlparse(uri)
# async def __iter__(self):
# """This is a hack to allow the websocket = connect() format."""
# return await self._connect()
async def _connect(self):
assert self.uri.scheme == 'ws'
if __debug__: LOGGER.debug("open connection %s:%s",
self.uri.hostname, self.uri.port)
reader, writer = await asyncio.open_connection(self.uri.hostname,
self.uri.port)
async def send_header(header, *args):
if __debug__: LOGGER.debug(str(header), *args)
await writer.awrite(header % args + '\r\n')
# Sec-WebSocket-Key is 16 bytes of random base64 encoded
key = binascii.b2a_base64(bytes(random.getrandbits(8)
for _ in range(16))).rstrip()
await send_header(b'GET %s HTTP/1.1', self.uri.path or '/')
await send_header(b'Host: %s:%s', self.uri.hostname, self.uri.port)
await send_header(b'Connection: Upgrade')
await send_header(b'Upgrade: websocket')
await send_header(b'Sec-WebSocket-Key: %s', key)
# await send_header(b'Sec-WebSocket-Protocol: chat')
await send_header(b'Sec-WebSocket-Version: 13')
await send_header(b'Origin: http://localhost')
await send_header(b'')
header = await reader.readline()
assert header in [b'HTTP/1.1 101 Switching Protocols\r\n',
b'HTTP/1.1 101 Web Socket Protocol Handshake\r\n'],\
header
# We don't (currently) need these headers
# FIXME: should we check the return key?
while header.rstrip():
if __debug__: LOGGER.debug(str(header))
header = await reader.readline()
return WebsocketClient(reader, writer)
async def __aenter__(self):
self._websocket = await self._connect()
return self._websocket
async def __aexit__(self, exc_type, exc, tb):
await self._websocket.close()
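# A minimal usage sketch, assuming a running uasyncio loop and that the
# Websocket protocol class provides send()/recv() coroutines (the URL is
# illustrative):
#
# async def demo():
#     async with connect('ws://echo.example.com/') as ws:
#         await ws.send('hello')
#         print(await ws.recv())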
|
[
"harshad@sharma.io"
] |
harshad@sharma.io
|
6209b3c76c9c5c7722b43fe96bd56dd45c65cb22
|
4be9a1b6c1c694962f6e97500a334a34aef19d9f
|
/Python/Even numbers using lambada.py
|
54fd68c4f3559f2f68914405c27302848a4a567f
|
[] |
no_license
|
ndilhara/HacktoberFest2021-2
|
1266ff0b022fee7b2ce02e6a3753c65163f7d5ee
|
84fbe7ca944af18a65255b45ebc8da02830514fa
|
refs/heads/main
| 2023-08-30T20:50:16.398679
| 2021-10-30T18:02:03
| 2021-10-30T18:02:03
| 422,953,223
| 1
| 0
| null | 2021-10-30T18:02:04
| 2021-10-30T17:53:39
| null |
UTF-8
|
Python
| false
| false
| 104
|
py
|
n=int(input("Enter the limit : "))
print(* list(filter(lambda x: (x%2==0),[i for i in range (n)])))
|
[
"noreply@github.com"
] |
ndilhara.noreply@github.com
|
4082b0bb5bf940d7c02eda4aabab9b4936232e86
|
ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
|
/P.O.R.-master/pirates/minigame/Fish.py
|
64018e3380aafaf0bca723f6a59803edbfb23cd6
|
[] |
no_license
|
BrandonAlex/Pirates-Online-Retribution
|
7f881a64ec74e595aaf62e78a39375d2d51f4d2e
|
980b7448f798e255eecfb6bd2ebb67b299b27dd7
|
refs/heads/master
| 2020-04-02T14:22:28.626453
| 2018-10-24T15:33:17
| 2018-10-24T15:33:17
| 154,521,816
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,291
|
py
|
import random
import math
from pandac.PandaModules import NodePath, Point3
from direct.interval.IntervalGlobal import Sequence, Parallel, Wait, Func
from direct.interval.LerpInterval import LerpFunc
from direct.task import Task
from panda3d.core import TextNode
import FishingGlobals
from FishFSM import FishFSM
from BlendActor import BlendActor
from pirates.effects.FishIdleBubbleEffect import FishIdleBubbleEffect
from pirates.effects.FishBitingBubbleEffect import FishBitingBubbleEffect
from pirates.effects.FishFightingHookedBubbleEffect import FishFightingHookedBubbleEffect
import MinigameUtils
from pirates.uberdog.UberDogGlobals import InventoryType
class Fish(NodePath):
def __init__(self, fishManager, myData, index, trophy = 0):
NodePath.__init__(self, '%s_%d' % (myData['name'], index))
self.trophy = trophy
self.myData = myData
if not self.trophy:
self.fishManager = fishManager
self.index = index
self.fsm = FishFSM(self)
self.weight = random.randint(self.myData['weightRange'][0], self.myData['weightRange'][1])
else:
self.weight = trophy
self.adjustedScale = (self.myData['scaleRange'][1] - self.myData['scaleRange'][0]) * (self.weight - self.myData['weightRange'][0]) / (self.myData['weightRange'][1] - self.myData['weightRange'][0]) + self.myData['scaleRange'][0]
self.initActor()
if not self.trophy:
self.initVariables()
self.initFishStatusIcon()
if FishingGlobals.wantDebugCollisionVisuals:
self.initCollisions()
self.avoidingFish = False
self.biteBubbleEffect = None
self.idleBubbleEffect = None
self.fightBubbleEffect = None
self.behaviorNameToFunction = {
'straight': self.performStraightBehavior,
'sineStraight': self.performSineStraightBehavior,
'erratic': self.performErraticBehavior }
self.sineDtAccumulator = 0.0
self.erraticDtAccumulator = 0.0
self.myZ = 0.0
if not self.trophy:
self.setLightOff()
def initActor(self):
self.animDict = { }
for anim in FishingGlobals.fishAnimations:
self.animDict[anim] = 'models/char/pir_a_gam_fsh_%s_%s.bam' % (self.myData['model'], anim)
self.actor = BlendActor('models/char/pir_r_gam_fsh_%s.bam' % self.myData['model'], self.animDict, FishingGlobals.defaultFishBlendTime, FishingGlobals.fishBlendTimeDict)
self.actor.reparentTo(self)
self.actor.setScale(self.adjustedScale)
self.mouthJoint = self.actor.exposeJoint(None, 'modelRoot', 'hookAttach')
self.attractionPoint = NodePath('AttractionPoint')
self.attractionPoint.reparentTo(self.mouthJoint)
self.attractionPoint.setPos(0.0, 0.0, 0.0)
self.actor.setPlayRate(self.myData['speed'] * self.myData['swimAnimationMultiplier'], 'swimIdle')
self.actor.setPlayRate(self.myData['speed'] * self.myData['swimAnimationMultiplier'], 'swimIdleOpposite')
self.actor.setPlayRate(self.myData['speed'] * self.myData['turnAnimationMultiplier'], 'turn')
self.actor.setPlayRate(self.myData['speed'] * self.myData['turnAnimationMultiplier'], 'turnOpposite')
if not self.trophy:
self.setBin('fishingGame', 10)
def codeReload(self):
self.actor.setPlayRate(self.myData['speed'] * self.myData['swimAnimationMultiplier'], 'swimIdle')
self.actor.setPlayRate(self.myData['speed'] * self.myData['swimAnimationMultiplier'], 'swimIdleOpposite')
self.actor.setPlayRate(self.myData['speed'] * self.myData['turnAnimationMultiplier'], 'turn')
self.actor.setPlayRate(self.myData['speed'] * self.myData['turnAnimationMultiplier'], 'turnOpposite')
def initFishStatusIcon(self):
self.fishStatusIconTextNode = TextNode('fishBitingIcon')
self.fishStatusIconNodePath = NodePath(self.fishStatusIconTextNode)
self.fishStatusIconNodePath.setPos(0.0, 0.0, self.myData['indicatorHeightOffset'])
self.fishStatusIconTextNode.setText('?')
self.fishStatusIconTextNode.setTextColor(1.0, 0.0, 0.0, 1.0)
self.fishStatusIconNodePath.reparentTo(self.mouthJoint)
self.fishStatusIconNodePath.setBillboardPointEye()
self.fishStatusIconNodePath.hide()
self.fishStatusIconNodePath.setShaderOff()
def initVariables(self):
self.attractionVisual = None
self.collisionVisual = None
self.movingRight = True
self.turnSpeed = 160.0
self.turnTowardLureInterval = None
self.velocity = FishingGlobals.baseFishVelocity * self.myData['speed']
self.accel = FishingGlobals.baseFishAccel * self.myData['speed']
self.fishMoveSequence = None
self.bubbleEffect = None
def initCollisions(self):
self.collisionVisual = loader.loadModel('models/props/crate')
self.collisionVisual.setTransparency(1)
        self.collisionVisual.setColor(1.0, 1.0, 1.0, 0.3)
self.collisionVisual.setScale(*self.myData['collisionBoxSize'])
self.collisionVisual.setPos(*self.myData['collisionBoxOffset'])
self.collisionVisual.reparentTo(self)
self.collisionVisual.hide()
self.attractionVisual = loader.loadModel('models/ammunition/cannonball')
self.attractionVisual.setTransparency(1)
        self.attractionVisual.setColor(0.0, 1.0, 0.0, 0.3)
self.attractionVisual.setScale(self.myData['attractionRadius'])
self.attractionVisual.reparentTo(self.attractionPoint)
self.attractionVisual.hide()
self.collisionVisualVisible = False
def hide(self):
NodePath.hide(self)
if self.idleBubbleEffect:
self.idleBubbleEffect.hide()
def show(self):
NodePath.show(self)
if self.idleBubbleEffect:
self.idleBubbleEffect.show()
def reloadCollisions(self):
if FishingGlobals.wantDebugCollisionVisuals:
self.collisionVisual.removeNode()
self.attractionVisual.removeNode()
self.initCollisions()
def cleanFishData(self):
pass
def destroy(self):
self.closeFish = []
self.actor.destroy()
self.stopIdleBubbleEffect()
self.stopFightBubbleEffect()
if self.fishMoveSequence:
self.fishMoveSequence.pause()
self.fishMoveSequence = None
if self.fsm:
del self.fsm
self.fsm = None
self.behaviorNameToFunction = { }
self.removeNode()
def pickPositionAndSwim(self):
self.initVariables()
self.actor.clearControlEffectWeights()
if self.myData['depth'] == 0:
depth = random.uniform(FishingGlobals.fishingLevelBoundaries[self.myData['depth']], self.fishManager.gameObject.waterLevel + FishingGlobals.fishSpawnBelowWaterLevelHeight)
else:
depth = random.uniform(FishingGlobals.fishingLevelBoundaries[self.myData['depth']], FishingGlobals.fishingLevelBoundaries[self.myData['depth'] - 1])
startX = random.uniform(FishingGlobals.leftFishBarrier + 5.0, FishingGlobals.rightFishBarrier - 5.0)
self.setPos(startX, 0.0, depth)
if random.randint(0, 1):
self.fsm.request('TurnAround', 'Swimming', False)
else:
self.fsm.request('Swimming')
def turnAround(self, nextState, shouldMoveRight):
if self.velocity[0] < 0 and shouldMoveRight:
self.velocity[0] = -self.velocity[0]
elif self.velocity[0] > 0 and not shouldMoveRight:
self.velocity[0] = -self.velocity[0]
self.movingRight = self.velocity[0] > 0
if self.fishMoveSequence:
self.fishMoveSequence.pause()
self.fishMoveSequence.clearToInitial()
animationToTurn = 'turn'
if self.movingRight:
animationToTurn = 'turnOpposite'
durationOfFishTurn = self.myData['durationOfFishTurn']
        self.fishMoveSequence = Parallel(Sequence(Func(self.actor.changeAnimationTo, animationToTurn, False), Wait(durationOfFishTurn), Func(self.fsm.request, nextState)), Sequence(Wait(durationOfFishTurn * 0.33), Func(self.setXVelocity, 0.0), Wait(durationOfFishTurn * 0.66), Func(self.setXVelocity, self.velocity[0])), name = '%s_turnAroundInterval' % self.getName())
self.velocity[0] = -self.velocity[0]
self.fishMoveSequence.start()
def setXVelocity(self, newVel):
self.velocity[0] = newVel
def checkForBiting(self):
if self.fishManager.activeFish is not None:
return None
if self.fishManager.gameObject.fsm.getCurrentOrNextState() not in [
'Fishing',
'Reeling',
'LureStall',
'LegdFishShow']:
return None
inv = localAvatar.getInventory()
rodLvl = inv.getItemQuantity(InventoryType.FishingRod)
if self.myData['depth'] + 1 > rodLvl:
return None
self.fsm.request('Biting')
def checkForBoxOverlap(self, otherFish):
pos = self.getPos(self.fishManager.gameObject.fishingSpot)
size = self.myData['collisionBoxSize']
offset = list(self.myData['collisionBoxOffset'])
otherPos = otherFish.getPos()
otherSize = otherFish.myData['collisionBoxSize']
otherOffset = list(otherFish.myData['collisionBoxOffset'])
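        # Axis-aligned bounding-box (AABB) overlap test in the X/Z plane:
        # the boxes intersect only if their extents overlap on both axes.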
if pos[0] + size[0] / 2.0 + offset[0] > (otherPos[0] - otherSize[0] / 2.0) + otherOffset[0] and (pos[0] - size[0] / 2.0) + offset[0] < otherPos[0] + otherSize[0] / 2.0 + otherOffset[0] and pos[2] + size[2] / 2.0 + offset[2] > (otherPos[2] - otherSize[2] / 2.0) + otherOffset[2] and (pos[2] - size[2] / 2.0) + offset[2] < otherPos[2] + otherSize[2] / 2.0 + otherOffset[2]:
return True
return False
def checkForCloseFish(self, index):
if index < len(self.fishManager.uncaughtFish) - 1:
for i in xrange(index + 1, len(self.fishManager.uncaughtFish)):
if self.fishManager.uncaughtFish[i].index != self.index:
if self.checkForBoxOverlap(self.fishManager.uncaughtFish[i]):
self.closeFish.append(self.fishManager.uncaughtFish[i])
if FishingGlobals.wantDebugCollisionVisuals:
                            self.collisionVisual.setColor(1, 0, 0, 0.3)
self.checkForBoxOverlap(self.fishManager.uncaughtFish[i])
if len(self.closeFish) == 0:
if FishingGlobals.wantDebugCollisionVisuals:
                self.collisionVisual.setColor(1, 1, 1, 0.3)
def checkForLures(self, currentState, lurePos):
if (self.getX() + FishingGlobals.fishAttractionOffset < lurePos[0] or self.movingRight or self.getX() - FishingGlobals.fishAttractionOffset > lurePos[0]) and not (self.movingRight):
if self.attractionPoint.getDistance(self.fishManager.gameObject.lure) < self.myData['attractionRadius'] + self.fishManager.gameObject.lure.lureAttractRadius:
self.checkForBiting()
def update(self, dt, index, lurePos):
currentState = self.fsm.getCurrentOrNextState()
self.closeFish = []
if currentState in [
'ScareAway',
'Swimming',
'Flee',
'TurnAround']:
self.checkForCloseFish(index)
if currentState in [
'Swimming']:
self.checkForLures(currentState, lurePos)
self.updateBasedOnBehavior(dt, lurePos)
elif currentState in [
'Hooked',
'AboutToFight',
'HookedFighting']:
self.checkForCloseFish(-1)
for fish in self.closeFish:
self.makeFishRunFromMe(fish)
def makeFishRunFromMe(self, otherFish):
if otherFish.fsm.getCurrentOrNextState() == 'Flee' or otherFish.fsm.getCurrentOrNextState() == 'TurnAround':
return None
if otherFish.getX() < self.getX(self.fishManager.gameObject.fishingSpot) and otherFish.movingRight:
otherFish.fsm.request('TurnAround', 'Flee', False)
elif otherFish.getX() > self.getX(self.fishManager.gameObject.fishingSpot) and not (otherFish.movingRight):
otherFish.fsm.request('TurnAround', 'Flee', True)
else:
otherFish.fsm.request('Flee')
def updateBasedOnBehavior(self, dt, lurePos):
currentState = self.fsm.getCurrentOrNextState()
newX = self.getX()
newY = self.getY()
newZ = self.getZ()
for fish in self.closeFish:
if self.myData['size'] == 'small' and fish.myData['size'] == 'large':
if self.checkForEating(fish):
return None
self.avoidingFish = True
if fish.velocity[1] > 0.0 and fish.avoidingFish:
self.velocity[1] = -(FishingGlobals.fishAvoidYVelocity)
else:
self.velocity[1] = FishingGlobals.fishAvoidYVelocity
if abs(fish.getY() - self.getY()) > self.myData['collisionBoxSize'][1] + fish.myData['collisionBoxSize'][1]:
self.velocity[1] = 0.0
continue
if len(self.closeFish) == 0 and abs(self.getY()) > FishingGlobals.fishYTolerance:
self.avoidingFish = False
if self.getY() > 0:
self.velocity[1] = -(FishingGlobals.fishAvoidYVelocity)
else:
self.velocity[1] = FishingGlobals.fishAvoidYVelocity
elif len(self.closeFish) == 0 and abs(self.getY()) < FishingGlobals.fishYTolerance:
self.avoidingFish = False
self.velocity[1] = 0.0
self.setY(0.0)
newY = self.getY() + self.velocity[1] * dt + self.accel[1] * dt * dt
if currentState in [
'Swimming',
'TurnAround',
'Flee',
'ScareAway']:
if currentState == 'ScareAway':
(newX, newZ) = self.performScareAwayBehavior(dt, self.velocity, self.accel)
elif currentState == 'Flee':
(newX, newZ) = self.performFleeBehavior(dt, self.velocity, self.accel)
else:
(newX, newZ) = self.behaviorNameToFunction[self.myData['behaviorDict']['name']](dt, self.velocity, self.accel)
currentState = self.fsm.getCurrentOrNextState()
if newX < FishingGlobals.leftFishBarrier:
if currentState == 'ScareAway':
if newX < FishingGlobals.leftFishBarrier - FishingGlobals.fullyOffscreenXOffset:
self.fsm.request('Offscreen')
return None
elif currentState != 'TurnAround' and not (self.movingRight):
self.fsm.request('TurnAround', 'Swimming', True)
elif newX > FishingGlobals.rightFishBarrier:
if currentState != 'TurnAround' and self.movingRight:
self.fsm.request('TurnAround', 'Swimming', False)
newZ = min(max(FishingGlobals.fishingLevelBoundaries[len(FishingGlobals.fishingLevelBoundaries) - 1], newZ), self.fishManager.gameObject.waterLevel + FishingGlobals.fishSpawnBelowWaterLevelHeight)
self.setPos(newX, newY, newZ)
def checkForEating(self, fishThatWillEat):
if (self.getX() < fishThatWillEat.getX() or not (fishThatWillEat.movingRight) or self.getX() > fishThatWillEat.getX() or fishThatWillEat.movingRight) and self.fsm.getCurrentOrNextState() == 'Swimming' and fishThatWillEat.fsm.getCurrentOrNextState() == 'Swimming' and random.random() < 1.0:
self.fsm.request('BeingEaten', fishThatWillEat)
fishThatWillEat.fsm.request('Eating', self.weight)
return True
return False
def startIdleBubbleEffect(self):
self.idleBubbleEffect = FishIdleBubbleEffect.getEffect(unlimited = True)
if self.idleBubbleEffect:
self.idleBubbleEffect.reparentTo(self.mouthJoint)
self.idleBubbleEffect.setScale(1.0)
self.idleBubbleEffect.setHpr(0, 0, 0)
self.idleBubbleEffect.setLifespanBasedOnDepth(self.getPos(render))
self.idleBubbleEffect.setBubbleSizeBasedOnWeight(self.weight)
self.idleBubbleEffect.particleDummy.setBin('fishingGame', 5)
self.idleBubbleEffect.startLoop()
def stopIdleBubbleEffect(self):
if self.idleBubbleEffect:
self.idleBubbleEffect.stopLoop()
self.idleBubbleEffect = None
def startBiteBubbleEffect(self):
self.biteBubbleEffect = FishBitingBubbleEffect.getEffect(unlimited = True)
if self.biteBubbleEffect:
self.biteBubbleEffect.reparentTo(self.mouthJoint)
self.biteBubbleEffect.setScale(1.0)
self.biteBubbleEffect.setHpr(0, 0, 0)
self.biteBubbleEffect.setLifespanBasedOnDepth(self.getPos(render))
self.biteBubbleEffect.setBubbleSizeBasedOnWeight(self.weight)
self.biteBubbleEffect.particleDummy.setBin('fishingGame', 5)
self.biteBubbleEffect.play()
def stopBiteBubbleEffect(self):
if self.biteBubbleEffect:
self.biteBubbleEffect.stopLoop()
self.biteBubbleEffect = None
def startFightBubbleEffect(self):
self.fightBubbleEffect = FishFightingHookedBubbleEffect.getEffect(unlimited = True)
if self.fightBubbleEffect:
self.fightBubbleEffect.reparentTo(self.mouthJoint)
self.fightBubbleEffect.setScale(1.0)
self.fightBubbleEffect.setHpr(0, 0, 0)
self.fightBubbleEffect.setLifespanBasedOnDepth(self.getPos(render))
self.fightBubbleEffect.setBubbleSizeBasedOnWeight(self.weight)
self.fightBubbleEffect.particleDummy.setBin('fishingGame', 5)
self.fightBubbleEffect.startLoop()
def stopFightBubbleEffect(self):
if self.fightBubbleEffect:
self.fightBubbleEffect.stopLoop()
self.fightBubbleEffect = None
def performStraightBehavior(self, dt, velocity, accel):
newX = self.getX() + velocity[0] * dt + accel[0] * dt * dt
newZ = self.getZ() + velocity[2] * dt + accel[2] * dt * dt
return (newX, newZ)
def performSineStraightBehavior(self, dt, velocity, accel):
self.sineDtAccumulator += dt
newX = self.getX() + velocity[0] * dt + accel[0] * dt * dt
newZ = self.myZ + math.sin(self.sineDtAccumulator) * self.myData['behaviorDict']['sineMultiplier']
return (newX, newZ)
def performScareAwayBehavior(self, dt, velocity, accel):
newX = self.getX() + velocity[0] * FishingGlobals.scareAwayVelocityMultiplier * dt + accel[0] * dt * dt
newZ = self.getZ() + velocity[2] * FishingGlobals.scareAwayVelocityMultiplier * dt + accel[2] * dt * dt
return (newX, newZ)
def performFleeBehavior(self, dt, velocity, accel):
newX = self.getX() + velocity[0] * FishingGlobals.fleeVelocityMultiplier * dt + accel[0] * dt * dt
newZ = self.getZ() + velocity[2] * FishingGlobals.fleeVelocityMultiplier * dt + accel[2] * dt * dt
return (newX, newZ)
def performErraticBehavior(self, dt, velocity, accel):
self.erraticDtAccumulator += dt
self.sineDtAccumulator += dt
newX = self.getX() + velocity[0] * dt + accel[0] * dt * dt
newZ = self.myZ + math.sin(self.sineDtAccumulator) * self.myData['behaviorDict']['sineMultiplier']
if self.erraticDtAccumulator > self.myData['behaviorDict']['secondsBetweenChanges']:
self.erraticDtAccumulator = 0
if random.random() < self.myData['behaviorDict']['chanceOfTurning']:
if self.fsm.getCurrentOrNextState() != 'TurnAround':
self.fsm.request('TurnAround', 'Swimming', not (self.movingRight))
return (newX, newZ)
def showAttractionCollisionVisuals(self):
if FishingGlobals.wantDebugCollisionVisuals:
self.attractionVisual.show()
def hideAttractionCollisionVisuals(self):
if FishingGlobals.wantDebugCollisionVisuals:
self.attractionVisual.hide()
def showAvoidanceCollisionVisuals(self):
if FishingGlobals.wantDebugCollisionVisuals:
self.collisionVisual.show()
def hideAvoidanceCollisionVisuals(self):
if FishingGlobals.wantDebugCollisionVisuals:
self.collisionVisual.hide()
|
[
"brandoncarden12345@gmail.com"
] |
brandoncarden12345@gmail.com
|
e3f206db445900a12d1590b90961b0bbb3ef400f
|
a7347bd30b2bfc61ef2272f43dc6a583ca50ea85
|
/user/urls.py
|
506bdb0aaff919f997ebbd2ca504b5e5b3b2aac2
|
[
"MIT"
] |
permissive
|
Sergey19940808/blog
|
cb5a731a5b1af3e3ffe22d03ae554188b573051c
|
26beea5b218ddfe3347e251994c5c2f500975df0
|
refs/heads/master
| 2022-04-11T06:56:05.171087
| 2020-03-21T13:59:45
| 2020-03-21T13:59:45
| 248,254,285
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 230
|
py
|
from django.urls import path
from user.views import LogoutViewCustom, LoginViewCustom
urlpatterns = [
path('login/', LoginViewCustom.as_view(), name='login'),
path('logout/', LogoutViewCustom.as_view(), name='logout'),
]
|
[
"aleksey.serzh@mail.ru"
] |
aleksey.serzh@mail.ru
|
4d1688bc185f6d845f5ab2e1ca1310ebb6340586
|
150421fb8e7a913493f9ba0c37c85a624eec5bc4
|
/scripts/environment.py
|
7d17b09987bac88b2312aef2c799f54f05695691
|
[] |
no_license
|
SweiLz/Katana
|
19b50d59cf8b8f696a3abe699042087c4d61ff4a
|
2d7f70b452a736438b1e96e692cd9baf0439cf78
|
refs/heads/master
| 2022-09-21T04:08:36.984576
| 2020-06-03T16:59:00
| 2020-06-03T16:59:00
| 263,915,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,398
|
py
|
#!/usr/bin/env python
import math
import rospy
from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3
from std_msgs.msg import ColorRGBA, Header
from visualization_msgs.msg import Marker, MarkerArray
from tf.transformations import quaternion_from_euler
from partRing import CircleLight
import tf
class KineticEnvNode:
__br = tf.TransformBroadcaster()
__render_freq = 20.0
__update_freq = 30.0
__time = 0.0
def __init__(self):
rospy.init_node("kinetic_env_node")
rospy.loginfo("Starting KineticEnvNode as kinetic_env_node.")
self._circle_light = CircleLight()
self.marker_pub = rospy.Publisher(
"visualization_marker_array", MarkerArray, queue_size=10)
rospy.Timer(rospy.Duration(1.0 / self.__render_freq), self.__render)
rospy.Timer(rospy.Duration(1.0 / self.__update_freq), self.__update)
self.ringPose = [Pose(Point(0, 0, 0), Quaternion(0, 0, 0, 1)),
Pose(Point(0, 0, 0), Quaternion(0, 0, 0, 1)),
Pose(Point(0, 0, 0), Quaternion(0, 0, 0, 1))]
def __update(self, timer):
try:
duration = (timer.current_real - timer.last_real).to_sec()
# rospy.loginfo("Timer -> {}".format(duration))
# for i, ring_pose in enumerate(self.ringPose):
self.ringPose[0].position.z = 0.5 * \
math.sin(0.1*self.__time * 2 * math.pi) - 0.5
self.ringPose[1].position.z = 0.5 * \
math.sin(0.2*self.__time * 2 * math.pi) - 0.5
self.ringPose[2].position.z = 0.5 * \
math.sin(0.1*self.__time * 2 * math.pi) - 0.5
# self.ringPose[0].position.z = -0.5
# self.ringPose[1].position.z = -0.5
# self.ringPose[2].position.z = -0.5
q = quaternion_from_euler(0.3 * math.sin(0.3*self.__time * 2 * math.pi),
0.3 * math.cos(0.3*self.__time * 2 * math.pi), 0)
self.ringPose[0].orientation = Quaternion(q[0], q[1], q[2], q[3])
self.ringPose[1].orientation = Quaternion(q[0], q[1], q[2], q[3])
self.ringPose[2].orientation = Quaternion(q[0], q[1], q[2], q[3])
self.__time += duration
except Exception as e:
rospy.logwarn(e)
# pass
def __render(self, timer):
marker_array = MarkerArray()
for i, ring_pose in enumerate(self.ringPose):
translate = (ring_pose.position.x,
ring_pose.position.y, ring_pose.position.z)
rotate = (ring_pose.orientation.x, ring_pose.orientation.y,
ring_pose.orientation.z, ring_pose.orientation.w)
frame = "ring_link_{}".format(i)
self.__br.sendTransform(
translate, rotate, timer.current_real, frame, "base_link")
marker = Marker(type=Marker.MESH_RESOURCE)
marker.header = Header(frame_id=frame)
marker.id = i
radius = 0.0
if i == 0:
marker.mesh_resource = "package://Katana/meshes/ring_set1_s.stl"
marker.pose = Pose(Point(-0.5, 0.5, 0), Quaternion(0, 0, 0, 1))
radius = 0.5
elif i == 1:
marker.mesh_resource = "package://Katana/meshes/ring_set1_m.stl"
marker.pose = Pose(Point(-0.25, 0.25, 0),
Quaternion(0, 0, 0, 1))
radius = 0.75
elif i == 2:
marker.mesh_resource = "package://Katana/meshes/ring_set1_l.stl"
marker.pose = Pose(Point(0, 0, 0), Quaternion(0, 0, 0, 1))
radius = 1.0
marker.scale = Vector3(0.01, 0.01, 0.01)
marker.color = ColorRGBA(1.0, 1.0, 1.0, 1.0)
marker_array.markers.append(marker)
marker = Marker(type=Marker.SPHERE_LIST)
marker.header = Header(frame_id=frame)
marker.id = i+3
marker.pose = Pose(Point(0, 0, 0), Quaternion(0, 0, 0, 1))
marker.scale = Vector3(0.03, 0.03, 0.03)
num = 300.0*radius
for j in range(int(num)):
x = radius * math.sin(j/num*2*math.pi)
y = radius * math.cos(j/num*2*math.pi)
marker.points.append(Point(x, y, 0.015))
r = 0.5+0.5*math.sin(self.__time + j/num *
2*math.pi + i*2*math.pi/3)
g = 0.5+0.5*math.cos(self.__time + j/num *
2*math.pi + i*2*math.pi/3)
b = 0.5
marker.colors.append(ColorRGBA(r, g, b, 1.0))
x = (radius-0.1) * math.sin(j/num*2*math.pi)
y = (radius-0.1) * math.cos(j/num*2*math.pi)
marker.points.append(Point(x, y, 0.015))
r = 0.5+0.5*math.cos(self.__time + j/num *
2*math.pi + i*2*math.pi/3)
g = 0.5+0.5*math.sin(self.__time + j/num *
2*math.pi + i*2*math.pi/3)
b = 0.5
marker.colors.append(ColorRGBA(r, g, b, 1.0))
marker_array.markers.append(marker)
self.marker_pub.publish(marker_array)
if __name__ == "__main__":
kinetic_env_node = KineticEnvNode()
rospy.spin()
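# Assumed usage (not part of the original file): in a ROS1 workspace that
# builds the Katana package referenced by the mesh paths above, run
#   rosrun Katana environment.py
# and add a MarkerArray display on /visualization_marker_array in RViz.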
|
[
"sweilz.w@gmail.com"
] |
sweilz.w@gmail.com
|
df8c41080835a716f20b4d748167744ba9cd2f5b
|
2733eda1907dc0111a786d900b1e82ccf7871142
|
/scoop/scoop/settings.py
|
b477cc7cfa887fae70cdce3d8fc6cb7eb0468877
|
[] |
no_license
|
wavebyte/scoop
|
2924b9ac83f6f8082d8296512ca120cb4cf7c255
|
e49ef363662bda2c5b188d6a29109e27bd08f8fb
|
refs/heads/master
| 2016-09-15T21:06:25.795445
| 2013-06-21T19:38:37
| 2013-06-21T19:38:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,347
|
py
|
# Django settings for scoop project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '0agyvmqua)k%q9-#c2s7j^p-9^npm2&i9krdei)g9di$xq*obp'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'scoop.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'scoop.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
[
"EDYQveZfaa"
] |
EDYQveZfaa
|
67c8641c62d8c8f95dcbba4065c39401bf989154
|
0049811de008a725024197d7dbd98c25b9eb8b49
|
/vim_edited_files/anagrams.py
|
c27b8db1c93c5ce91698b9a8276c27df40171a6e
|
[] |
no_license
|
matbc92/learning-python
|
4b7ad63f8029b7a4fb527549ef6fa671e923e66e
|
1174bd038187ae5225034685bf534a61888e2ba9
|
refs/heads/master
| 2020-04-07T13:59:23.603390
| 2019-07-25T05:13:42
| 2019-07-25T05:13:42
| 158,429,882
| 1
| 0
| null | 2018-11-21T10:57:10
| 2018-11-20T17:51:45
| null |
UTF-8
|
Python
| false
| false
| 523
|
py
|
def anagramSolution1(s1, s2):
    # Strings of different lengths can never be anagrams.
    if len(s1) != len(s2):
        return False
    # Strings are immutable, so work on a mutable copy of s2 in order to
    # mark matched characters as used.
    s2 = list(s2)
    pos1 = 0
    stillOK = True
    while pos1 < len(s1) and stillOK:
        pos2 = 0
        found = False
        while pos2 < len(s2) and not found:
            if s1[pos1] == s2[pos2]:
                found = True
            else:
                pos2 = pos2 + 1
        if found:
            s2[pos2] = None  # consume the matched character
        else:
            stillOK = False
        pos1 = pos1 + 1
    return stillOK

print(anagramSolution1('ovo','oov'))
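# A minimal alternative sketch (not the approach above): sorting both
# strings reduces the anagram check to a single comparison, O(n log n).
def anagramSolution2(s1, s2):
    return sorted(s1) == sorted(s2)

print(anagramSolution2('ovo', 'oov'))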
|
[
"matheusbc92@gmail.com"
] |
matheusbc92@gmail.com
|
475c1e992482bca16a1c82ce2ca9bea454f4f955
|
27b066d73041e5db85d861512973a429e14cceb2
|
/pygen.py
|
24957395e56c455fe1374b4fe38d421f57387350
|
[
"Apache-2.0"
] |
permissive
|
JLCarveth/PyPwGen
|
becb3a606d34592fbdca039953947701bae61c93
|
9e52a064b40544461c7b617d280d864099ed5470
|
refs/heads/master
| 2021-01-02T09:10:21.202144
| 2017-08-02T20:00:56
| 2017-08-02T20:00:56
| 99,151,198
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,987
|
py
|
from tkinter import *
from random import randint
def generate(num):
# Number of passwords to generate
num = int(num)
# Number of lines in the word file
lc = 2926
for i in range(num):
file = open("words.txt", "r")
r1 = randint(1, lc) # Line number of first word
r2 = randint(1, lc) # Line number of second word
r3 = randint(1, lc) # Line number of third word
w1 = "" # Word located at line number r1
w2 = "" # Word located at line number r2
w3 = "" # Word located at line number r3
r4 = str(randint(0,999)).zfill(3) # Random 3-digit number for the
# end of the password.
i = 1
        # Go through the word file finding the corresponding line numbers.
        # line.strip() removes surrounding whitespace (like the trailing '\n').
for line in file:
if i == r1:
w1 = line.strip()
if i == r2:
w2 = line.strip()
if i == r3:
w3 = line.strip()
i += 1
password = w1+w2+w3+r4
print(password)
file.close()
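# A shorter variant, sketched under the same assumptions (words.txt exists
# and randint is imported above): read the word list once and index it.
def generate_fast(num):
    with open("words.txt", "r") as f:
        words = [line.strip() for line in f]
    for _ in range(int(num)):
        picks = [words[randint(0, len(words) - 1)] for _ in range(3)]
        print("".join(picks) + str(randint(0, 999)).zfill(3))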
def main():
root = Tk()
frame = Frame(root)
# Number of Passwords to generate
passwordReturn = Spinbox(frame, from_=1, to=99)
genButton = Button(frame,
text="Generate",
width=20,
command= lambda: generate(passwordReturn.get()))
# Binds the enter key to the generate function
root.bind("<Return>", lambda x: generate(passwordReturn.get()))
genButton.grid(row=3, column=0, columnspan=2)
prLabel = Label(frame, text="# of passwords \nto generate: ")
prLabel.grid(row=1, column=0)
passwordReturn.grid(row=1, column=1)
frame.pack()
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
JLCarveth.noreply@github.com
|
8019cb07da2e8ca8a0a0de7b0e201dd7cb2293f2
|
076f75f633cd8364c43bbcd52521c4dc54a54ea5
|
/varlens/read_evidence/__init__.py
|
c89c94c4de82770342ccf84a6c72a34d73c017e4
|
[
"Apache-2.0"
] |
permissive
|
openvax/varlens
|
f01f236327d1492c7d6329d3247d7f3e33520b21
|
715d3ede5893757b2fcba4117515621bca7b1e5d
|
refs/heads/master
| 2021-09-07T17:22:18.721285
| 2018-02-26T19:59:28
| 2018-02-26T19:59:28
| 38,770,069
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,442
|
py
|
# Copyright (c) 2015. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This subpackage provides functionality for collecting and filtering aligned
sequencing reads from a BAM file, determining the alleles they suggest at
a locus, and assessing the evidence for particular variants.
In this subpackage, the records stored in the BAM file are referred to as
"alignments," whereas the term "read" may be more familiar. We use the term
"alignment" for consistency with the SAM specification, and since an
individual read from the sequencer may generate any number of alignments in
the case of chimeric alignments and secondary alignments.
'''
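# Typical flow (a sketch; see pileup_collection.py for the actual API):
# build a PileupCollection from aligned reads at a set of loci, then walk
# its Pileup objects and their PileupElements to tally allele evidence.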
from .util import alignment_key, read_key
from .pileup import Pileup
from .pileup_element import PileupElement
from .pileup_collection import PileupCollection
__all__ = [
"PileupCollection",
"Pileup",
"PileupElement",
"alignment_key",
"read_key",
]
|
[
"timodonnell@gmail.com"
] |
timodonnell@gmail.com
|
699c1644eafec02ca225b0f6268eda2368d89dc2
|
5b07cad5da8f8360a9a158cb5b91e489b2b7e600
|
/day01_15/json_read_write.py
|
bda33c1139bd33d2785345c54bec209e1b0c1e88
|
[] |
no_license
|
xieziwei99/py100days
|
9a7a1003b3153e45bda632d742525628601d3a1c
|
1febb643a87c2a0aaad3a29b1662e1599aeac775
|
refs/heads/master
| 2023-04-10T16:51:53.602384
| 2023-04-01T11:25:08
| 2023-04-01T11:25:08
| 191,920,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
'''
Created on June 12, 2019
Python's json module can save a dictionary or a list to a file in JSON format.
@author: xieziwei99
'''
import json
def main():
mydict = {
'name': 'Jack',
'age': 20,
'qq': 123456789,
'friends': ['Kangkang', 'Lucy'],
'cars': [
{'brand': 'BYD', 'max_speed': 180},
{'brand': 'Audi', 'max_speed': 280},
{'brand': 'Benz', 'max_speed': 320}
]
}
with open('data.json', 'w', encoding='utf-8') as fp:
json.dump(mydict, fp)
print('Save complete')
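# A read-back sketch under the same assumptions (data.json written by
# main() above); json.load restores the nested dict/list structure.
def read_back():
    with open('data.json', 'r', encoding='utf-8') as fp:
        mydict = json.load(fp)
    print(mydict['name'], [car['brand'] for car in mydict['cars']])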
if __name__ == '__main__':
main()
|
[
"xieziwei@bupt.edu.cn"
] |
xieziwei@bupt.edu.cn
|
1cb3f377768921ad08fad01b54ee4ecae37fd269
|
ad93c25175cb886e4c3ec098deddb6cac1541929
|
/vnpy/trader/app/algoTrading/algoEngine.py
|
164469012ebc96476dba48087e0d4fa29e7b39ff
|
[] |
no_license
|
rehylas/myTrader
|
b51a3bc7254876891147b1fd336f8f8fad5ada5b
|
a7685710d92165560ec46b8bde4ec45eb963fbc7
|
refs/heads/master
| 2020-04-05T19:30:30.834352
| 2018-11-13T03:22:52
| 2018-11-13T03:22:52
| 157,137,576
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,021
|
py
|
# encoding: UTF-8
'''
Algorithmic trading engine
'''
from __future__ import division
import os
import importlib
from vnpy.event import Event
from vnpy.rpc import RpcServer
from vnpy.trader.vtEvent import EVENT_TIMER, EVENT_TICK, EVENT_ORDER, EVENT_TRADE
from vnpy.trader.vtConstant import (DIRECTION_LONG, DIRECTION_SHORT,
PRICETYPE_LIMITPRICE, PRICETYPE_MARKETPRICE,
OFFSET_OPEN, OFFSET_CLOSE,
OFFSET_CLOSETODAY, OFFSET_CLOSEYESTERDAY)
from vnpy.trader.vtObject import VtSubscribeReq, VtOrderReq, VtCancelOrderReq, VtLogData
from .algo import ALGO_DICT
EVENT_ALGO_LOG = 'eAlgoLog'         # algo log event
EVENT_ALGO_PARAM = 'eAlgoParam'     # algo parameter event
EVENT_ALGO_VAR = 'eAlgoVar'         # algo variable event
EVENT_ALGO_SETTING = 'eAlgoSetting' # algo setting event
ALGOTRADING_DB_NAME = 'MyTrader_AlgoTrading_Db' # AlgoTrading database name
SETTING_COLLECTION_NAME = 'AlgoSetting'         # algo setting collection name
HISTORY_COLLECTION_NAME = 'AlgoHistory'         # algo history collection name
########################################################################
class AlgoEngine(object):
"""算法交易引擎"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine):
""""""
self.mainEngine = mainEngine
self.eventEngine = eventEngine
self.rpcServer = None
self.algoDict = {} # algoName:algo
self.orderAlgoDict = {} # vtOrderID:algo
self.symbolAlgoDict = {} # vtSymbol:algo set
self.settingDict = {} # settingName:setting
self.historyDict = {} # algoName:dict
self.registerEvent()
#----------------------------------------------------------------------
def registerEvent(self):
"""注册事件监听"""
self.eventEngine.register(EVENT_TICK, self.processTickEvent)
self.eventEngine.register(EVENT_TIMER, self.processTimerEvent)
self.eventEngine.register(EVENT_ORDER, self.processOrderEvent)
self.eventEngine.register(EVENT_TRADE, self.processTradeEvent)
#----------------------------------------------------------------------
def stop(self):
"""停止"""
if self.rpcServer:
self.rpcServer.stop()
#----------------------------------------------------------------------
def processTickEvent(self, event):
"""行情事件"""
tick = event.dict_['data']
l = self.symbolAlgoDict.get(tick.vtSymbol, None)
if l:
for algo in l:
algo.updateTick(tick)
#----------------------------------------------------------------------
def processOrderEvent(self, event):
"""委托事件"""
order = event.dict_['data']
algo = self.orderAlgoDict.get(order.vtOrderID, None)
if algo:
algo.updateOrder(order)
#----------------------------------------------------------------------
def processTradeEvent(self, event):
"""成交事件"""
trade = event.dict_['data']
algo = self.orderAlgoDict.get(trade.vtOrderID, None)
if algo:
algo.updateTrade(trade)
#----------------------------------------------------------------------
def processTimerEvent(self, event):
"""定时事件"""
for algo in self.algoDict.values():
algo.updateTimer()
#----------------------------------------------------------------------
def addAlgo(self, algoSetting):
"""新增算法"""
templateName = algoSetting['templateName']
algoClass = ALGO_DICT[templateName]
algo = algoClass.new(self, algoSetting)
self.algoDict[algo.algoName] = algo
return algo.algoName
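    # A hypothetical algoSetting for illustration (only 'templateName' is
    # read here; the remaining keys depend on the chosen algo template):
    #   algoSetting = {'templateName': u'TWAP', 'vtSymbol': 'rb1905.SHFE'}
    #   algoName = algoEngine.addAlgo(algoSetting)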
#----------------------------------------------------------------------
def stopAlgo(self, algoName):
"""停止算法"""
if algoName in self.algoDict:
self.algoDict[algoName].stop()
del self.algoDict[algoName]
#----------------------------------------------------------------------
def stopAll(self):
"""全部停止"""
l = self.algoDict.keys()
for algoName in l:
self.stopAlgo(algoName)
#----------------------------------------------------------------------
def subscribe(self, algo, vtSymbol):
""""""
contract = self.mainEngine.getContract(vtSymbol)
if not contract:
self.writeLog(u'%s订阅行情失败,找不到合约%s' %(algo.algoName, vtSymbol))
return
        # If vtSymbol is already in the dict, it has been subscribed before
if vtSymbol in self.symbolAlgoDict:
s = self.symbolAlgoDict[vtSymbol]
s.add(algo)
return
        # Otherwise add it to the dict and send the subscription request
else:
s = set()
self.symbolAlgoDict[vtSymbol] = s
s.add(algo)
req = VtSubscribeReq()
req.symbol = contract.symbol
req.exchange = contract.exchange
self.mainEngine.subscribe(req, contract.gatewayName)
#----------------------------------------------------------------------
def sendOrder(self, algo, vtSymbol, direction, price, volume,
priceType=None, offset=None):
"""发单"""
        contract = self.mainEngine.getContract(vtSymbol)
        if not contract:
            self.writeLog(u'%s委托下单失败,找不到合约:%s' %(algo.algoName, vtSymbol))
            return  # without a contract, the attribute accesses below would fail
        vtSymbol = '.'.join([contract.symbol, contract.exchange])
req = VtOrderReq()
req.vtSymbol = vtSymbol
req.symbol = contract.symbol
req.exchange = contract.exchange
req.direction = direction
req.offset = OFFSET_CLOSETODAY
req.price = price
req.volume = volume
if priceType:
req.priceType = priceType
else:
req.priceType = PRICETYPE_LIMITPRICE
if offset:
req.offset = offset
else:
req.offset = OFFSET_OPEN
strData = 'symbol %s exchange %s price %.2f volume %d direction %s offset %s vtSymbol %s gatewayName %s'\
%(req.symbol,req.exchange,req.price,req.volume,req.direction,req.offset,req.vtSymbol,contract.gatewayName)
print 'currency productClass ',req.currency, req.productClass
# print 'sendOrder req:'
# print strData
vtOrderID = self.mainEngine.sendOrder(req, contract.gatewayName)
self.orderAlgoDict[vtOrderID] = algo
return vtOrderID
#----------------------------------------------------------------------
def buy(self, algo, vtSymbol, price, volume, priceType=None, offset=None):
"""买入"""
return self.sendOrder(algo, vtSymbol, DIRECTION_LONG, price, volume, priceType, offset)
#----------------------------------------------------------------------
def sell(self, algo, vtSymbol, price, volume, priceType=None, offset=None):
"""卖出"""
return self.sendOrder(algo, vtSymbol, DIRECTION_SHORT, price, volume, priceType, offset)
#----------------------------------------------------------------------
def cancelOrder(self, algo, vtOrderID):
"""撤单"""
order = self.mainEngine.getOrder(vtOrderID)
if not order:
self.writeLog(u'%s委托撤单失败,找不到委托:%s' %(algo.algoName, vtOrderID))
return
req = VtCancelOrderReq()
req.symbol = order.symbol
req.exchange = order.exchange
req.orderID = order.orderID
req.frontID = order.frontID
req.sessionID = order.sessionID
self.mainEngine.cancelOrder(req, order.gatewayName)
#----------------------------------------------------------------------
def writeLog(self, content, algo=None):
"""输出日志"""
log = VtLogData()
log.logContent = content
if algo:
log.gatewayName = algo.algoName
event = Event(EVENT_ALGO_LOG)
event.dict_['data'] = log
self.eventEngine.put(event)
#----------------------------------------------------------------------
def putVarEvent(self, algo, d):
"""更新变量"""
algoName = algo.algoName
d['algoName'] = algoName
event = Event(EVENT_ALGO_VAR)
event.dict_['data'] = d
self.eventEngine.put(event)
        # Publish over RPC
if self.rpcServer:
self.rpcServer.publish('AlgoTrading', event)
        # Persist to the database
history = self.historyDict.setdefault(algoName, {})
history['algoName'] = algoName
history['var'] = d
self.mainEngine.dbUpdate(ALGOTRADING_DB_NAME,
HISTORY_COLLECTION_NAME,
history,
{'algoName': algoName},
True)
#----------------------------------------------------------------------
def putParamEvent(self, algo, d):
"""更新参数"""
algoName = algo.algoName
d['algoName'] = algoName
event = Event(EVENT_ALGO_PARAM)
event.dict_['data'] = d
self.eventEngine.put(event)
        # RPC push
if self.rpcServer:
self.rpcServer.publish('AlgoTrading', event)
        # persist the data to the database
history = self.historyDict.setdefault(algoName, {})
history['algoName'] = algoName
history['param'] = d
self.mainEngine.dbUpdate(ALGOTRADING_DB_NAME,
HISTORY_COLLECTION_NAME,
history,
{'algoName': algoName},
True)
#----------------------------------------------------------------------
def getTick(self, algo, vtSymbol):
"""查询行情"""
tick = self.mainEngine.getTick(vtSymbol)
if not tick:
self.writeLog(u'%s查询行情失败,找不到报价:%s' %(algo.algoName, vtSymbol))
return
return tick
#----------------------------------------------------------------------
def getContract(self, algo, vtSymbol):
"""查询合约"""
contract = self.mainEngine.getContract(vtSymbol)
if not contract:
self.writeLog(u'%s查询合约失败,找不到报价:%s' %(algo.algoName, vtSymbol))
return
return contract
#----------------------------------------------------------------------
def saveAlgoSetting(self, algoSetting):
"""保存算法配置"""
settingName = algoSetting['settingName']
self.settingDict[settingName] = algoSetting
self.mainEngine.dbUpdate(ALGOTRADING_DB_NAME,
SETTING_COLLECTION_NAME,
algoSetting,
{'settingName': settingName},
True)
self.putSettingEvent(settingName, algoSetting)
#----------------------------------------------------------------------
def loadAlgoSetting(self):
"""加载算法配置"""
l = self.mainEngine.dbQuery(ALGOTRADING_DB_NAME,
SETTING_COLLECTION_NAME,
{},
'templateName')
for algoSetting in l:
settingName = algoSetting['settingName']
self.settingDict[settingName] = algoSetting
self.putSettingEvent(settingName, algoSetting)
#----------------------------------------------------------------------
def deleteAlgoSetting(self, algoSetting):
"""删除算法配置"""
settingName = algoSetting['settingName']
del self.settingDict[settingName]
self.mainEngine.dbDelete(ALGOTRADING_DB_NAME,
SETTING_COLLECTION_NAME,
{'settingName': settingName})
self.putSettingEvent(settingName, {})
#----------------------------------------------------------------------
def putSettingEvent(self, settingName, algoSetting):
"""发出算法配置更新事件"""
algoSetting['settingName'] = settingName
event = Event(EVENT_ALGO_SETTING)
event.dict_['data'] = algoSetting
self.eventEngine.put(event)
#----------------------------------------------------------------------
def startRpc(self, repPort, pubPort):
"""启动RPC服务"""
if self.rpcServer:
return
self.rpcServer = AlgoRpcServer(self, repPort, pubPort)
self.rpcServer.start()
self.writeLog(u'算法交易RPC服务启动成功,REP端口:%s,PUB端口:%s' %(repPort, pubPort))
########################################################################
class AlgoRpcServer(RpcServer):
"""算法交易RPC服务器"""
#----------------------------------------------------------------------
def __init__(self, engine, repPort, pubPort):
"""Constructor"""
self.engine = engine
repAddress = 'tcp://*:%s' %repPort
pubAddress = 'tcp://*:%s' %pubPort
super(AlgoRpcServer, self).__init__(repAddress, pubAddress)
self.register(self.engine.addAlgo)
self.register(self.engine.stopAlgo)
self.register(self.engine.stopAll)
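#----------------------------------------------------------------------
# Usage sketch (illustrative only, not from the original source): wiring the
# engine (assumed to be vn.py's AlgoEngine) to a main/event engine pair and
# exposing it over RPC. The port numbers below are arbitrary placeholders.
#
# engine = AlgoEngine(mainEngine, eventEngine)
# engine.startRpc(repPort=2014, pubPort=4102)   # clients can now call addAlgo/stopAlgo/stopAll
# engine.stopAll()                              # shut every running algo down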
|
[
"rehylas@sina.com"
] |
rehylas@sina.com
|
67a6cd3c74e0b7ba29414a82835a627ca0a69379
|
52d523f1008fd169927e0b28ad85b9923a53733b
|
/py/bot/subsystems/gear_manipulator.py
|
703ff8d5c3f5a5747a218aa3acb1f584079ddb36
|
[
"MIT"
] |
permissive
|
Team5045/2017-bot
|
66b60825104bd5b4a31a3f6a7acfdf0708f0ed87
|
e97902c7efa46b8c85245d389ce85e4274c7fd73
|
refs/heads/master
| 2021-03-27T20:15:21.598240
| 2017-10-03T16:08:47
| 2017-10-03T16:08:47
| 78,890,567
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 827
|
py
|
"""
gear_manipulator.py
=========
"""
from wpilib.command import Subsystem
from bot import config
from bot.utils.controlled_single_solenoid import ControlledSingleSolenoid
class GearManipulator(Subsystem):
def __init__(self, robot):
super().__init__()
self.robot = robot
self.articulating_plate_solenoid = ControlledSingleSolenoid(
config.GEAR_MANIPULATOR_ARTICULATING_PLATE_SOLENOID)
self.cup_solenoid = ControlledSingleSolenoid(
config.GEAR_MANIPULATOR_ARTICULATING_CUP_SOLENOID)
def open_cup(self):
self.cup_solenoid.deploy()
def close_cup(self):
self.cup_solenoid.retract()
def raise_plate(self):
self.articulating_plate_solenoid.retract()
def lower_plate(self):
self.articulating_plate_solenoid.deploy()
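# Usage sketch (illustrative, not part of the original subsystem): `robot` and
# the solenoid channels in bot.config come from the surrounding robot code.
#
# gear = GearManipulator(robot)
# gear.lower_plate()   # deploy the articulating plate
# gear.open_cup()      # deploy the cup solenoid to accept a gear
# gear.close_cup()     # retract the cup to hold the gear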
|
[
"theo@theopatt.com"
] |
theo@theopatt.com
|
c7344ca31373d9fa5f9820d193c051620abd6d7e
|
d2133a6ebb064526149c304ba5c3cfde49530e35
|
/src/link.py
|
bcfc5c36a34b54904c147f8d763e537dcdf09ddc
|
[] |
no_license
|
carlosgeos/in-da-hood
|
6470dc0075387ce3bec72fefa1aa14f0dc200e35
|
76963ef457ec4e084d729e18c9b4b77b17181f46
|
refs/heads/master
| 2020-04-04T17:43:30.290896
| 2018-12-13T00:39:56
| 2018-12-13T00:39:56
| 156,132,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,370
|
py
|
from copy import deepcopy
import numpy as np
def link(sim, f):
"""Using single or complete-linkage (determined by f), builds a tree
of clusters. Each index of the array tree is one level of the
tree, where we can find multiple clusters.
Takes the similarity matrix and objective function as inputs.
"""
tree = [{}]
for x in range(len(sim) - 1):
max_ij = np.unravel_index(np.nanargmax(sim), (len(sim), len(sim)))
keep = min(max_ij)
throw = max(max_ij)
for i in range(len(sim)):
if keep != i and throw != i:
sim[keep, i] = f(sim[keep, i], sim[throw, i])
sim[i, keep] = f(sim[i, keep], sim[i, throw])
if throw != i:
sim[throw, i] = np.NINF
sim[i, throw] = np.NINF
# Procedure to iteratively build the tree
before = tree[len(tree) - 1]
tree.append(deepcopy(before))
now = tree[len(tree) - 1]
if keep in now and throw in now:
now[keep].extend(now[throw])
now[keep].append(throw)
now.pop(throw)
elif keep in now:
now[keep].append(throw)
elif throw in now:
now[keep] = [throw]
now[keep].extend(now[throw])
now.pop(throw)
else:
now[keep] = [throw]
return tree
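# Usage sketch (illustrative, appended; not in the original module):
# single-linkage clustering on a toy similarity matrix. Taking `max` over
# similarities corresponds to single linkage; `min` to complete linkage.
if __name__ == "__main__":
    pts = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0]])
    sim = -np.linalg.norm(pts[:, None] - pts[None, :], axis=-1)  # higher = more similar
    np.fill_diagonal(sim, -np.inf)  # a point never merges with itself
    print(link(sim, max))  # -> [{}, {0: [1]}, {0: [1, 2]}]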
|
[
"crequena@ulb.ac.be"
] |
crequena@ulb.ac.be
|
52ef476b1981aa6063e3166e165d941d3290cd9e
|
0065e45684eebd03dde4c6b14cd0167a968d1783
|
/test_model.py
|
9f3619e0dc396fd00030d3ae9aa64a00cecceea1
|
[] |
no_license
|
AayushPanda/ship-detection
|
80c1cc9ef7c9d4ed321cb04aa2c48a568816ef67
|
9a554a99bad00b3d386af53c8dee07a11cda3425
|
refs/heads/master
| 2023-06-11T06:20:40.200555
| 2021-07-05T19:03:02
| 2021-07-05T19:03:02
| 372,941,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 607
|
py
|
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tensorflow import keras
from gen_mask import gen_mask
imageX = 768
imageY = 768
data = pd.DataFrame(pd.read_csv("data\\data_segmentations.csv"))
data_dir = "data\\images\\"
fileID = "000155de5.jpg"
model = keras.models.load_model('model.h5', compile=False)
# Test on an image
test_img = np.expand_dims(cv2.imread(data_dir + fileID, cv2.IMREAD_COLOR), axis=0)
prediction = model.predict(test_img)
plt.imshow(cv2.imread(data_dir + fileID, cv2.IMREAD_COLOR))
plt.show()
plt.imshow(np.squeeze(prediction))
plt.show()
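# Post-processing sketch (illustrative, not from the original script): turn the
# model output into a binary ship mask. The 0.5 cutoff is an assumption, not a
# value taken from the project.
#
# mask = (np.squeeze(prediction) > 0.5).astype(np.uint8)
# plt.imshow(mask, cmap='gray')
# plt.show()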
|
[
"aayush.vinayak@gmail.com"
] |
aayush.vinayak@gmail.com
|
cc58eda1315f312dc41d8a40a9c8524936118565
|
733db916ed5bab4f6c620cf0c9bcce50d600b174
|
/draw_chessboard.py
|
007145677681c3c4c1ab2c2e9ac307239eebe1cb
|
[] |
no_license
|
simonfojtu/cv-utils
|
f78372c6aaa5980b936844ab510017434f73f898
|
5e33e52f1565d9153e1669e6bf4f61a9071dfcc0
|
refs/heads/master
| 2021-01-19T07:08:16.298508
| 2016-06-24T21:13:50
| 2016-06-24T21:13:50
| 60,371,811
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,819
|
py
|
#!/usr/bin/env python3
from PIL import Image, ImageDraw, ImageFont
from itertools import cycle
from operator import add
import argparse
from collections import deque
import numpy as np
import random
import math
from unique_grid import Grid
def draw(*, cols = 0, rows = 0, patch_size = 0, square_size = 0, dpi = 300):
"""
Draw chessboard wth optional circles grid
TODO: write grid identification number to the output file
"""
# A4 size paper
width = 8.3 # inch
height = 11.7 # inch
# number of squares
if cols == 0 or rows == 0:
if square_size == 0:
# default square size in pixels
square_size = int(dpi / 3)
cols = int(width * dpi // square_size)
rows = int(height * dpi // square_size)
else:
if cols == 0 or rows == 0:
print("Specify either both number of rows and cols or none of them")
return
if square_size == 0:
square_size = int(min(dpi * width / cols, dpi * height / rows))
print("Automatic square size = " + str(square_size) + "px")
width = cols * square_size / dpi
height = rows * square_size / dpi
assert(rows != 0)
assert(cols != 0)
assert(square_size != 0)
print("Drawing grid " + str(rows) + "x" + str(cols))
def square(i, j):
"Return the square corners, suitable for use in PIL drawings"
return (i * square_size, j * square_size,
(i + 1) * square_size-1, (j+1) * square_size-1)
def sq2ellipse(rect):
pad = int(square_size / 5)
return list(map(add, rect, (pad, pad, -pad, -pad)))
image = Image.new('L',
(int(width * dpi), int(height * dpi)),
(255) # white
)
if patch_size == 0:
for ps in range(4,6):
grid = Grid(rows, cols, ps)
grid.construct()
if grid.isValid():
break
else:
print("Failed to construct grid with given parameters")
return
else:
grid = Grid(rows, cols, patch_size)
grid.construct()
if not grid.isValid():
print("Failed to construct grid with given parameters")
return
grid.print()
draw_square = ImageDraw.Draw(image).rectangle
draw_ellipse = ImageDraw.Draw(image).ellipse
# top left is black
off = 0
for r in range(rows):
for c in range(cols):
color = 'black'
if (c + off) % 2 == 0:
draw_square(square(c, r), fill='black')
color = 'white'
# draw circles
if grid.grid[r,c] == 1:
draw_ellipse(sq2ellipse(square(c,r)), fill=color)
off = (off + 1) % 2
fnt = ImageFont.truetype('/usr/share/fonts/dejavu/DejaVuSerif.ttf', size=int(dpi/8))
text = "cols = %i, rows = %i, patch size = %i, square size = %i, dpi = %i" % (cols, rows, patch_size, square_size, dpi)
ImageDraw.Draw(image).text((int(dpi/30),dpi*height-int(dpi/6)), text, font=fnt, fill=(127))
return image
parser = argparse.ArgumentParser()
parser.add_argument("--rows","-r", help="number of rows", type=int, default=0)
parser.add_argument("--cols","-c", help="number of rows", type=int, default=0)
parser.add_argument("--patch","-p", help="size of patch of circles", type=int, default=0)
parser.add_argument("--square","-s", help="size of square (px)", type=int, default=0)
parser.add_argument("--dpi", help="dots per inch (DPI)", type=int, default=300)
parser.add_argument("--out","-o", help="output file", default="chessboard.png")
args = parser.parse_args()
chessboard = draw(cols = args.cols, rows = args.rows, patch_size = args.patch, square_size = args.square, dpi = args.dpi)
if chessboard is not None:  # draw() returns None when the parameters are invalid
    chessboard.save(args.out)
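# Example invocations (illustrative, not part of the original script):
#   python3 draw_chessboard.py                           # auto-sized A4 board -> chessboard.png
#   python3 draw_chessboard.py -r 9 -c 6 -s 100 -o calib.png
#   python3 draw_chessboard.py -p 4 --dpi 600 -o board_hires.png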
|
[
"simon.fojtu@gmail.com"
] |
simon.fojtu@gmail.com
|
b46a3fdd5d59671c96181bac6d3b137b533de060
|
38515045dbc990087c34a6847c6905020b5849b7
|
/micro-benchmark-key-errs/snippets/lists/list_str/main.py
|
1825ac736d55d3f9405417ad2e01cf30adabdc6d
|
[
"Apache-2.0"
] |
permissive
|
Tim-eyes/PyCG
|
9dbb7867e9f6037869ec6bf1218397d986638836
|
ef121c7ffb5ee06f4952fbe209b326bb7bf12647
|
refs/heads/master
| 2023-08-30T07:54:40.761229
| 2021-07-25T13:08:46
| 2021-07-25T13:08:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 54
|
py
|
d = {"a": "ab"}
lst = ["a", "b"]
d[lst[0]]
d[lst[1]]
|
[
"vitsalis@gmail.com"
] |
vitsalis@gmail.com
|
c672ded6f637cdfdd650981a2adfbaec57edb24f
|
d9f0dbec3cd7489031eeb627f40dd5b44cfec04a
|
/getBodyText.py
|
b67d86419d80757d2de542401db96b7b0e33185f
|
[] |
no_license
|
einslulu/Lipstick-Analysis-Milestone1
|
3c571c588952d280121dfeb888ff7efb539e02c9
|
a90024f45e8d63eebc894f923f21115d0d70a60f
|
refs/heads/master
| 2023-03-29T13:19:22.484798
| 2021-04-04T09:57:29
| 2021-04-04T09:57:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
#coding=utf-8
#python version:2.7
# author: Karen
import urllib
from readability import Document
count = 0
errorNum = 0
# read file and store urls in a list
with open("data/urls_1.txt") as f:
urls = f.readlines()
urls = [x.strip() for x in urls]
for url in urls:
count = count + 1
print "----------------------------------" + str(count) + "----------------------------------"
    try:
        html = urllib.urlopen(url).read()
    except Exception as e:
        errorNum = errorNum + 1
        print(e)
        continue  # skip this url: otherwise `html` would be stale or undefined below
doc = Document(html).summary() # get the main article of the page
print doc
print "---------------------------------- error number: " + str(errorNum) + " ----------------------------------"
|
[
"fangfei2353@126.com"
] |
fangfei2353@126.com
|
c326a0188dd460e5b74104a84ae50c8ffb26cc97
|
c4cf16066816692bd983d06b4399d26a33268a9d
|
/twython/api.py
|
44da1b2f82eca65ffeca02b22a36dbd7a16fe1ec
|
[] |
no_license
|
seantalts/weirdtwitster
|
09392cba281a15f79d04907bda96a683ef4aa552
|
025bbfb5e38c3589cc8f65523bb047f6deb65850
|
refs/heads/master
| 2021-01-10T20:46:43.496336
| 2013-08-29T04:17:14
| 2013-08-29T04:17:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,110
|
py
|
# -*- coding: utf-8 -*-
"""
twython.api
~~~~~~~~~~~
This module contains functionality for access to core Twitter API calls,
Twitter Authentication, and miscellaneous methods that are useful when
dealing with the Twitter API
"""
import requests
from requests.auth import HTTPBasicAuth
from requests_oauthlib import OAuth1, OAuth2
from . import __version__
from .advisory import TwythonDeprecationWarning
from .compat import json, urlencode, parse_qsl, quote_plus, str, is_py2
from .endpoints import EndpointsMixin
from .exceptions import TwythonError, TwythonAuthError, TwythonRateLimitError
from .helpers import _transparent_params
import warnings
warnings.simplefilter('always', TwythonDeprecationWarning) # For Python 2.7 >
class Twython(EndpointsMixin, object):
def __init__(self, app_key=None, app_secret=None, oauth_token=None,
oauth_token_secret=None, access_token=None, token_type='bearer',
oauth_version=1, api_version='1.1', client_args=None, auth_endpoint='authenticate'):
"""Instantiates an instance of Twython. Takes optional parameters for authentication and such (see below).
:param app_key: (optional) Your applications key
:param app_secret: (optional) Your applications secret key
:param oauth_token: (optional) When using **OAuth 1**, combined with oauth_token_secret to make authenticated calls
:param oauth_token_secret: (optional) When using **OAuth 1** combined with oauth_token to make authenticated calls
:param access_token: (optional) When using **OAuth 2**, provide a valid access token if you have one
:param token_type: (optional) When using **OAuth 2**, provide your token type. Default: bearer
:param oauth_version: (optional) Choose which OAuth version to use. Default: 1
:param api_version: (optional) Choose which Twitter API version to use. Default: 1.1
:param client_args: (optional) Accepts some requests Session parameters and some requests Request parameters.
See http://docs.python-requests.org/en/latest/api/#sessionapi and requests section below it for details.
[ex. headers, proxies, verify(SSL verification)]
:param auth_endpoint: (optional) Lets you select which authentication endpoint will use your application.
This will allow the application to have DM access if the endpoint is 'authorize'.
Default: authenticate.
"""
# API urls, OAuth urls and API version; needed for hitting that there API.
self.api_version = api_version
self.api_url = 'https://api.twitter.com/%s'
self.app_key = app_key
self.app_secret = app_secret
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
self.access_token = access_token
# OAuth 1
self.request_token_url = self.api_url % 'oauth/request_token'
self.access_token_url = self.api_url % 'oauth/access_token'
self.authenticate_url = self.api_url % ('oauth/%s' % auth_endpoint)
if self.access_token: # If they pass an access token, force OAuth 2
oauth_version = 2
self.oauth_version = oauth_version
# OAuth 2
if oauth_version == 2:
self.request_token_url = self.api_url % 'oauth2/token'
self.client_args = client_args or {}
default_headers = {'User-Agent': 'Twython v' + __version__}
        if 'headers' not in self.client_args:
# If they didn't set any headers, set our defaults for them
self.client_args['headers'] = default_headers
elif 'User-Agent' not in self.client_args['headers']:
# If they set headers, but didn't include User-Agent.. set it for them
self.client_args['headers'].update(default_headers)
# Generate OAuth authentication object for the request
# If no keys/tokens are passed to __init__, auth=None allows for
# unauthenticated requests, although I think all v1.1 requests need auth
auth = None
if oauth_version == 1:
# User Authentication is through OAuth 1
if self.app_key is not None and self.app_secret is not None and \
self.oauth_token is None and self.oauth_token_secret is None:
auth = OAuth1(self.app_key, self.app_secret)
if self.app_key is not None and self.app_secret is not None and \
self.oauth_token is not None and self.oauth_token_secret is not None:
auth = OAuth1(self.app_key, self.app_secret,
self.oauth_token, self.oauth_token_secret)
elif oauth_version == 2 and self.access_token:
# Application Authentication is through OAuth 2
token = {'token_type': token_type, 'access_token': self.access_token}
auth = OAuth2(self.app_key, token=token)
self.client = requests.Session()
self.client.auth = auth
# Make a copy of the client args and iterate over them
# Pop out all the acceptable args at this point because they will
# Never be used again.
client_args_copy = self.client_args.copy()
for k, v in client_args_copy.items():
if k in ('cert', 'headers', 'hooks', 'max_redirects', 'proxies'):
setattr(self.client, k, v)
self.client_args.pop(k) # Pop, pop!
self._last_call = None
def __repr__(self):
return '<Twython: %s>' % (self.app_key)
def _request(self, url, method='GET', params=None, api_call=None):
"""Internal request method"""
method = method.lower()
params = params or {}
func = getattr(self.client, method)
params, files = _transparent_params(params)
requests_args = {}
for k, v in self.client_args.items():
# Maybe this should be set as a class variable and only done once?
if k in ('timeout', 'allow_redirects', 'stream', 'verify'):
requests_args[k] = v
if method == 'get':
requests_args['params'] = params
else:
requests_args.update({
'data': params,
'files': files,
})
try:
response = func(url, **requests_args)
except requests.RequestException as e:
raise TwythonError(str(e))
content = response.content.decode('utf-8')
# create stash for last function intel
self._last_call = {
'api_call': api_call,
'api_error': None,
'cookies': response.cookies,
'headers': response.headers,
'status_code': response.status_code,
'url': response.url,
'content': content,
}
# Wrap the json loads in a try, and defer an error
# Twitter will return invalid json with an error code in the headers
json_error = False
try:
try:
# try to get json
content = content.json()
except AttributeError:
# if unicode detected
content = json.loads(content)
except ValueError:
json_error = True
content = {}
if response.status_code > 304:
# If there is no error message, use a default.
errors = content.get('errors',
[{'message': 'An error occurred processing your request.'}])
if errors and isinstance(errors, list):
error_message = errors[0]['message']
else:
error_message = errors # pragma: no cover
self._last_call['api_error'] = error_message
ExceptionType = TwythonError
if response.status_code == 429:
# Twitter API 1.1, always return 429 when rate limit is exceeded
ExceptionType = TwythonRateLimitError # pragma: no cover
elif response.status_code == 401 or 'Bad Authentication data' in error_message:
# Twitter API 1.1, returns a 401 Unauthorized or
# a 400 "Bad Authentication data" for invalid/expired app keys/user tokens
ExceptionType = TwythonAuthError
raise ExceptionType(error_message,
error_code=response.status_code,
retry_after=response.headers.get('retry-after'))
# if we have a json error here, then it's not an official Twitter API error
        if json_error and response.status_code not in (200, 201, 202):  # pragma: no cover
raise TwythonError('Response was not valid JSON, unable to decode.')
return content
def request(self, endpoint, method='GET', params=None, version='1.1'):
"""Return dict of response received from Twitter's API
:param endpoint: (required) Full url or Twitter API endpoint (e.g. search/tweets)
:type endpoint: string
:param method: (optional) Method of accessing data, either GET or POST. (default GET)
:type method: string
:param params: (optional) Dict of parameters (if any) accepted the by Twitter API endpoint you are trying to access (default None)
:type params: dict or None
:param version: (optional) Twitter API version to access (default 1.1)
:type version: string
:rtype: dict
"""
# In case they want to pass a full Twitter URL
# i.e. https://api.twitter.com/1.1/search/tweets.json
if endpoint.startswith('http://') or endpoint.startswith('https://'):
url = endpoint
else:
url = '%s/%s.json' % (self.api_url % version, endpoint)
content = self._request(url, method=method, params=params, api_call=url)
return content
def get(self, endpoint, params=None, version='1.1'):
"""Shortcut for GET requests via :class:`request`"""
return self.request(endpoint, params=params, version=version)
def post(self, endpoint, params=None, version='1.1'):
"""Shortcut for POST requests via :class:`request`"""
return self.request(endpoint, 'POST', params=params, version=version)
def get_lastfunction_header(self, header, default_return_value=None):
"""Returns a specific header from the last API call
This will return None if the header is not present
:param header: (required) The name of the header you want to get the value of
Most useful for the following header information:
x-rate-limit-limit,
x-rate-limit-remaining,
x-rate-limit-class,
x-rate-limit-reset
"""
if self._last_call is None:
raise TwythonError('This function must be called after an API call. It delivers header information.')
return self._last_call['headers'].get(header, default_return_value)
def get_authentication_tokens(self, callback_url=None, force_login=False, screen_name=''):
"""Returns a dict including an authorization URL, ``auth_url``, to direct a user to
:param callback_url: (optional) Url the user is returned to after they authorize your app (web clients only)
:param force_login: (optional) Forces the user to enter their credentials to ensure the correct users account is authorized.
        :param screen_name: (optional) If force_login is set OR user is not currently logged in, prefills the username input box of the OAuth login screen with the given value
:rtype: dict
"""
if self.oauth_version != 1:
raise TwythonError('This method can only be called when your OAuth version is 1.0.')
request_args = {}
if callback_url:
request_args['oauth_callback'] = callback_url
response = self.client.get(self.request_token_url, params=request_args)
if response.status_code == 401:
raise TwythonAuthError(response.content, error_code=response.status_code)
elif response.status_code != 200:
raise TwythonError(response.content, error_code=response.status_code)
request_tokens = dict(parse_qsl(response.content.decode('utf-8')))
if not request_tokens:
raise TwythonError('Unable to decode request tokens.')
oauth_callback_confirmed = request_tokens.get('oauth_callback_confirmed') == 'true'
auth_url_params = {
'oauth_token': request_tokens['oauth_token'],
}
if force_login:
auth_url_params.update({
'force_login': force_login,
'screen_name': screen_name
})
# Use old-style callback argument if server didn't accept new-style
if callback_url and not oauth_callback_confirmed:
            auth_url_params['oauth_callback'] = callback_url  # `self.callback_url` does not exist; use the argument
request_tokens['auth_url'] = self.authenticate_url + '?' + urlencode(auth_url_params)
return request_tokens
def get_authorized_tokens(self, oauth_verifier):
"""Returns a dict of authorized tokens after they go through the :class:`get_authentication_tokens` phase.
:param oauth_verifier: (required) The oauth_verifier (or a.k.a PIN for non web apps) retrieved from the callback url querystring
:rtype: dict
"""
if self.oauth_version != 1:
raise TwythonError('This method can only be called when your OAuth version is 1.0.')
response = self.client.get(self.access_token_url, params={'oauth_verifier': oauth_verifier})
authorized_tokens = dict(parse_qsl(response.content.decode('utf-8')))
if not authorized_tokens:
raise TwythonError('Unable to decode authorized tokens.')
return authorized_tokens # pragma: no cover
def obtain_access_token(self):
"""Returns an OAuth 2 access token to make OAuth 2 authenticated read-only calls.
:rtype: string
"""
if self.oauth_version != 2:
raise TwythonError('This method can only be called when your OAuth version is 2.0.')
data = {'grant_type': 'client_credentials'}
basic_auth = HTTPBasicAuth(self.app_key, self.app_secret)
try:
response = self.client.post(self.request_token_url,
data=data, auth=basic_auth)
content = response.content.decode('utf-8')
try:
content = content.json()
except AttributeError:
content = json.loads(content)
access_token = content['access_token']
except (KeyError, ValueError, requests.exceptions.RequestException):
raise TwythonAuthError('Unable to obtain OAuth 2 access token.')
else:
return access_token
@staticmethod
def construct_api_url(api_url, **params):
"""Construct a Twitter API url, encoded, with parameters
:param api_url: URL of the Twitter API endpoint you are attempting to construct
:param \*\*params: Parameters that are accepted by Twitter for the endpoint you're requesting
:rtype: string
Usage::
>>> from twython import Twython
>>> twitter = Twython()
>>> api_url = 'https://api.twitter.com/1.1/search/tweets.json'
>>> constructed_url = twitter.construct_api_url(api_url, q='python', result_type='popular')
>>> print constructed_url
https://api.twitter.com/1.1/search/tweets.json?q=python&result_type=popular
"""
querystring = []
params, _ = _transparent_params(params or {})
params = requests.utils.to_key_val_list(params)
for (k, v) in params:
querystring.append(
'%s=%s' % (Twython.encode(k), quote_plus(Twython.encode(v)))
)
return '%s?%s' % (api_url, '&'.join(querystring))
def search_gen(self, search_query, **params): # pragma: no cover
warnings.warn(
'This method is deprecated. You should use Twython.cursor instead. [eg. Twython.cursor(Twython.search, q=\'your_query\')]',
TwythonDeprecationWarning,
stacklevel=2
)
return self.cursor(self.search, q=search_query, **params)
def cursor(self, function, **params):
"""Returns a generator for results that match a specified query.
:param function: Instance of a Twython function (Twython.get_home_timeline, Twython.search)
:param \*\*params: Extra parameters to send with your request (usually parameters excepted by the Twitter API endpoint)
:rtype: generator
Usage::
>>> from twython import Twython
>>> twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
>>> results = twitter.cursor(twitter.search, q='python')
>>> for result in results:
>>> print result
"""
if not hasattr(function, 'iter_mode'):
raise TwythonError('Unable to create generator for Twython method "%s"' % function.__name__)
content = function(**params)
if not content:
raise StopIteration
if hasattr(function, 'iter_key'):
results = content.get(function.iter_key)
else:
results = content
for result in results:
yield result
if function.iter_mode == 'cursor' and content['next_cursor_str'] == '0':
raise StopIteration
try:
if function.iter_mode == 'id':
if not 'max_id' in params:
# Add 1 to the id because since_id and max_id are inclusive
if hasattr(function, 'iter_metadata'):
since_id = content[function.iter_metadata].get('since_id_str')
else:
since_id = content[0]['id_str']
params['since_id'] = (int(since_id) - 1)
elif function.iter_mode == 'cursor':
params['cursor'] = content['next_cursor_str']
except (TypeError, ValueError): # pragma: no cover
raise TwythonError('Unable to generate next page of search results, `page` is not a number.')
for result in self.cursor(function, **params):
yield result
@staticmethod
def unicode2utf8(text):
try:
if is_py2 and isinstance(text, str):
text = text.encode('utf-8')
except:
pass
return text
@staticmethod
def encode(text):
if is_py2 and isinstance(text, (str)):
return Twython.unicode2utf8(text)
return str(text)
@staticmethod
def html_for_tweet(tweet, use_display_url=True, use_expanded_url=False):
"""Return HTML for a tweet (urls, mentions, hashtags replaced with links)
:param tweet: Tweet object from received from Twitter API
:param use_display_url: Use display URL to represent link (ex. google.com, github.com). Default: True
:param use_expanded_url: Use expanded URL to represent link (e.g. http://google.com). Default False
If use_expanded_url is True, it overrides use_display_url.
If use_display_url and use_expanded_url is False, short url will be used (t.co/xxxxx)
"""
if 'retweeted_status' in tweet:
tweet = tweet['retweeted_status']
if 'entities' in tweet:
text = tweet['text']
entities = tweet['entities']
# Mentions
for entity in entities['user_mentions']:
start, end = entity['indices'][0], entity['indices'][1]
mention_html = '<a href="https://twitter.com/%(screen_name)s" class="twython-mention">@%(screen_name)s</a>'
text = text.replace(tweet['text'][start:end], mention_html % {'screen_name': entity['screen_name']})
# Hashtags
for entity in entities['hashtags']:
start, end = entity['indices'][0], entity['indices'][1]
hashtag_html = '<a href="https://twitter.com/search?q=%%23%(hashtag)s" class="twython-hashtag">#%(hashtag)s</a>'
text = text.replace(tweet['text'][start:end], hashtag_html % {'hashtag': entity['text']})
# Urls
for entity in entities['urls']:
start, end = entity['indices'][0], entity['indices'][1]
if use_display_url and entity.get('display_url') and not use_expanded_url:
shown_url = entity['display_url']
elif use_expanded_url and entity.get('expanded_url'):
shown_url = entity['expanded_url']
else:
shown_url = entity['url']
url_html = '<a href="%s" class="twython-url">%s</a>'
text = text.replace(tweet['text'][start:end], url_html % (entity['url'], shown_url))
return text
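# Usage sketch (illustrative, not part of the module): the application-only
# OAuth 2 flow. APP_KEY/APP_SECRET are placeholders for real app credentials.
#
# twitter = Twython(APP_KEY, APP_SECRET, oauth_version=2)
# ACCESS_TOKEN = twitter.obtain_access_token()
# twitter = Twython(APP_KEY, access_token=ACCESS_TOKEN)
# results = twitter.get('search/tweets', params={'q': 'python'})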
|
[
"sean@lot18.com"
] |
sean@lot18.com
|
02e2a5c9006adf8945f1324d99d009a124459bd9
|
495e58958122b964f81336db888502b6f8dec8de
|
/script.py
|
907e30d06796914df2a71aeeade420963a96eafa
|
[] |
no_license
|
ytwog/rsa-cipher
|
04710dd89cd59dfae9ab9c4d2f1355fc85450be4
|
9f93039228ceddd5cf0978867b752caaa90293ae
|
refs/heads/master
| 2022-09-10T21:44:03.649928
| 2020-06-01T17:09:36
| 2020-06-01T17:09:36
| 268,554,688
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,230
|
py
|
import PySimpleGUI as sg
import math
import sys
element_input = sg.Text('поле ввода.', size=(40,1), text_color='Black')
element_output = sg.Text('поле вывода.', size=(39,1), text_color='Black')
element_prv = sg.InputText(size=(10, 1))
element_pub = sg.InputText(size=(10, 1))
element_out = sg.Output(size=(88, 1))
element_arr = sg.InputText(size=(44,10))
element_text = sg.InputText(size=(44,10))
element_save = sg.FileSaveAs()
element_open = sg.FileBrowse()
str_output = ''
def translate_to_arr(str_inp):
output_str = ''
for a in range(len(str_inp)):
output_str += str(ord(str_inp[a]))
if a != len(str_inp) - 1:
output_str += ','
return output_str
def is_prime(n):
if n == 2:
return True
if n % 2 == 0 or n <= 1:
return False
sqr = int(math.sqrt(n)) + 1
for divisor in range(3, sqr, 2):
if n % divisor == 0:
return False
return True
def keygen(P, Q):
#example:
#p = 1009
#q = 2741
if not is_prime(P) or not is_prime(Q):
raise ValueError('P or Q were not prime')
x = 1
y = 1
eq = (P - 1) * (Q - 1) + 1
xy = x*y
while xy != eq:
x += 1
y = int(eq / x)
xy = x*y
print ("Публичный ключ: " + str(x))
print ("Приватный ключ: " + str(y))
print ("Число N (P*Q) : " + str(P*Q))
return [[x,P*Q], y]
def extract_info(flag):
i = sys.argv.index(flag) + 1
return sys.argv[i]
def cipher(N, key, message):
return pow(message, key, N)
layout = [
[
sg.Text('Входные данные:'),
element_input,
element_open,
sg.Button('Сбросить способ ввода'),
],
[
sg.Text('Выходные данные:'),
element_output,
element_save,
sg.Button('Сбросить способ вывода')
],
[
sg.Text('P = '),
sg.InputText(size=(10,1)),
sg.Text('Q = '),
sg.InputText(size=(10,1)),
sg.Button('Сгенерировать P и Q', enable_events='GENERATE'),
],
[
sg.Text('Public = '),
element_pub,
sg.Text('Private = '),
element_prv,
sg.Button('Сгенерировать ключи', enable_events='GENERATE'),
],
[
sg.Text('Исходный текст:', size=(38, 1)),
sg.Text('Представление в виде массива чисел:', size=(44, 1))
],
[
element_text,
element_arr
],
[
sg.Button('Перевести строку в численный массив', size=(38, 1)),
sg.Button('Перевести массив в строку, если возможно', size=(39, 1))
],
[
sg.Button('Зашифровать', size=(24,2)),
sg.Button('Расшифровать', size=(24,2))
],
[
sg.Text('Результат:'),
sg.Button('Сохранить в выходной файл', enable_events='SAVE')
],
[
element_out
],
[
sg.Text('Отчет:')
],
[
sg.Output(size=(88, 6))
],
[
sg.Text('Программа написана в 2020 году', text_color=('Blue'))
],
[
sg.Text('студентом группы КИ17-02/1б, Апанасенко В.В.', text_color=('Blue'))
]
]
window = sg.Window('RSA-Cipher-v1.0', layout)
while True: # The Event Loop
event, values = window.read()
print(event, values) #debug
if event in (None, 'Exit', 'Cancel'):
break
if event == 'Сгенерировать P и Q':
print('Генерация P и Q...')
print('Завершено')
if event == 'Сгенерировать ключи':
print('Генерация ключей...')
if(values[0] == '' or values[1] == ''):
print('Сначала введите или сгенерируйте P и Q')
continue
try:
resKey = keygen(int(values[0]), int(values[1]))
public_str = str(resKey[0][0])
public_str += ','
public_str += str(resKey[0][1])
private_str = str(resKey[1])
private_str += ','
private_str += str(resKey[0][1])
element_pub.update(public_str)
element_prv.update(private_str)
except:
print('Ошибка ввода или обработки')
finally:
print('Завершено')
if event == 'Зашифровать' or event == 'Расшифровать':
decrypt = (event == 'Расшифровать')
if(decrypt):
print('Расшифровка...')
key_p = str(values[3])
else:
print('Шифрование...')
key_p = str(values[2])
message = str(values[5])
print('Ключ: ' + key_p)
        # If the key field is empty:
if (values[2] == '' and not decrypt) or (values[3] == '' and decrypt):
print('Сначала введите или сгенерируйте ключи')
continue
if len(message) == 0:
            # build the numeric array from the string if the user has not done it yet
message = translate_to_arr(values[4])
element_arr.update(message)
if len(message) == 0:
print('Введите сообщение или выберите входной файл.')
continue
        # parse the numeric array
try:
arr_int_str = message.split(',')
arr_int = {}
for a in range(len(arr_int_str)):
arr_int[a] = int(arr_int_str[a])
except:
print('Некорректный формат строки. Вводите его следующим образом:')
print('<int>,<int>,...,<int>,<int>, где <int> - целое число')
continue
try:
K = int(key_p.split(',')[0])
N = int(key_p.split(',')[1])
except:
print('Ошибка ввода или обработки')
continue
output_str = ''
for a in range(len(arr_int)):
output_str += str(cipher(N, K, arr_int[a]))
if a != len(arr_int)-1:
output_str += ','
print('Результат: ' + str(output_str))
element_out.update(output_str)
str_output = output_str
if event == 'Сохранить в выходной файл':
if values['Save As...'] == '':
print('Файл не выбран.')
if event == 'Сбросить способ ввода':
element_input.update('поле ввода')
if event == 'Сбросить способ вывода':
element_output.update('поле вывода')
values[1] = 'поле ввода'
element_save.update()
if event == 'Перевести строку в численный массив':
element_arr.update(translate_to_arr(values[4]))
if event == 'Перевести массив в строку, если возможно':
str_inp = str(values[5])
output_str = ''
try:
arr_int_str = str_inp.split(',')
arr_int = {}
for a in range(len(arr_int_str)):
arr_int[a] = int(arr_int_str[a])
except:
print('Некорректный формат строки. Вводите его следующим образом:')
print('<int>,<int>,...,<int>,<int>, где <int> - целое число')
continue
for a in range(len(arr_int)):
output_str += str(chr(arr_int[a]))
element_text.update(output_str)
window.close()
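# Round-trip sketch (illustrative, appended; not part of the GUI flow above):
# keygen() returns [[public, N], private] with public*private == (P-1)*(Q-1) + 1,
# so encryption and decryption are mutual inverses mod N. For the example
# primes mentioned in keygen's comment:
#
# (pub_e, n), prv_d = keygen(1009, 2741)     # -> (199, 2765669), 13879
# c = cipher(n, pub_e, ord('A'))             # encrypt the code point 65
# assert cipher(n, prv_d, c) == ord('A')     # decryption recovers it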
|
[
"ytwog@mail.ru"
] |
ytwog@mail.ru
|
9010c5581a9513a9f1a1c692ee84614ff7412ccd
|
d3f680630426ff3a63d564e78bb1480863a7f0f6
|
/services/web__lnr_dz_com.py
|
4e9cd508a6ccf0e2985a9ae0d53a175c9d3f117d
|
[] |
no_license
|
JesseWeinstein/NewsGrabber
|
09f9d567449e99ba211e4ba61b42c53276de235b
|
b431dc5f313d4718c6328aaaa97da1bc8e136023
|
refs/heads/master
| 2020-12-31T02:32:48.359448
| 2016-01-23T14:20:26
| 2016-01-23T14:20:26
| 48,966,133
| 1
| 0
| null | 2016-01-04T00:33:41
| 2016-01-04T00:33:41
| null |
UTF-8
|
Python
| false
| false
| 151
|
py
|
refresh = 5
version = 20160123.01
urls = ['http://www.lnr-dz.com/index.php']
regex = [r'^https?:\/\/[^\/]*lnr-dz\.com']
videoregex = []
liveregex = []
|
[
"Arkiver@hotmail.com"
] |
Arkiver@hotmail.com
|
e5a93417d030dd009b5d9cea1f0dfcc2bc9b69e0
|
14d779114d51067223c5d2b93903103a2def9769
|
/utils.py
|
7b3b2e0ad46a6d5556a0438675bf1c95aaff2c77
|
[
"MIT"
] |
permissive
|
PaParaZz1/vehicle-triplet-reid
|
475c1864cba82c8aedef2622802c7993823a5d42
|
588607b3a68cf3c41b9aa298e9dcb92e8d898507
|
refs/heads/master
| 2020-03-31T03:07:19.836698
| 2018-10-23T16:37:02
| 2018-10-23T16:37:02
| 151,853,634
| 4
| 0
|
MIT
| 2018-10-23T16:37:03
| 2018-10-06T15:30:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,120
|
py
|
import numpy as np
import tensorflow as tf
from tensorflow.python.client import device_lib
def show_stats(name, x):
print('Stats for {} | max {:.5e} | min {:.5e} | mean {:.5e}'.format(name, np.max(x), np.min(x), np.mean(x)))
def available_gpu_num():
local_device_protos = device_lib.list_local_devices()
return len([x.name for x in local_device_protos if x.device_type == 'GPU'])
def get_available_gpus():
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
def make_parallel(fn, num_gpus, **kwargs):
in_splits = {}
for k, v in kwargs.items():
in_splits[k] = tf.split(v, num_gpus)
# print('in_splits {}'.format(in_splits))
out_splits = []
for i in range(num_gpus):
with tf.device(tf.DeviceSpec(device_type="GPU", device_index=i)):
with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
out_splits.append(fn(**{k : v[i] for k, v in in_splits.items()}))
# print('out_splits {}'.format(out_splits))
return tf.concat(out_splits, axis=0)
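# Usage sketch (illustrative, not from the original module; TF1 graph mode):
# `tower_fn` is a hypothetical per-GPU model function; make_parallel splits
# every kwarg along axis 0 across the GPUs and concatenates the outputs.
#
# def tower_fn(x):
#     return tf.layers.dense(x, 10)
#
# x = tf.placeholder(tf.float32, [64, 128])          # batch dim must be splittable
# logits = make_parallel(tower_fn, num_gpus=2, x=x)  # concat of per-GPU outputs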
|
[
"ken.qrose@gmail.com"
] |
ken.qrose@gmail.com
|
19c60b3de6fe6db0447e297ddbb69cb655215a9e
|
699304aa5a8bd3b01b35dade02e105f3899cb054
|
/Web/venv/Scripts/pip-script.py
|
4eb62af5361cfe6e2cef4b1590a139e664d0893e
|
[] |
no_license
|
ZhaoWenjun123/qwer
|
b1087a70118fe6cb7cc49def66528bcd5c0b5ba7
|
c976ee1f03011272122fd1e07858e444b39e3a91
|
refs/heads/master
| 2020-11-28T18:54:58.642017
| 2019-12-24T07:56:56
| 2019-12-24T07:56:56
| 229,896,456
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
#!C:\Users\zwj18\PycharmProjects\Web\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
|
[
"56943171+ZhaoWenjun123@users.noreply.github.com"
] |
56943171+ZhaoWenjun123@users.noreply.github.com
|
796189ab5954626c9df5ef9728c5d2ce706937ad
|
9aaa39f200ee6a14d7d432ef6a3ee9795163ebed
|
/Algorithm/Python/205. Isomorphic Strings.py
|
3e0887c4dd516ad3f164a2ca7fd98a0abd1916f4
|
[] |
no_license
|
WuLC/LeetCode
|
47e1c351852d86c64595a083e7818ecde4131cb3
|
ee79d3437cf47b26a4bca0ec798dc54d7b623453
|
refs/heads/master
| 2023-07-07T18:29:29.110931
| 2023-07-02T04:31:00
| 2023-07-02T04:31:00
| 54,354,616
| 29
| 16
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 887
|
py
|
# -*- coding: utf-8 -*-
# @Author: WuLC
# @Date: 2016-06-30 17:54:03
# @Last modified by: WuLC
# @Last Modified time: 2016-06-30 17:54:11
# @Email: liangchaowu5@gmail.com
class Solution(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
s_index, t_index = self.count_index(s), self.count_index(t)
if len(s_index) != len(t_index):
return False
for i in xrange(len(s_index)):
if s_index[i] != t_index[i]:
return False
return True
def count_index(self, s):
char, index, count = {}, [], 0
for i in xrange(len(s)):
if s[i] not in char:
char[s[i]] = count
count += 1
index.append([])
index[char[s[i]]].append(i)
return index
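# Usage sketch (illustrative, not part of the original solution; Python 2):
# s = Solution()
# print s.isIsomorphic('egg', 'add')  # True  (both map to index pattern [[0], [1, 2]])
# print s.isIsomorphic('foo', 'bar')  # False ('o' repeats, but 'a'/'r' do not)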
|
[
"liangchaowu5@gmail.com"
] |
liangchaowu5@gmail.com
|
48e072dbbd46720fc1006d690b1cab539eff0b21
|
9feb0d4b99c035423eeac4639c65b79436418951
|
/common.py
|
3ac91eaa74fd3bcb159796b321f395b7f876874d
|
[] |
no_license
|
ianasushko/phonetics_bot
|
1b236c814e175607f3d218e42d9dfdb5034ee62f
|
cfb0d37784dc4a82f4a017981126d818b47f7af5
|
refs/heads/master
| 2023-02-08T14:17:38.753696
| 2020-12-30T10:44:09
| 2020-12-30T10:44:09
| 267,148,335
| 0
| 1
| null | 2020-12-30T10:44:10
| 2020-05-26T20:45:28
|
Python
|
UTF-8
|
Python
| false
| false
| 7,257
|
py
|
from random import choice, shuffle
import json
import os
from telebot import types
from bot import bot
dirpath = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(dirpath, 'comments.txt')
with open(path) as file:  # open in the main function
comments = file.readlines()
dirpath = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(dirpath, 'sounds.json')
BIG_DICT = json.load(open(path))
C_PLACE = BIG_DICT['C_PLACE']
C_WAY = BIG_DICT['C_WAY']
C_SOUNDNESS = BIG_DICT['C_SOUNDNESS']
C_EXTRA_ARTICULATION = BIG_DICT['C_EXTRA_ARTICULATION']
RUS_LAT = BIG_DICT['RUS_LAT']
ALL_CONSONANTS = BIG_DICT['ALL_CONSONANTS']
ALT_NAMES = BIG_DICT['ALT_NAMES']
# {'sound': [4 features]}
dicts = [C_PLACE, C_WAY, C_EXTRA_ARTICULATION, C_SOUNDNESS]
database = {}
for d in dicts:
for char, sounds in d.items():
        for s in sounds:  # (the sounds)
if s not in database:
database[s] = []
database[s].append(char)
def check_alternative_names(user_input): #ALT_NAMES={'щелевой':['фрикативный']}
for char_name, alt_names in ALT_NAMES.items():
if user_input in alt_names:
return char_name
def proceeding_input(raw_text):
corrected = raw_text.lower().replace('ё', 'е').strip()
return corrected
def printed_signs():
need_signs = input('Нужны ли Вам обозначения звуков - введите да/нет: ')
need_signs = proceeding_input(need_signs)
while need_signs != 'да' and need_signs != 'нет' and need_signs != '':
print(''.join(comments[17]))
need_signs = input()
if need_signs == 'да':
shuffle(ALL_CONSONANTS)
list_of_consonants = ' , '.join(ALL_CONSONANTS)
print(list_of_consonants)
def beautiful_mistakes(errors):
if errors == 1:
shown = 'Вы допустили ' + str(errors) + ' ошибку. Запомните:'
elif errors > 4:
shown = 'Вы допустили ' + str(errors) + ' ошибок. Запомните:'
else:
shown = 'Вы допустили ' + str(errors) + ' ошибки. Запомните:'
return shown
def ask_user(chat_id, question, options=None):
MAX_PER_ROW = 35
if options is not None:
        # lays the options out in rows:
        # each row holds at most MAX_PER_ROW characters,
        # with a 4-character gap between buttons
kb_buttons = [ types.KeyboardButton(o) for o in options ]
markup = types.ReplyKeyboardMarkup()
        row_char_count = 0  # length of the current row, in characters
        current_row = []  # holds the buttons that will go into the current row
        for option, btn in zip(options, kb_buttons):  # option label and its button
            c = len(option) + (4 if len(current_row) != 0 else 0)  # c - length of this option
            # (+ the gap before it, if it is not the first in the row)
            if row_char_count + c > MAX_PER_ROW:  # if the new option would overflow the row
                if len(current_row) != 0:  # ...and it is not the first in the current row
                    markup.row(*current_row)  # put the options onto the panel
                    current_row = [btn]  # start a new row with the option that overflowed
                    row_char_count = c
                else:
                    markup.row(btn)  # put it into a row of its own
            else:
                current_row.append(btn)  # if it fits, keep adding
                row_char_count += c
        if len(current_row) != 0:  # if anything is left in the current row, add it
            markup.row(*current_row)
bot.send_message(chat_id, question, reply_markup=markup)
else:
bot.send_message(chat_id, question)
class Task:
    def __init__(self, chat_id):  # runs once
self.chat_id = chat_id
self.past_mistakes = []
self.final_callback_fn = None
self.reset()
def reset(self):
if hasattr(self, 'mistakes'):
self.past_mistakes += self.mistakes
print(self.past_mistakes)
self.used = []
self.mistakes = []
self.points = 0
self.available = []
def ask_user(self, question, options=None):
if options is not None:
options = ['не знаю', *options]
ask_user(self.chat_id, question, options)
def send_sound(self, sound):
try:
sound_path = os.path.join(dirpath, 'sounds', sound + '.ogg')
sound_file = open(sound_path, 'rb')
bot.send_voice(self.chat_id, sound_file)
except FileNotFoundError:
pass
    def put_mark(self, nums):  # 5/5, a mark out of 10, or 100%
        text = ''  # everything that will be sent in the message
for n in nums:
if n == '1':
m1 = int(self.points / 2)
mark1 = str(m1) + '/5'
text += f'Вы выполнили {mark1} заданий.'
if n == '2':
if self.points >= 8:
text += f'Поздравляю! Ваша оценка {self.points}'
else:
text += f'Ваша оценка {str(self.points)}. В следующий раз справитесь лучше!'
if n == '3':
m3 = self.points * 10
text += f'Задание выполнено на {str(m3)}%'
bot.send_message(self.chat_id, text)
def choose_answer(self):
answer = choice(list(database.keys()))
while answer in self.used:
answer = choice(list(database.keys()))
for k, v in database.items():
if database[answer][:2] == v[:2]:
self.used.append(k)
return answer
def get_answer(self):
if len(self.past_mistakes) > 0:
idx = choice(range(len(self.past_mistakes)))
            return self.past_mistakes.pop(idx)  # remove it and hand it back to the task
else:
return self.choose_answer()
def continue_task(self):
if self.count < 5:
self.run_task(self.get_answer())
self.count += 1
else:
self.done_five_times()
def run_task_five_times(self, grading_nums):
self.count = 0
self.grading_nums = grading_nums
self.continue_task()
def after_task_run(self, callback_fn):
self.final_callback_fn = callback_fn
def done(self):
self.continue_task()
def done_five_times(self):
self.error()
self.put_mark(self.grading_nums)
self.reset()
if self.final_callback_fn is not None:
self.final_callback_fn()
|
[
"noreply@github.com"
] |
ianasushko.noreply@github.com
|
c0041f565f30feeecd70aafda6f0da1d7ca014b3
|
65b8d92b2236c6e73829eea7fc9f9ea2fc7f87bd
|
/ModelStructure/Costs/costFunctionDict.py
|
840a870efcb11eb4bac21a97baa575a93d2ac41c
|
[] |
no_license
|
andygaspar/Portogallo
|
8ae7a016ae52d66726297acb64a2804de4f3f862
|
015710b6737b6ef3719ea3225007cd5511b17767
|
refs/heads/master
| 2023-04-12T09:13:13.506429
| 2021-04-01T07:43:25
| 2021-04-01T07:43:25
| 330,452,351
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,595
|
py
|
from ModelStructure.modelStructure import ModelStructure
from ModelStructure.Flight.flight import Flight
from ModelStructure.Slot.slot import Slot
import pandas as pd
import os
at_gate = pd.read_csv("ModelStructure/Costs/costs_table_gate.csv", sep=" ")
types = at_gate["flight"].unique()
print("voli", types)
flightTypeDict = dict(zip(types, range(len(types))))
delay_range = list(at_gate.columns[1:].astype(int))
def get_interval(time):
    # return the index of the delay interval containing `time`;
    # falls back to the last interval when `time` exceeds the table range
    for i in range(len(delay_range)-1):
        if delay_range[i] <= time < delay_range[i+1]:
            return i
    return i
def compute(flight, slot):
delay = slot.time - flight.eta
delay = delay if delay >= 0 else 0
i = get_interval(delay)
y2 = at_gate[at_gate["flight"] == flight.type][str(delay_range[i+1])].values[0]
y1 = at_gate[at_gate["flight"] == flight.type][str(delay_range[i])].values[0]
x2 = delay_range[i+1]
x1 = delay_range[i]
return y1 + (delay - x1)*(y2 - y1)/(x2 - x1)
class CostFuns:
def __init__(self):
self.flightTypeDict = flightTypeDict
self.costFun = {
"linear": lambda flight, slot: flight.cost * (slot.time - flight.eta),
"quadratic": lambda flight, slot: (flight.cost * (slot.time - flight.eta) ** 2)/2,
"step": lambda flight, slot: 0 if slot.time - flight.eta < 0 else (slot.time - flight.eta) * flight.cost
if (slot.time - flight.eta) < flight.margin else
((slot.time - flight.eta) * flight.cost*10 + flight.cost * 30),
"realistic": lambda flight, slot: compute(flight, slot)
}
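# Usage sketch (illustrative, not part of the original module): the cost
# functions only need `.cost`, `.eta`, `.margin` and `.type` on the flight
# object and `.time` on the slot object; `flight`/`slot` below are assumed
# to come from the surrounding ModelStructure code.
#
# costs = CostFuns()
# delay_cost = costs.costFun["quadratic"](flight, slot)   # 0.5 * cost * delay^2
# real_cost = costs.costFun["realistic"](flight, slot)    # interpolated from the gate table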
|
[
"andygaspar@libero.it"
] |
andygaspar@libero.it
|
f580f185fe640e2fe2593a65f43f4a8e11a05a14
|
8e1293fc2d61104984eb0e78d1aa6d247fa28742
|
/store/urls.py
|
bb05425a33ffee92f156e652f5b53f592e42c97f
|
[] |
no_license
|
jainrishabh18/ecommerce-site
|
31aabb93ebbc7c1cc441c1ee48e5929c37683f25
|
5447b7a356dfd32f534d9ebdb44af1fc7b4a35b1
|
refs/heads/master
| 2023-05-13T13:22:17.186460
| 2021-06-09T11:22:01
| 2021-06-09T11:22:01
| 370,991,318
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
# full code is self-written here
from django.urls import path
from . import views
urlpatterns = [
path('',views.store,name="store"),
path('cart/',views.cart,name="cart"),
path('checkout/',views.checkout,name="checkout"),
path('update_item/', views.updateItem, name="update_item"),
]
|
[
"49811626+jainrishabh18@users.noreply.github.com"
] |
49811626+jainrishabh18@users.noreply.github.com
|
5853b4ab16142ca22a9022778d24c0aa3ca91d79
|
62f3cf009632ceb927414979cb90857c7cd4dffe
|
/web/pgadmin/tools/schema_diff/directory_compare.py
|
d99368686946ced69c303e78768718c7295f92f2
|
[
"PostgreSQL"
] |
permissive
|
xtremedata/pgadmin4
|
dd5f603f60231c67ad8e261ec3e450bb5df434ec
|
2cc4af9244b7e086c2ddfcde77e896f9f377ee12
|
refs/heads/master
| 2021-06-28T21:56:47.586979
| 2020-01-14T21:32:48
| 2020-01-14T21:32:48
| 233,937,206
| 0
| 0
|
NOASSERTION
| 2020-01-14T21:14:40
| 2020-01-14T21:14:39
| null |
UTF-8
|
Python
| false
| false
| 10,144
|
py
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Directory comparison"""
import copy
from pgadmin.tools.schema_diff.model import SchemaDiffModel
count = 1
def compare_dictionaries(source_dict, target_dict, node, node_label,
ignore_keys=None):
"""
This function will compare the two dictionaries.
:param source_dict: First Dictionary
:param target_dict: Second Dictionary
    :param node: node type
    :param node_label: node label shown in the comparison results
    :param ignore_keys: List of keys that will be ignored while comparing
:return:
"""
dict1 = copy.deepcopy(source_dict)
dict2 = copy.deepcopy(target_dict)
# Find the duplicate keys in both the dictionaries
dict1_keys = set(dict1.keys())
dict2_keys = set(dict2.keys())
intersect_keys = dict1_keys.intersection(dict2_keys)
# Keys that are available in source and missing in target.
source_only = []
added = dict1_keys - dict2_keys
global count
for item in added:
source_only.append({
'id': count,
'type': node,
'label': node_label,
'title': item,
'oid': source_dict[item]['oid'],
'status': SchemaDiffModel.COMPARISON_STATUS['source_only']
})
count += 1
target_only = []
# Keys that are available in target and missing in source.
removed = dict2_keys - dict1_keys
for item in removed:
target_only.append({
'id': count,
'type': node,
'label': node_label,
'title': item,
'oid': target_dict[item]['oid'],
'status': SchemaDiffModel.COMPARISON_STATUS['target_only']
})
count += 1
# Compare the values of duplicates keys.
identical = []
different = []
for key in intersect_keys:
# ignore the keys if available.
for ig_key in ignore_keys:
if ig_key in dict1[key]:
dict1[key].pop(ig_key)
if ig_key in dict2[key]:
dict2[key].pop(ig_key)
# Recursively Compare the two dictionary
if are_dictionaries_identical(dict1[key], dict2[key], ignore_keys):
identical.append({
'id': count,
'type': node,
'label': node_label,
'title': key,
'oid': source_dict[key]['oid'],
'source_oid': source_dict[key]['oid'],
'target_oid': target_dict[key]['oid'],
'status': SchemaDiffModel.COMPARISON_STATUS['identical']
})
else:
different.append({
'id': count,
'type': node,
'label': node_label,
'title': key,
'oid': source_dict[key]['oid'],
'source_oid': source_dict[key]['oid'],
'target_oid': target_dict[key]['oid'],
'status': SchemaDiffModel.COMPARISON_STATUS['different']
})
count += 1
return source_only + target_only + different + identical
def are_lists_identical(source_list, target_list, ignore_keys):
"""
This function is used to compare two list.
:param source_list:
:param target_list:
:return:
"""
if source_list is None or target_list is None or \
len(source_list) != len(target_list):
return False
else:
for index in range(len(source_list)):
# Check the type of the value if it is an dictionary then
# call are_dictionaries_identical() function.
if type(source_list[index]) is dict:
if not are_dictionaries_identical(source_list[index],
target_list[index],
ignore_keys):
return False
else:
if source_list[index] != target_list[index]:
return False
return True
def are_dictionaries_identical(source_dict, target_dict, ignore_keys):
"""
This function is used to recursively compare two dictionaries with
same keys.
:param source_dict:
:param target_dict:
:return:
"""
src_keys = set(source_dict.keys())
tar_keys = set(target_dict.keys())
# ignore the keys if available.
for ig_key in ignore_keys:
if ig_key in src_keys:
source_dict.pop(ig_key)
if ig_key in target_dict:
target_dict.pop(ig_key)
# Keys that are available in source and missing in target.
src_only = src_keys - tar_keys
# Keys that are available in target and missing in source.
tar_only = tar_keys - src_keys
# If number of keys are different in source and target then
# return False
if len(src_only) != len(tar_only):
return False
else:
# If number of keys are same but key is not present in target then
# return False
for key in src_only:
if key not in tar_only:
return False
for key in source_dict.keys():
if type(source_dict[key]) is dict:
if not are_dictionaries_identical(source_dict[key],
target_dict[key], ignore_keys):
return False
elif type(source_dict[key]) is list:
if not are_lists_identical(source_dict[key], target_dict[key],
ignore_keys):
return False
else:
if source_dict[key] != target_dict[key]:
return False
return True
def directory_diff(source_dict, target_dict, ignore_keys=[], difference={}):
"""
This function is used to recursively compare two dictionaries and
return the difference.
The difference is from source to target
:param source_dict: source dict
:param target_dict: target dict
:param ignore_keys: ignore keys to compare
"""
src_keys = set(source_dict.keys())
tar_keys = set(target_dict.keys())
# Keys that are available in source and missing in target.
src_only = src_keys - tar_keys
# Keys that are available in target and missing in source.
tar_only = tar_keys - src_keys
for key in source_dict.keys():
added = []
deleted = []
updated = []
source = None
# ignore the keys if available.
if key in ignore_keys:
pass
        elif key in tar_only:
            # Target only values go into the deleted list
            # (unreachable while iterating source keys, kept defensively)
            difference[key] = {'deleted': target_dict[key]}
elif key in src_only:
# Source only values in the newly added list
if type(source_dict[key]) is list:
difference[key] = {}
difference[key]['added'] = source_dict[key]
elif type(source_dict[key]) is dict:
directory_diff(source_dict[key], target_dict[key],
ignore_keys, difference)
elif type(source_dict[key]) is list:
tmp_target = None
for index in range(len(source_dict[key])):
source = copy.deepcopy(source_dict[key][index])
if type(source) is list:
# TODO
pass
elif type(source) is dict:
if 'name' in source or 'colname' in source:
if type(target_dict[key]) is list and len(
target_dict[key]) > 0:
tmp = None
tmp_target = copy.deepcopy(target_dict[key])
for item in tmp_target:
if (
'name' in item and
item['name'] == source['name']
) or (
'colname' in item and
item['colname'] == source['colname']
):
tmp = copy.deepcopy(item)
if tmp and source != tmp:
updated.append(copy.deepcopy(source))
tmp_target.remove(tmp)
elif tmp and source == tmp:
tmp_target.remove(tmp)
elif tmp is None:
added.append(source)
else:
added.append(source)
difference[key] = {}
difference[key]['added'] = added
difference[key]['changed'] = updated
elif target_dict[key] is None or \
(type(target_dict[key]) is list and
len(target_dict[key]) < index and
source != target_dict[key][index]):
difference[key] = source
elif type(target_dict[key]) is list and\
len(target_dict[key]) > index:
difference[key] = source
if type(source) is dict and tmp_target and key in tmp_target and \
tmp_target[key] and len(tmp_target[key]) > 0:
if type(tmp_target[key]) is list and \
type(tmp_target[key][0]) is dict:
deleted = deleted + tmp_target[key]
else:
deleted.append({key: tmp_target[key]})
difference[key]['deleted'] = deleted
elif tmp_target and type(tmp_target) is list:
difference[key]['deleted'] = tmp_target
else:
if source_dict[key] != target_dict[key]:
difference[key] = source_dict[key]
    # Keys present only in the target never come up in the loop over the
    # source keys above, so report them as deleted here.
    for key in tar_only:
        if key not in ignore_keys:
            difference[key] = {'deleted': target_dict[key]}
    return difference
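# --- Illustrative usage sketch (not part of the original module) ---
if __name__ == '__main__':
    src = {'name': 'tbl', 'columns': [{'name': 'id', 'type': 'int'}]}
    tar = {'name': 'tbl', 'columns': [{'name': 'id', 'type': 'bigint'}]}
    # Expected False here, since the column lists differ.
    print(are_dictionaries_identical(src, tar, []))
    # List entries are matched on their 'name' key, so the type change of
    # 'id' is reported under 'changed':
    # {'columns': {'added': [], 'changed': [{'name': 'id', 'type': 'int'}]}}
    print(directory_diff(src, tar))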
|
[
"akshay.joshi@enterprisedb.com"
] |
akshay.joshi@enterprisedb.com
|
dd01c694794c02ce319a765f9efc0ae614598628
|
5ca09e822ae6c2365b8bfb1dc4b4534ca8538d6f
|
/LDA/ex_2_A_lda.py
|
63e48789d76807424ca2606430815bf26a9540d0
|
[] |
no_license
|
jkenjii/Topicos_Especias
|
7e9079c4b4193f0c9c4edaae92414f62ed4a17ee
|
ddb5a4d8b4db39d34dc448f8eac78b08826b9f6d
|
refs/heads/main
| 2023-02-02T18:26:25.599918
| 2020-12-11T04:21:11
| 2020-12-11T04:21:11
| 302,212,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,491
|
py
|
#4DIM -> PCA -> 2DIM -> LDA
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn import datasets
iris = datasets.load_iris()
M = iris.data
y = iris.target
print(M)
media_colunas = np.mean(M, axis=0)
print(media_colunas)
DataAdjust = M - media_colunas
print(DataAdjust)
matriz_cova = np.cov(DataAdjust, rowvar=False)
print(matriz_cova)
auto_VALORES, auto_VETORES = np.linalg.eig(matriz_cova)
idx = auto_VALORES.argsort()[::-1] # indices from largest to smallest eigenvalue
auto_VETORES = auto_VETORES[:, idx]
W = auto_VETORES[:, :2] # reduced dimensions from PCA
reduzida = M.dot(W)
print(reduzida)
print("Vetores: ", auto_VETORES)
print("Valores: ",auto_VALORES)
#plt.scatter(reduzida[:, 0],reduzida[:, 1], c=y)
target_names = iris.target_names
for i, target_name in zip([0, 1, 2], target_names):
plt.scatter(reduzida[y == i, 0], reduzida[y == i, 1],label=target_name)
plt.title('PCA')
plt.legend()
plt.show()
X = reduzida
Classes = np.unique(y)
media_a = [] # mean per class
for i in Classes:
media_a.append(np.mean(X[y == i], axis=0))
print("media_a",media_a)
media = np.mean(X, axis = 0) # overall mean of each attribute
print("media",media)
S_B = np.zeros((X.shape[1], X.shape[1])) #between-class scatter matrix
for i, class_mean in enumerate(media_a):  # do not shadow the list of class means
    n = X[y == i].shape[0]
    class_mean = class_mean.reshape(1, X.shape[1])  # reshape to a row vector
    m = class_mean - media
    S_B = S_B + (n * np.matmul(m.T, m))
print(S_B)
x0 = X[y==0] # samples of class 0 only
x1 = X[y==1] # samples of class 1 only
x2 = X[y==2] # samples of class 2 only
conv0 = np.cov(x0.T) # covariance matrix of class 0
conv1 = np.cov(x1.T) # covariance matrix of class 1
conv2 = np.cov(x2.T) # covariance matrix of class 2
S_W = conv0 + conv1 + conv2 # within-class scatter matrix (sum of the per-class covariance matrices)
print(S_W)
VALORES, VETORES = np.linalg.eig(np.linalg.inv(S_W).dot(S_B)) # find the eigenvalues and eigenvectors
idx = VALORES.argsort()[::-1] # indices from largest to smallest eigenvalue
VETORES = VETORES[:, idx]
print("lda_eigenvectors", VETORES)
W = VETORES[:, :2] # reduced dimensions from LDA
print(W)
transformada = X.dot(W)
target_names = iris.target_names
for i, target_name in zip([0, 1, 2], target_names):
plt.scatter(transformada[y == i, 0], transformada[y == i, 1],label=target_name)
plt.legend()
plt.plot(np.linspace(-2.5,3.5),(np.linspace(-2.5,3.5)*VETORES[1][0]/VETORES[0][0]))
plt.title('LDA - 2DIM')
plt.show()
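# --- Illustrative sanity check (an addition, not in the original script) ---
# Compare the hand-rolled projections above with scikit-learn's PCA and LDA.
# The columns may differ by sign (eigenvectors are only defined up to sign)
# and, for the PCA, by a constant shift, since the script projects the
# uncentered data M rather than DataAdjust.
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
print(PCA(n_components=2).fit_transform(M)[:3])
print(LinearDiscriminantAnalysis(n_components=2).fit_transform(M, y)[:3])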
|
[
"noreply@github.com"
] |
jkenjii.noreply@github.com
|
34c01797f0282029df11bc22b388cb49fb6a09a6
|
503b6053e7ab860b3c7aa63c8d273cce355a810f
|
/signup/views.py
|
d44a7f6b1449c47420a76cb81bf6bcd2195798fb
|
[] |
no_license
|
Abdulrahman-ahmed25/NewsWebsite
|
2c97f797a6691b825a7af2e27ccb84ebdf87c03a
|
6a9d0a3fba4c2cd379869eb5b5b2cefe77f96588
|
refs/heads/main
| 2023-03-05T22:34:18.835943
| 2021-02-19T21:50:59
| 2021-02-19T21:50:59
| 340,489,298
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
from django.shortcuts import redirect, render
from .forms import RegisterForm
# Create your views here.
def register_view(response):
if response.method == 'POST':
form = RegisterForm(response.POST)
if form.is_valid():
form.save()
return redirect('/')
else:
form = RegisterForm()
return render(response, "registration/signup.html", {"form": form})
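# Illustrative sketch only: RegisterForm is imported from ./forms.py, which
# is not shown in this file. A typical definition would extend Django's
# UserCreationForm, e.g. (hypothetical):
#
#   from django import forms
#   from django.contrib.auth.forms import UserCreationForm
#   from django.contrib.auth.models import User
#
#   class RegisterForm(UserCreationForm):
#       email = forms.EmailField()
#
#       class Meta:
#           model = User
#           fields = ['username', 'email', 'password1', 'password2']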
|
[
"abdulrahman.ahmed2544@gmail.com"
] |
abdulrahman.ahmed2544@gmail.com
|
2cd81fb6ee311c28a0da140a8657eb593b4c3da1
|
9f4fd09a41e0757e51faca7876e89b8e687c7305
|
/venv/bin/pyi-archive_viewer
|
b417a49a6df44c5b0b719685d0f361d9de179ae9
|
[] |
no_license
|
godisu524/pyqt_practice
|
573b88b456e7fe90f42f8259921035603e1bfb73
|
971990d9034570ad100d35550b25f75684ff6c37
|
refs/heads/main
| 2023-01-28T12:25:33.879026
| 2020-12-03T08:45:49
| 2020-12-03T08:45:49
| 318,129,234
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
#!/Users/1thefull/PycharmProjects/exchange_program/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from PyInstaller.utils.cliutils.archive_viewer import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
|
[
"godisu524@gamil.com"
] |
godisu524@gamil.com
|
|
0d1390ccdc5d2a49c85abd1457f0502cd1e7dfd5
|
ca471f6ed1bb070b49af70cfa72c2d917bbfb276
|
/venv/bin/pip3
|
35e8b8af0434a1726a30edfeca2b36511fec8597
|
[] |
no_license
|
marios-keri/anagram_project
|
8254d1279a519677ea414b7c52b93fda6959bad8
|
608184cace3d326a0406142cc5413d4f6f9f388e
|
refs/heads/master
| 2021-03-16T02:16:18.891635
| 2020-03-12T19:01:19
| 2020-03-12T19:01:19
| 246,895,581
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 400
|
#!/home/developer/back_end_projects/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
|
[
"jack.daniel44@outlook.com"
] |
jack.daniel44@outlook.com
|
|
b91659cf4878bc5be2dfaf898aaaa0832d4e6d7d
|
d97d82dfa85d5f2be0448ceb480cb1ba56bb4eb2
|
/src/graph.py
|
96cee5fcfafbe13f654ca3620db0134612632969
|
[] |
no_license
|
Aesthetician/alg_project
|
df2986dbb03d890383a44a45391642fb60899831
|
c19e23fb62151329cee7efbfd275f47e3743c551
|
refs/heads/master
| 2020-08-05T18:57:54.564732
| 2019-10-03T19:53:40
| 2019-10-03T19:53:40
| 212,666,693
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,064
|
py
|
import random
import math
MAXWEIGHT = 50
MINWEIGHT = 1
MAXDIST = math.inf
class Vertex:
def __init__(self, key):
self.id = key
self.connectedTo = {} # format: vertex -> weight
self.dist = MAXDIST
self.pred_id = -1
self.heap_idx = 0
self.color = 0 # 0: white, 1:black, 2: gray
def addNeighbor(self, nbr, weight = 0):
self.connectedTo[nbr] = weight
def getConnections(self):
return self.connectedTo.keys()
def getConnectionsID(self):
connList = []
for v in self.connectedTo:
connList.append(v.getId())
return connList
def getWeight(self, nbr):
return self.connectedTo[nbr]
def getId(self):
return self.id
def getDist(self):
return self.dist
def setDist(self, d):
self.dist = d
def getPred(self):
return self.pred_id
def setPredID(self, pid):
self.pred_id = pid
def getColor(self):
return self.color
def setColor(self, c):
self.color = c
class Graph:
def __init__(self):
self.vertList = {} # format: key(id) -> vertex
self.numVertices = 0
self.numEdges = 0
self.edgeList = []
def addVertex(self, key):
self.numVertices = self.numVertices + 1
newVertex = Vertex(key)
self.vertList[key] = newVertex
return newVertex
def getVertex(self, n):
if n in self.vertList:
return self.vertList[n]
else:
return None
# v and w are keys
def addEdge(self, v, w, weight = MINWEIGHT):
if v not in self.vertList:
nv = self.addVertex(v)
if w not in self.vertList:
nv = self.addVertex(w)
self.numEdges = self.numEdges + 1
self.vertList[v].addNeighbor(self.vertList[w], weight)
self.vertList[w].addNeighbor(self.vertList[v], weight)
def getVertices(self):
return self.vertList.keys()
def genWeight(self):
return random.randint(MINWEIGHT,MAXWEIGHT)
# check if u is neighbor of v
def isNeighbor(self, v, u):
if self.vertList[u] in self.vertList[v].connectedTo:
return True
else:
return False
def info(self):
print('# vertices: %d, # edges: %d' % (self.numVertices, self.numEdges))
print('degree: %f' % (2 * self.numEdges / self.numVertices))
def buildCycle(self, givenNumVertices):
urn = BallUrn(givenNumVertices)
crtvid = urn.drawBall()
self.addVertex(crtvid)
vid1st = crtvid
for i in range(givenNumVertices):
            if i == (givenNumVertices - 1):
                # Close the cycle: vid1st is already in the graph, so do not
                # add it again (addVertex would overwrite it, dropping its
                # existing edges and leaving numVertices one too high).
                vid = vid1st
            else:
                vid = urn.drawBall()
                self.addVertex(vid)
#weight = random.randint(MINWEIGHT,MAXWEIGHT)
weight = self.genWeight()
self.addEdge(crtvid, vid, weight)
crtvid = vid
#def __contains__(self,n):
# return n in self.vertList
def collectEdges(self):
self.edgeList = []
for vid in self.vertList.keys(): # list key
#print(vid)
v = self.vertList[vid]
for w in v.getConnections():
edge = [v.connectedTo[w], v.getId(), w.getId()] # weight, vid, wid
self.edgeList.append(edge)
    def showEdge(self):
        print(self.edgeList)
def getEdgeList(self):
return self.edgeList
def __iter__(self):
return iter(self.vertList.values())
class BallUrn:
def __init__(self, num):
self.numBall = num
self.ballList = []
self.idxEnd = self.numBall - 1
self.iniBallList()
def iniBallList(self):
for i in range(0,self.numBall):
self.ballList.append(i)
def drawBall(self):
idx = random.randint(0,self.idxEnd)
draw = self.ballList[idx]
self.ballList[idx] = self.ballList[self.idxEnd]
self.numBall = self.numBall - 1
self.updateIdxEnd()
return draw
def updateIdxEnd(self):
self.idxEnd = self.numBall - 1
class Graph1(Graph):
def __init__(self, num, degree):
super(Graph1, self).__init__()
self.buildCycle(num)
self.buildEdges(degree)
def buildEdges(self, degree):
idxEnd = self.numVertices - 1
totalEdges = int (degree * self.numVertices / 2)
numAdded = totalEdges - self.numEdges
skip = 0
i = 0
while i < numAdded:
v = random.randint(0, idxEnd)
w = random.randint(0, idxEnd)
while v == w:
w = random.randint(0, idxEnd)
if self.isNeighbor(v, w):
skip = skip + 1
#print('v=%d, w=%d' % (v, w))
else:
self.addEdge(v, w, self.genWeight())
i = i + 1
print('i: %d, skip: %d' % (i, skip))
class Graph2(Graph):
def __init__(self, num, adjRate):
super(Graph2, self).__init__()
self.buildCycle(num)
self.buildEdges(adjRate)
def buildEdges(self, adjRate):
arr = []
for key in self.vertList:
arr.append(key)
sizeVert = len(arr)
total = 0
skip = 0
hit = 0
for i in range(0, sizeVert):
for j in range(i + 1, sizeVert):
p = random.random()
total = total + 1
if p < adjRate:
if self.vertList[arr[j]] in self.vertList[arr[i]].connectedTo:
#print('%d is adj of %d' % (arr[j], arr[i]))
skip = skip + 1
else:
#print(p)
hit = hit + 1
self.addEdge(arr[i], arr[j])
print('# of added: %d, total: %d, rate: %f, skip: %d' % (hit, total, hit/total, skip))
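# --- Illustrative usage sketch (not part of the original module) ---
if __name__ == '__main__':
    g = Graph1(10, 4)   # 10-vertex cycle, then extra random edges up to degree ~4
    g.info()
    g.collectEdges()
    g.showEdge()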
|
[
"admin@Zs-MacBook-Pro.local"
] |
admin@Zs-MacBook-Pro.local
|
55f5a74c3d40820adb172dd1cc886d295f8cf759
|
384bc4a7cd5a35bca8de83900aeb0664a097816a
|
/dbe/blog/migrations/0002_auto__add_post__add_comment.py
|
3e02cb730e2b9b4fb0c179ca33fd8fdb8af3ebbd
|
[] |
no_license
|
gzpgg3x/lightbirddjango1.5blog
|
86be058328c3cf79edba55241b71732c906823f6
|
8f38efff987680cdbc810b9b85bda8c8aaa35e52
|
refs/heads/master
| 2021-01-01T05:49:47.572387
| 2013-05-27T03:14:10
| 2013-05-27T03:14:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,625
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Post'
db.create_table(u'blog_post', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=60)),
('body', self.gf('django.db.models.fields.TextField')()),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'blog', ['Post'])
# Adding model 'Comment'
db.create_table(u'blog_comment', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('author', self.gf('django.db.models.fields.CharField')(max_length=60, blank=True)),
('body', self.gf('django.db.models.fields.TextField')()),
('post', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='comments', null=True, to=orm['blog.Post'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'blog', ['Comment'])
def backwards(self, orm):
# Deleting model 'Post'
db.delete_table(u'blog_post')
# Deleting model 'Comment'
db.delete_table(u'blog_comment')
models = {
u'blog.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': u"orm['blog.Post']"})
},
u'blog.post': {
'Meta': {'ordering': "['-created']", 'object_name': 'Post'},
'body': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '60'})
}
}
complete_apps = ['blog']
|
[
"gzpgg3x@yahoo.com"
] |
gzpgg3x@yahoo.com
|
7950b70cacf658f1a2572b734bf23c5f6b7dbe42
|
3e55b75b7b461507eb7f145d018cae1fc9d29908
|
/mnistmodel.py
|
c7f07b047088fa3f7bcada709f2ad1f1fe798d18
|
[] |
no_license
|
tfdeepnet/testpack
|
4c46b6a5252a102f201ba974a7443e7143280e0b
|
e6091a46f6869214f9d5ec5ac302e2e14aff877c
|
refs/heads/master
| 2022-12-12T12:38:26.240446
| 2020-09-12T16:04:00
| 2020-09-12T16:04:00
| 293,485,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,633
|
py
|
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
class InitModelParams:
def __init__(self, num_splits = 1):
self.modelHyperParamDict = {"GBN_NUM_SPLITS": num_splits}
def getModelHyperParamDict(self):
return self.modelHyperParamDict
def clearModelHyperParamDict(self):
self.modelHyperParamDict.clear()
imp = InitModelParams()
class GhostBatchNorm(nn.BatchNorm2d):
"""
From : https://github.com/davidcpage/cifar10-fast/blob/master/bag_of_tricks.ipynb
Batch norm seems to work best with batch size of around 32. The reasons presumably have to do
with noise in the batch statistics and specifically a balance between a beneficial regularising effect
at intermediate batch sizes and an excess of noise at small batches.
Our batches are of size 512 and we can't afford to reduce them without taking a serious hit on training times,
but we can apply batch norm separately to subsets of a training batch. This technique, known as 'ghost' batch
norm, is usually used in a distributed setting but is just as useful when using large batches on a single node.
It isn't supported directly in PyTorch but we can roll our own easily enough.
"""
def __init__(self, num_features, num_splits, eps=1e-05, momentum=0.1, weight=True, bias=True):
super(GhostBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum)
self.weight.data.fill_(1.0)
self.bias.data.fill_(0.0)
self.weight.requires_grad = weight
self.bias.requires_grad = bias
self.num_splits = num_splits
self.register_buffer('running_mean', torch.zeros(num_features * self.num_splits))
self.register_buffer('running_var', torch.ones(num_features * self.num_splits))
def train(self, mode=True):
if (self.training is True) and (mode is False):
self.running_mean = torch.mean(self.running_mean.view(self.num_splits, self.num_features), dim=0).repeat \
(self.num_splits)
self.running_var = torch.mean(self.running_var.view(self.num_splits, self.num_features), dim=0).repeat \
(self.num_splits)
return super(GhostBatchNorm, self).train(mode)
def forward(self, input):
N, C, H, W = input.shape
if self.training or not self.track_running_stats:
return F.batch_norm(
input.view(-1, C * self.num_splits, H, W), self.running_mean, self.running_var,
self.weight.repeat(self.num_splits), self.bias.repeat(self.num_splits),
True, self.momentum, self.eps).view(N, C, H, W)
else:
return F.batch_norm(
input, self.running_mean[:self.num_features], self.running_var[:self.num_features],
self.weight, self.bias, False, self.momentum, self.eps)
class BNNet(nn.Module):
def __init__(self):
super(BNNet, self).__init__()
# Input Block
c_in = 1
c_out = 8
# print("running BN network")
self.convblock1 = nn.Sequential(
nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=(3, 3), padding=0, bias=False),
nn.BatchNorm2d(c_out),
nn.ReLU()
) # output_size = 26
# CONVOLUTION BLOCK 1
c_in = 8
c_out = 8
self.convblock2 = nn.Sequential(
nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=(3, 3), padding=0, bias=False),
nn.BatchNorm2d(c_out),
nn.ReLU()
) # output_size = 24
c_in = 8
c_out = 8
self.convblock3 = nn.Sequential(
nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=(3, 3), padding=0, bias=False),
nn.BatchNorm2d(c_out),
nn.ReLU()
) # output_size = 22
# TRANSITION BLOCK 1
self.pool1 = nn.MaxPool2d(2, 2) # output_size = 11
c_in = 8
c_out = 16
self.convblock4 = nn.Sequential(
nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=(3, 3), padding=0, bias=False),
nn.BatchNorm2d(c_out),
nn.ReLU()
        ) # output_size = 9
# CONVOLUTION BLOCK 2
c_in = 16
c_out = 16
self.convblock5 = nn.Sequential(
nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=(3, 3), padding=0, bias=False),
nn.BatchNorm2d(c_out),
nn.ReLU()
        ) # output_size = 7
c_in = 16
c_out = 16
self.convblock6 = nn.Sequential(
nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=(3, 3), padding=0, bias=False),
nn.BatchNorm2d(c_out),
nn.ReLU()
        ) # output_size = 5
c_in = 16
c_out = 10
self.convblock9 = nn.Sequential(
nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=(1, 1), padding=0, bias=False),
# nn.ReLU() NEVER!
        ) # output_size = 5 (spatial size unchanged by the 1x1 conv)
# OUTPUT BLOCK
self.gap = nn.Sequential(
nn.AvgPool2d(kernel_size=5)
) # output_size = 1
def forward(self, x):
x = self.convblock1(x)
x = self.convblock2(x)
x = self.convblock3(x)
x = self.pool1(x)
x = self.convblock4(x)
x = self.convblock5(x)
x = self.convblock6(x)
x = self.convblock9(x)
x = self.gap(x)
x = x.view(-1, 10)
return F.log_softmax(x, dim=-1)
class GBNNet(nn.Module):
def __init__(self, gbn_splits=0):
super(GBNNet, self).__init__()
self.GBN_NUM_SPLITS = gbn_splits
# Input Block
c_in = 1
c_out = 8
# print("running GBN network")
self.convblock1 = nn.Sequential(
nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=(3, 3), padding=0, bias=False),
GhostBatchNorm(c_out, num_splits=self.GBN_NUM_SPLITS, weight=False),
nn.ReLU()
) # output_size = 26
# CONVOLUTION BLOCK 1
c_in = 8
c_out = 8
self.convblock2 = nn.Sequential(
nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=(3, 3), padding=0, bias=False),
GhostBatchNorm(c_out, num_splits=self.GBN_NUM_SPLITS, weight=False),
nn.ReLU()
) # output_size = 24
c_in = 8
c_out = 8
self.convblock3 = nn.Sequential(
nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=(3, 3), padding=0, bias=False),
GhostBatchNorm(c_out, num_splits=self.GBN_NUM_SPLITS, weight=False),
nn.ReLU()
) # output_size = 22
# TRANSITION BLOCK 1
self.pool1 = nn.MaxPool2d(2, 2) # output_size = 11
c_in = 8
c_out = 16
self.convblock4 = nn.Sequential(
nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=(3, 3), padding=0, bias=False),
GhostBatchNorm(c_out, num_splits=self.GBN_NUM_SPLITS, weight=False),
nn.ReLU()
        ) # output_size = 9
# CONVOLUTION BLOCK 2
c_in = 16
c_out = 16
self.convblock5 = nn.Sequential(
nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=(3, 3), padding=0, bias=False),
GhostBatchNorm(c_out, num_splits=self.GBN_NUM_SPLITS, weight=False),
nn.ReLU()
        ) # output_size = 7
c_in = 16
c_out = 16
self.convblock6 = nn.Sequential(
nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=(3, 3), padding=0, bias=False),
GhostBatchNorm(c_out, num_splits=self.GBN_NUM_SPLITS, weight=False),
nn.ReLU()
        ) # output_size = 5
c_in = 16
c_out = 10
self.convblock9 = nn.Sequential(
nn.Conv2d(in_channels=c_in, out_channels=c_out, kernel_size=(1, 1), padding=0, bias=False),
# nn.ReLU() NEVER!
        ) # output_size = 5 (spatial size unchanged by the 1x1 conv)
# OUTPUT BLOCK
self.gap = nn.Sequential(
nn.AvgPool2d(kernel_size=5)
) # output_size = 1
def forward(self, x):
x = self.convblock1(x)
x = self.convblock2(x)
x = self.convblock3(x)
x = self.pool1(x)
x = self.convblock4(x)
x = self.convblock5(x)
x = self.convblock6(x)
x = self.convblock9(x)
x = self.gap(x)
x = x.view(-1, 10)
return F.log_softmax(x, dim=-1)
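# --- Illustrative smoke test (not part of the original module) ---
# Run a dummy MNIST-shaped batch through both networks. For GBNNet the batch
# must be compatible with the number of ghost splits; 4 splits over a batch
# of 8 gives ghost batches of size 2.
if __name__ == '__main__':
    x = torch.randn(8, 1, 28, 28)
    print(BNNet()(x).shape)                # torch.Size([8, 10])
    print(GBNNet(gbn_splits=4)(x).shape)   # torch.Size([8, 10])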
|
[
"deepchin@users.noreply.github.com"
] |
deepchin@users.noreply.github.com
|
ea7e891098362b4e74144062a751a6c89d0d85e0
|
d1778e304965215253df54503265fd06d16eb406
|
/setup.py
|
e696605b0f05a77ab92d9eb8e5a8204c8709c4ed
|
[] |
no_license
|
junwen29/GDELT
|
1e33b136cc54f3348e92f23036c68706253b0898
|
75d6a7596b23810db16cf62e361b88f5072b162c
|
refs/heads/master
| 2022-12-14T03:27:33.592742
| 2020-05-18T07:34:37
| 2020-05-18T07:34:37
| 181,330,796
| 0
| 1
| null | 2022-12-08T05:20:58
| 2019-04-14T15:39:15
|
Python
|
UTF-8
|
Python
| false
| false
| 325
|
py
|
from distutils.core import setup
setup(name='GDELTApp',
version='1.0',
description='GDELT event parser',
author='Jun Wen ',
author_email='tsang.jw@csit.gov.sg',
requires=['beautifulsoup4', 'goose3', 'numpy', 'requests',
'schedule', 'PyYAML', 'feedparser', 'datefinder']
)
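# Illustrative usage (standard distutils workflow, not from this repo's docs):
#   $ python setup.py install    # build and install GDELTApp
#   $ python setup.py sdist      # create a source distribution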
|
[
"tsang.jw@csit.gov.sg"
] |
tsang.jw@csit.gov.sg
|
e0889589ddbcb23508b08e0c18ae3c5e4d2dac7a
|
5f2fef0a334a52a50891332f1d77bec08f8f1ce3
|
/alembic_versions/c9f4818e2073_add_queue.py
|
ccaa2215ac7a51a7925fd69b35ee6d2e2cc04794
|
[] |
no_license
|
vcslav-v/tgame_engine
|
8361782d558db315521b65ff720a5d1cc8898b5d
|
e43445acf5348333515d82ad5c400fde27ebe8af
|
refs/heads/master
| 2023-02-22T12:39:55.319935
| 2021-01-21T11:55:52
| 2021-01-21T11:55:52
| 305,366,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 549
|
py
|
"""add queue
Revision ID: c9f4818e2073
Revises: eb33ce5b86d2
Create Date: 2020-10-26 11:59:14.004296
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'c9f4818e2073'
down_revision = 'eb33ce5b86d2'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
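# Illustrative usage note (standard Alembic workflow, not from this repo):
#   $ alembic upgrade c9f4818e2073     # apply this (currently empty) revision
#   $ alembic downgrade eb33ce5b86d2   # step back to the previous revision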
|
[
"donn.nus@gmail.com"
] |
donn.nus@gmail.com
|
3f4368c087dfdcf546ba400ce4d810d55676d758
|
64d9a011700705184c993d059048202dc9cb1aaa
|
/Lab4/powersoftwo.py
|
fd9f5104665d83c350d33f0b29cf00e0d17ff129
|
[] |
no_license
|
zuke0000/Python-Projects
|
6966a66454f03ff7372732e8dbfbddfbaac5ebcd
|
d5a145c5dfcfd3fd320fd5e79dd207eb545caf70
|
refs/heads/master
| 2020-08-15T15:08:34.343741
| 2019-10-15T17:49:57
| 2019-10-15T17:49:57
| 215,361,434
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 288
|
py
|
import sys
# Accept positive integer n as a command-line argument.
# Write to standard output a table showing the powers of
# two from 2^0 through 2^n (n+1 lines in total).
n = int(sys.argv[1])
power = 1
i = 0
while i <= n:
# Write the ith power of 2.
print(str(i) + ' ' + str(power))
power *= 2
i = i + 1
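# Example run (illustrative):
#   $ python powersoftwo.py 5
#   0 1
#   1 2
#   2 4
#   3 8
#   4 16
#   5 32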
|
[
"noreply@github.com"
] |
zuke0000.noreply@github.com
|
85015462b7fad492dfee84c8518f18f49422d22c
|
63b3f91852b6ba722753154335ee3c56e7632f35
|
/Python Program Design By Dong Fu Guo/P1_PythonBasic_01_安装、版本.py
|
e6cd54aa73d9d550a8adfdb1a10be7ae36dd2509
|
[] |
no_license
|
yaoleistable/PythonBase
|
dbe94dc7d030eac8718cb8053ae4955e2253842d
|
4214deb90efbfe46b744f741ec055d4e8c7c677e
|
refs/heads/master
| 2020-08-10T17:59:55.391985
| 2019-10-28T13:10:02
| 2019-10-28T13:10:02
| 212,232,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 871
|
py
|
#!//miniconda3/envs/py36/bin/
# -*- coding: UTF-8 -*-
"""
@author: YaoLei
@license: (C) Copyright 2011-2019.
@contact: yaoleistable@gmail.com
@software: Lei
@file: P5_PythonBasic_05_内置函数1.py
@time: 2019/10/4 11:33 AM
"""
# This code collection accompanies the study of "Dong Fuguo's Python tutorial series"; the study material comes from: https://www.bilibili.com/medialist/play/ml736518914
# --Python version: use whichever version you prefer; the very latest release is generally not recommended, a mature version works better
# --Python installation: download from the official site, or use Anaconda / miniconda3 (recommended; managing virtual environments is very convenient)
# --Python official site: https://www.python.org/ , official documentation: https://docs.python.org , Chinese documentation: https://docs.python.org/zh-cn/3/
# --Anaconda official site: https://www.anaconda.com/
name = input("Please enter your name: ")  # input
print(name)  # print the output
|
[
"815141681@qq.com"
] |
815141681@qq.com
|
ad3e0607892cce2033280b983f86c3824f8ab91c
|
4055417ef944ecb748f68ca96263456dd6e7b06d
|
/tests/test_add_digits.py
|
bdb5e6c4760f7f53f7dbda76797af6062b56c3b5
|
[] |
no_license
|
python-workshop/fsdse-python-assignment-190
|
e56154112c75436196d882adf0f807bd6e5a0464
|
bb8db6a49e4a95f11e5504ed57671d573811244c
|
refs/heads/master
| 2020-12-30T12:43:45.949081
| 2017-05-23T06:26:59
| 2017-05-23T06:26:59
| 91,347,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
from unittest import TestCase
class TestAdd_digits(TestCase):
def test_add_digits(self):
try:
from build import add_digits
except ImportError:
self.assertFalse("no function found")
self.assertRaises(TypeError, add_digits, None)
self.assertRaises(ValueError, add_digits, -1)
self.assertEqual(add_digits(0), 0)
self.assertEqual(add_digits(9), 9)
self.assertEqual(add_digits(138), 3)
self.assertEqual(add_digits(65536), 7)
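# Illustrative sketch (hypothetical, not part of this repo): an add_digits
# in build.py that satisfies the assertions above via the digital-root
# identity dr(n) = 1 + (n - 1) % 9 for n > 0:
#
#   def add_digits(n):
#       if not isinstance(n, int):
#           raise TypeError('n must be an integer')
#       if n < 0:
#           raise ValueError('n must be non-negative')
#       return 0 if n == 0 else 1 + (n - 1) % 9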
|
[
"sangam.angre@gmail.com"
] |
sangam.angre@gmail.com
|
8e83b2f83d6a6b624890e3e8cec95ee12b81ae66
|
ea10719a647bbc8968b3ea0ac4ddefef96a55095
|
/boschBackend/boschBackend/wsgi.py
|
b5d72cf6832fbac8b4ebf7bccd3110789c9e8cf9
|
[] |
no_license
|
DestroyerAlpha/BOSCH-Traffic-Sign-Recognition
|
d98ddf7772fbb13559eba0a87c888dedc9e893d8
|
6c2abaaf391eb812895fab988bf891baa59f3f2d
|
refs/heads/main
| 2023-03-25T07:29:28.761558
| 2021-03-25T18:13:59
| 2021-03-25T18:13:59
| 344,191,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for boschBackend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'boschBackend.settings')
application = get_wsgi_application()
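# Illustrative deployment note (assumption, standard WSGI usage): a WSGI
# server imports the `application` object above, e.g.
#   $ gunicorn boschBackend.wsgi:application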
|
[
"saisurya3127@gmail.com"
] |
saisurya3127@gmail.com
|
c765540cf530f57cf0c513a515921bff7c029c0b
|
7d32b2616250c6376591f671440577da266d5381
|
/nn/cfg/train_eem.cfg.py
|
ebea83edb1377e4ffcaaec35bfc4dbdb82fd5540
|
[] |
no_license
|
rmanzoni/plotter
|
4f3a3561d36e935b729adef7584e46a196166d74
|
65bf060d6e89b5f39f5297a8340c2533bf0ea254
|
refs/heads/master
| 2020-09-06T05:13:29.309928
| 2020-08-24T10:38:41
| 2020-08-24T10:38:41
| 220,333,423
| 2
| 2
| null | 2020-07-07T20:21:26
| 2019-11-07T21:37:12
|
Python
|
UTF-8
|
Python
| false
| false
| 2,398
|
py
|
import numpy as np
from NN.nn_trainer import Trainer
from plotter.selections import Selections
from plotter.utils import set_paths
from collections import OrderedDict
from os import environ as env
ch = 'eem'
set_paths(ch, 2018)
cuts = Selections(ch)
selection = [
cuts.selections['pt_iso'],
cuts.selections['baseline'],
cuts.selections['sideband'],
cuts.selections['vetoes_01_OS'],
]
composed_features = OrderedDict()
# composed_features['abs_l0_eta' ] = lambda df : np.abs(df.l0_eta)
composed_features['abs_l1_eta' ] = lambda df : np.abs(df.l1_eta)
composed_features['abs_l2_eta' ] = lambda df : np.abs(df.l2_eta)
# composed_features['log_abs_l0_dxy'] = lambda df : np.log10(np.abs(df.l0_dxy))
# composed_features['log_abs_l0_dz' ] = lambda df : np.log10(np.abs(df.l0_dz ))
# composed_features['log_abs_l1_dxy'] = lambda df : np.log10(np.abs(df.l1_dxy))
composed_features['log_abs_l1_dz' ] = lambda df : np.log10(np.abs(df.l1_dz ))
# composed_features['log_abs_l2_dxy'] = lambda df : np.log10(np.abs(df.l2_dxy))
composed_features['log_abs_l2_dz' ] = lambda df : np.log10(np.abs(df.l2_dz ))
composed_features['abs_q_01' ] = lambda df : np.abs(df.hnl_q_01)
trainer = Trainer (channel = ch,
base_dir = env['NTUPLE_DIR'],
#post_fix = 'HNLTreeProducer_%s/tree.root' %ch,
post_fix = 'HNLTreeProducer/tree.root',
features = [#'l0_pt' ,
'l1_pt' ,
'l2_pt' ,
'hnl_dr_12' ,
'hnl_m_12' ,
'sv_prob' ,
'hnl_2d_disp' ,],
composed_features = composed_features,
selection_data = selection,
selection_mc = selection + [cuts.selections['is_prompt_lepton']],
selection_tight = cuts.selections_pd['tight'],
lumi = 59700.,
# epochs = 100,
# early_stopping = False,
)
if __name__ == '__main__':
trainer.train()
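# Illustrative note (not part of the original config): each entry in
# composed_features is a callable applied to the ntuple dataframe, e.g.
#   import pandas as pd
#   df = pd.DataFrame({'l1_eta': [-1.2, 0.5]})
#   composed_features['abs_l1_eta'](df)   # -> Series([1.2, 0.5])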
|
[
"riccardo.manzoni@cern.ch"
] |
riccardo.manzoni@cern.ch
|
ae3b769f624c601b93e58f949baae381c0111afc
|
09efb7c148e82c22ce6cc7a17b5140aa03aa6e55
|
/env/lib/python3.6/site-packages/plotly/graph_objs/treemap/__init__.py
|
7bb16f7662718e69b1ad538f7ed21d4fb3d06f27
|
[
"MIT"
] |
permissive
|
harryturr/harryturr_garmin_dashboard
|
53071a23b267116e1945ae93d36e2a978c411261
|
734e04f8257f9f84f2553efeb7e73920e35aadc9
|
refs/heads/master
| 2023-01-19T22:10:57.374029
| 2020-01-29T10:47:56
| 2020-01-29T10:47:56
| 235,609,069
| 4
| 0
|
MIT
| 2023-01-05T05:51:27
| 2020-01-22T16:00:13
|
Python
|
UTF-8
|
Python
| false
| false
| 114,539
|
py
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tiling(_BaseTraceHierarchyType):
# flip
# ----
@property
def flip(self):
"""
Determines if the positions obtained from solver are flipped on
each axis.
The 'flip' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y'] joined with '+' characters
(e.g. 'x+y')
Returns
-------
Any
"""
return self["flip"]
@flip.setter
def flip(self, val):
self["flip"] = val
# packing
# -------
@property
def packing(self):
"""
Determines d3 treemap solver. For more info please refer to
https://github.com/d3/d3-hierarchy#treemap-tiling
The 'packing' property is an enumeration that may be specified as:
- One of the following enumeration values:
['squarify', 'binary', 'dice', 'slice', 'slice-dice',
'dice-slice']
Returns
-------
Any
"""
return self["packing"]
@packing.setter
def packing(self, val):
self["packing"] = val
# pad
# ---
@property
def pad(self):
"""
Sets the inner padding (in px).
The 'pad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["pad"]
@pad.setter
def pad(self, val):
self["pad"] = val
# squarifyratio
# -------------
@property
def squarifyratio(self):
"""
When using "squarify" `packing` algorithm, according to https:/
/github.com/d3/d3-hierarchy/blob/master/README.md#squarify_rati
o this option specifies the desired aspect ratio of the
generated rectangles. The ratio must be specified as a number
greater than or equal to one. Note that the orientation of the
generated rectangles (tall or wide) is not implied by the
ratio; for example, a ratio of two will attempt to produce a
mixture of rectangles whose width:height ratio is either 2:1 or
1:2. When using "squarify", unlike d3 which uses the Golden
Ratio i.e. 1.618034, Plotly applies 1 to increase squares in
treemap layouts.
The 'squarifyratio' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["squarifyratio"]
@squarifyratio.setter
def squarifyratio(self, val):
self["squarifyratio"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
flip
Determines if the positions obtained from solver are
flipped on each axis.
packing
Determines d3 treemap solver. For more info please
refer to https://github.com/d3/d3-hierarchy#treemap-
tiling
pad
Sets the inner padding (in px).
squarifyratio
When using "squarify" `packing` algorithm, according to
https://github.com/d3/d3-hierarchy/blob/master/README.m
d#squarify_ratio this option specifies the desired
aspect ratio of the generated rectangles. The ratio
must be specified as a number greater than or equal to
one. Note that the orientation of the generated
rectangles (tall or wide) is not implied by the ratio;
for example, a ratio of two will attempt to produce a
mixture of rectangles whose width:height ratio is
either 2:1 or 1:2. When using "squarify", unlike d3
which uses the Golden Ratio i.e. 1.618034, Plotly
applies 1 to increase squares in treemap layouts.
"""
def __init__(
self, arg=None, flip=None, packing=None, pad=None, squarifyratio=None, **kwargs
):
"""
Construct a new Tiling object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Tiling
flip
Determines if the positions obtained from solver are
flipped on each axis.
packing
Determines d3 treemap solver. For more info please
refer to https://github.com/d3/d3-hierarchy#treemap-
tiling
pad
Sets the inner padding (in px).
squarifyratio
When using "squarify" `packing` algorithm, according to
https://github.com/d3/d3-hierarchy/blob/master/README.m
d#squarify_ratio this option specifies the desired
aspect ratio of the generated rectangles. The ratio
must be specified as a number greater than or equal to
one. Note that the orientation of the generated
rectangles (tall or wide) is not implied by the ratio;
for example, a ratio of two will attempt to produce a
mixture of rectangles whose width:height ratio is
either 2:1 or 1:2. When using "squarify", unlike d3
which uses the Golden Ratio i.e. 1.618034, Plotly
applies 1 to increase squares in treemap layouts.
Returns
-------
Tiling
"""
super(Tiling, self).__init__("tiling")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Tiling
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Tiling"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import tiling as v_tiling
# Initialize validators
# ---------------------
self._validators["flip"] = v_tiling.FlipValidator()
self._validators["packing"] = v_tiling.PackingValidator()
self._validators["pad"] = v_tiling.PadValidator()
self._validators["squarifyratio"] = v_tiling.SquarifyratioValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("flip", None)
self["flip"] = flip if flip is not None else _v
_v = arg.pop("packing", None)
self["packing"] = packing if packing is not None else _v
_v = arg.pop("pad", None)
self["pad"] = pad if pad is not None else _v
_v = arg.pop("squarifyratio", None)
self["squarifyratio"] = squarifyratio if squarifyratio is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
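# Illustrative usage sketch (not part of the generated module): Tiling
# options are normally supplied through a treemap trace, e.g.
#
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Treemap(
#       labels=['root', 'A', 'B'], parents=['', 'root', 'root'],
#       tiling=dict(packing='binary', pad=4, flip='x'),
#   ))
#   fig.show()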
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Textfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Textfont object
Sets the font used for `textinfo`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Textfont
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Textfont
"""
super(Textfont, self).__init__("textfont")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Textfont
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Textfont"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import textfont as v_textfont
# Initialize validators
# ---------------------
self._validators["color"] = v_textfont.ColorValidator()
self._validators["colorsrc"] = v_textfont.ColorsrcValidator()
self._validators["family"] = v_textfont.FamilyValidator()
self._validators["familysrc"] = v_textfont.FamilysrcValidator()
self._validators["size"] = v_textfont.SizeValidator()
self._validators["sizesrc"] = v_textfont.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("familysrc", None)
self["familysrc"] = familysrc if familysrc is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Stream(_BaseTraceHierarchyType):
# maxpoints
# ---------
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
# token
# -----
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://plot.ly/settings for more details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Stream
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://plot.ly/settings for more
details.
Returns
-------
Stream
"""
super(Stream, self).__init__("stream")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Stream
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Stream"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import stream as v_stream
# Initialize validators
# ---------------------
self._validators["maxpoints"] = v_stream.MaxpointsValidator()
self._validators["token"] = v_stream.TokenValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("maxpoints", None)
self["maxpoints"] = maxpoints if maxpoints is not None else _v
_v = arg.pop("token", None)
self["token"] = token if token is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Pathbar(_BaseTraceHierarchyType):
# edgeshape
# ---------
@property
def edgeshape(self):
"""
        Determines which shape is used for edges between `pathbar`
labels.
The 'edgeshape' property is an enumeration that may be specified as:
- One of the following enumeration values:
['>', '<', '|', '\\']
- A string that matches one of the following regular expressions:
['']
Returns
-------
Any
"""
return self["edgeshape"]
@edgeshape.setter
def edgeshape(self, val):
self["edgeshape"] = val
# side
# ----
@property
def side(self):
"""
        Determines on which side of the treemap the `pathbar`
should be presented.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
# textfont
# --------
@property
def textfont(self):
"""
Sets the font used inside `pathbar`.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of plotly.graph_objs.treemap.pathbar.Textfont
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on plot.ly for color
.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
family .
size
sizesrc
Sets the source reference on plot.ly for size
.
Returns
-------
plotly.graph_objs.treemap.pathbar.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness of `pathbar` (in px). If not specified the
        `pathbar.textfont.size` is used with 3 pixels extra padding on
each side.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [12, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# visible
# -------
@property
def visible(self):
"""
Determines if the path bar is drawn i.e. outside the trace
`domain` and with one pixel gap.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
edgeshape
Determines which shape is used for edges between
            `pathbar` labels.
side
            Determines on which side of the treemap the
`pathbar` should be presented.
textfont
Sets the font used inside `pathbar`.
thickness
Sets the thickness of `pathbar` (in px). If not
specified the `pathbar.textfont.size` is used with 3
            pixels extra padding on each side.
visible
Determines if the path bar is drawn i.e. outside the
trace `domain` and with one pixel gap.
"""
def __init__(
self,
arg=None,
edgeshape=None,
side=None,
textfont=None,
thickness=None,
visible=None,
**kwargs
):
"""
Construct a new Pathbar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Pathbar
edgeshape
Determines which shape is used for edges between
            `pathbar` labels.
side
            Determines on which side of the treemap the
`pathbar` should be presented.
textfont
Sets the font used inside `pathbar`.
thickness
Sets the thickness of `pathbar` (in px). If not
specified the `pathbar.textfont.size` is used with 3
            pixels extra padding on each side.
visible
Determines if the path bar is drawn i.e. outside the
trace `domain` and with one pixel gap.
Returns
-------
Pathbar
"""
super(Pathbar, self).__init__("pathbar")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Pathbar
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Pathbar"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import pathbar as v_pathbar
# Initialize validators
# ---------------------
self._validators["edgeshape"] = v_pathbar.EdgeshapeValidator()
self._validators["side"] = v_pathbar.SideValidator()
self._validators["textfont"] = v_pathbar.TextfontValidator()
self._validators["thickness"] = v_pathbar.ThicknessValidator()
self._validators["visible"] = v_pathbar.VisibleValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("edgeshape", None)
self["edgeshape"] = edgeshape if edgeshape is not None else _v
_v = arg.pop("side", None)
self["side"] = side if side is not None else _v
_v = arg.pop("textfont", None)
self["textfont"] = textfont if textfont is not None else _v
_v = arg.pop("thickness", None)
self["thickness"] = thickness if thickness is not None else _v
_v = arg.pop("visible", None)
self["visible"] = visible if visible is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Outsidetextfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Outsidetextfont object
Sets the font used for `textinfo` lying outside the sector.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly.graph_objs.treemap.Outsidetextfont
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Outsidetextfont
"""
super(Outsidetextfont, self).__init__("outsidetextfont")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Outsidetextfont
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Outsidetextfont"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import outsidetextfont as v_outsidetextfont
# Initialize validators
# ---------------------
self._validators["color"] = v_outsidetextfont.ColorValidator()
self._validators["colorsrc"] = v_outsidetextfont.ColorsrcValidator()
self._validators["family"] = v_outsidetextfont.FamilyValidator()
self._validators["familysrc"] = v_outsidetextfont.FamilysrcValidator()
self._validators["size"] = v_outsidetextfont.SizeValidator()
self._validators["sizesrc"] = v_outsidetextfont.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("familysrc", None)
self["familysrc"] = familysrc if familysrc is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
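# Usage sketch (editorial addition, not part of the generated module; kept as a
# comment so import-time behavior is unchanged). Outsidetextfont is normally
# reached through the `outsidetextfont` property of a treemap trace; the labels
# and font values below are invented for illustration:
#
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Treemap(
#       labels=["root", "A", "B"],
#       parents=["", "root", "root"],
#       outsidetextfont=go.treemap.Outsidetextfont(
#           family="Open Sans, Arial", size=14, color="gray"),
#   ))
#   fig.show()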
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
        `marker.colorscale`. Has an effect only if colors is set to a
numerical array. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# cauto
# -----
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here colors) or the bounds set in
        `marker.cmin` and `marker.cmax`. Has an effect only if colors is
set to a numerical array. Defaults to `false` when
`marker.cmin` and `marker.cmax` are set by the user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
# cmax
# ----
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
        colors is set to a numerical array. Value should have the same
units as colors and if set, `marker.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
# cmid
# ----
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
        effect only if colors is set to a numerical array. Value should
have the same units as colors. Has no effect when
`marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
# cmin
# ----
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
        colors is set to a numerical array. Value should have the same
units as colors and if set, `marker.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of plotly.graph_objs.treemap.marker.ColorBar
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
                    Sets the width (in px) of the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
                    Sets the length of the color bar. This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
                    or in "pixels". Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
                    Sets the thickness of the color bar. This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of plotly.graph_objects.treemap.marker.
colorbar.Tickformatstop instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.treemap.marker.colorbar.tickformatstopdefault
s), sets the default property values to use for
elements of
treemap.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
                    Determines whether ticks are drawn or not. If
                    "", this axis' ticks are not drawn. If
                    "outside" ("inside"), this axis' ticks are drawn
                    outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on plot.ly for
ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on plot.ly for
tickvals .
tickwidth
Sets the tick width (in px).
title
plotly.graph_objects.treemap.marker.colorbar.Ti
tle instance or dict with compatible properties
titlefont
Deprecated: Please use
treemap.marker.colorbar.title.font instead.
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
treemap.marker.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
                    Sets this color bar's vertical position anchor.
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.treemap.marker.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colors
# ------
@property
def colors(self):
"""
Sets the color of each sector of this trace. If not specified,
the default trace color set is used to pick the sector colors.
The 'colors' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["colors"]
@colors.setter
def colors(self, val):
self["colors"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
        Sets the colorscale. Has an effect only if colors is set to a
numerical array. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
        bounds of the colorscale in color space, use `marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may be a palette
name string of the following list: Greys,YlGnBu,Greens,YlOrRd,B
luered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Blackbod
y,Earth,Electric,Viridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg',
'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor',
'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy',
'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral',
'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose',
'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'twilight',
'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# colorssrc
# ---------
@property
def colorssrc(self):
"""
Sets the source reference on plot.ly for colors .
The 'colorssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorssrc"]
@colorssrc.setter
def colorssrc(self, val):
self["colorssrc"] = val
# depthfade
# ---------
@property
def depthfade(self):
"""
Determines if the sector colors are faded towards the
background from the leaves up to the headers. This option is
unavailable when a `colorscale` is present, defaults to false
when `marker.colors` is set, but otherwise defaults to true.
When set to "reversed", the fading direction is inverted, that
is the top elements within hierarchy are drawn with fully
saturated colors while the leaves are faded towards the
background color.
The 'depthfade' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'reversed']
Returns
-------
Any
"""
return self["depthfade"]
@depthfade.setter
def depthfade(self, val):
self["depthfade"] = val
# line
# ----
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of plotly.graph_objs.treemap.marker.Line
- A dict of string/value properties that will be passed
to the Line constructor
Supported dict properties:
color
Sets the color of the line enclosing each
sector. Defaults to the `paper_bgcolor` value.
colorsrc
Sets the source reference on plot.ly for color
.
width
Sets the width (in px) of the line enclosing
each sector.
widthsrc
Sets the source reference on plot.ly for width
.
Returns
-------
plotly.graph_objs.treemap.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
# pad
# ---
@property
def pad(self):
"""
The 'pad' property is an instance of Pad
that may be specified as:
- An instance of plotly.graph_objs.treemap.marker.Pad
- A dict of string/value properties that will be passed
to the Pad constructor
Supported dict properties:
                b
                    Sets the padding from the bottom (in px).
                l
                    Sets the padding from the left (in px).
                r
                    Sets the padding from the right (in px).
                t
                    Sets the padding from the top (in px).
Returns
-------
plotly.graph_objs.treemap.marker.Pad
"""
return self["pad"]
@pad.setter
def pad(self, val):
self["pad"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if
        colors is set to a numerical array. If true, `marker.cmin` will
correspond to the last color in the array and `marker.cmax`
will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
        trace. Has an effect only if colors is set to a numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
            `marker.colorscale`. Has an effect only if colors is set
to a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the default
palette will be chosen according to whether numbers in
the `color` array are all positive, all negative or
mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here colors) or the
            bounds set in `marker.cmin` and `marker.cmax`. Has an
            effect only if colors is set to a numerical array.
Defaults to `false` when `marker.cmin` and
`marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
            only if colors is set to a numerical array. Value should
have the same units as colors and if set, `marker.cmin`
must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
            this point. Has an effect only if colors is set to a
numerical array. Value should have the same units as
colors. Has no effect when `marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
            only if colors is set to a numerical array. Value should
have the same units as colors and if set, `marker.cmax`
must be set as well.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly.graph_objects.treemap.marker.ColorBar instance
or dict with compatible properties
colors
Sets the color of each sector of this trace. If not
specified, the default trace color set is used to pick
the sector colors.
colorscale
            Sets the colorscale. Has an effect only if colors is set
to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
            colorscale in color space, use `marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may be a
palette name string of the following list: Greys,YlGnBu
,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,P
ortland,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividi
s.
colorssrc
Sets the source reference on plot.ly for colors .
depthfade
Determines if the sector colors are faded towards the
background from the leaves up to the headers. This
option is unavailable when a `colorscale` is present,
defaults to false when `marker.colors` is set, but
otherwise defaults to true. When set to "reversed", the
fading direction is inverted, that is the top elements
within hierarchy are drawn with fully saturated colors
while the leaves are faded towards the background
color.
line
plotly.graph_objects.treemap.marker.Line instance or
dict with compatible properties
pad
plotly.graph_objects.treemap.marker.Pad instance or
dict with compatible properties
reversescale
Reverses the color mapping if true. Has an effect only
            if colors is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the
array and `marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is displayed for
            this trace. Has an effect only if colors is set to a
numerical array.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
coloraxis=None,
colorbar=None,
colors=None,
colorscale=None,
colorssrc=None,
depthfade=None,
line=None,
pad=None,
reversescale=None,
showscale=None,
**kwargs
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Marker
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
            `marker.colorscale`. Has an effect only if colors is set
to a numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the default
palette will be chosen according to whether numbers in
the `color` array are all positive, all negative or
mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here colors) or the
            bounds set in `marker.cmin` and `marker.cmax`. Has an
            effect only if colors is set to a numerical array.
Defaults to `false` when `marker.cmin` and
`marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
            only if colors is set to a numerical array. Value should
have the same units as colors and if set, `marker.cmin`
must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
            this point. Has an effect only if colors is set to a
numerical array. Value should have the same units as
colors. Has no effect when `marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
            only if colors is set to a numerical array. Value should
have the same units as colors and if set, `marker.cmax`
must be set as well.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
plotly.graph_objects.treemap.marker.ColorBar instance
or dict with compatible properties
colors
Sets the color of each sector of this trace. If not
specified, the default trace color set is used to pick
the sector colors.
colorscale
            Sets the colorscale. Has an effect only if colors is set
to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
            colorscale in color space, use `marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may be a
palette name string of the following list: Greys,YlGnBu
,Greens,YlOrRd,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,P
ortland,Jet,Hot,Blackbody,Earth,Electric,Viridis,Cividi
s.
colorssrc
Sets the source reference on plot.ly for colors .
depthfade
Determines if the sector colors are faded towards the
background from the leaves up to the headers. This
option is unavailable when a `colorscale` is present,
defaults to false when `marker.colors` is set, but
otherwise defaults to true. When set to "reversed", the
fading direction is inverted, that is the top elements
within hierarchy are drawn with fully saturated colors
while the leaves are faded towards the background
color.
line
plotly.graph_objects.treemap.marker.Line instance or
dict with compatible properties
pad
plotly.graph_objects.treemap.marker.Pad instance or
dict with compatible properties
reversescale
Reverses the color mapping if true. Has an effect only
            if colors is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the
array and `marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is displayed for
            this trace. Has an effect only if colors is set to a
numerical array.
Returns
-------
Marker
"""
super(Marker, self).__init__("marker")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Marker
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Marker"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import marker as v_marker
# Initialize validators
# ---------------------
self._validators["autocolorscale"] = v_marker.AutocolorscaleValidator()
self._validators["cauto"] = v_marker.CautoValidator()
self._validators["cmax"] = v_marker.CmaxValidator()
self._validators["cmid"] = v_marker.CmidValidator()
self._validators["cmin"] = v_marker.CminValidator()
self._validators["coloraxis"] = v_marker.ColoraxisValidator()
self._validators["colorbar"] = v_marker.ColorBarValidator()
self._validators["colors"] = v_marker.ColorsValidator()
self._validators["colorscale"] = v_marker.ColorscaleValidator()
self._validators["colorssrc"] = v_marker.ColorssrcValidator()
self._validators["depthfade"] = v_marker.DepthfadeValidator()
self._validators["line"] = v_marker.LineValidator()
self._validators["pad"] = v_marker.PadValidator()
self._validators["reversescale"] = v_marker.ReversescaleValidator()
self._validators["showscale"] = v_marker.ShowscaleValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
self["autocolorscale"] = autocolorscale if autocolorscale is not None else _v
_v = arg.pop("cauto", None)
self["cauto"] = cauto if cauto is not None else _v
_v = arg.pop("cmax", None)
self["cmax"] = cmax if cmax is not None else _v
_v = arg.pop("cmid", None)
self["cmid"] = cmid if cmid is not None else _v
_v = arg.pop("cmin", None)
self["cmin"] = cmin if cmin is not None else _v
_v = arg.pop("coloraxis", None)
self["coloraxis"] = coloraxis if coloraxis is not None else _v
_v = arg.pop("colorbar", None)
self["colorbar"] = colorbar if colorbar is not None else _v
_v = arg.pop("colors", None)
self["colors"] = colors if colors is not None else _v
_v = arg.pop("colorscale", None)
self["colorscale"] = colorscale if colorscale is not None else _v
_v = arg.pop("colorssrc", None)
self["colorssrc"] = colorssrc if colorssrc is not None else _v
_v = arg.pop("depthfade", None)
self["depthfade"] = depthfade if depthfade is not None else _v
_v = arg.pop("line", None)
self["line"] = line if line is not None else _v
_v = arg.pop("pad", None)
self["pad"] = pad if pad is not None else _v
_v = arg.pop("reversescale", None)
self["reversescale"] = reversescale if reversescale is not None else _v
_v = arg.pop("showscale", None)
self["showscale"] = showscale if showscale is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
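# Usage sketch (editorial addition, commented out so the module is unchanged):
# passing a numerical array as `colors` activates the continuous-color options
# documented above (colorscale, cmin/cmax, showscale). Values are invented:
#
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Treemap(
#       labels=["root", "A", "B", "C"],
#       parents=["", "root", "root", "root"],
#       marker=go.treemap.Marker(
#           colors=[0.0, 0.3, 0.6, 0.9],  # numerical array -> colorscale applies
#           colorscale="Viridis",
#           cmin=0.0, cmax=1.0,
#           showscale=True,
#       ),
#   ))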
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Insidetextfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
        number of fonts are installed and supported. These include
        "Arial", "Balto", "Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Insidetextfont object
Sets the font used for `textinfo` lying inside the sector.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Insidetextfont
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Insidetextfont
"""
super(Insidetextfont, self).__init__("insidetextfont")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Insidetextfont
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Insidetextfont"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import insidetextfont as v_insidetextfont
# Initialize validators
# ---------------------
self._validators["color"] = v_insidetextfont.ColorValidator()
self._validators["colorsrc"] = v_insidetextfont.ColorsrcValidator()
self._validators["family"] = v_insidetextfont.FamilyValidator()
self._validators["familysrc"] = v_insidetextfont.FamilysrcValidator()
self._validators["size"] = v_insidetextfont.SizeValidator()
self._validators["sizesrc"] = v_insidetextfont.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("familysrc", None)
self["familysrc"] = familysrc if familysrc is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
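# Usage sketch (editorial addition, commented out): per the docstrings above,
# `color` and `size` also accept arrays with one entry per sector. Example
# values are invented:
#
#   import plotly.graph_objects as go
#   insidetextfont = go.treemap.Insidetextfont(
#       size=[18, 12, 12], color=["black", "gray", "gray"])
#   # passed as go.Treemap(..., insidetextfont=insidetextfont)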
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# align
# -----
@property
def align(self):
"""
        Sets the horizontal alignment of the text content within the
        hover label box. Has an effect only if the hover label text
        spans two or more lines.
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on plot.ly for align .
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on plot.ly for bgcolor .
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on plot.ly for bordercolor .
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly.graph_objs.treemap.hoverlabel.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on plot.ly for color
.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
                            installed and supported. These include "Arial",
                            "Balto", "Courier New", "Droid Sans", "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for
family .
size
sizesrc
Sets the source reference on plot.ly for size
.
Returns
-------
plotly.graph_objs.treemap.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
        The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on plot.ly for namelength .
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
            Sets the horizontal alignment of the text content
            within the hover label box. Has an effect only if the
            hover label text spans two or more lines.
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Hoverlabel
align
            Sets the horizontal alignment of the text content
            within the hover label box. Has an effect only if the
            hover label text spans two or more lines.
alignsrc
Sets the source reference on plot.ly for align .
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on plot.ly for bgcolor .
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on plot.ly for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on plot.ly for namelength .
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Hoverlabel
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Hoverlabel"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import hoverlabel as v_hoverlabel
# Initialize validators
# ---------------------
self._validators["align"] = v_hoverlabel.AlignValidator()
self._validators["alignsrc"] = v_hoverlabel.AlignsrcValidator()
self._validators["bgcolor"] = v_hoverlabel.BgcolorValidator()
self._validators["bgcolorsrc"] = v_hoverlabel.BgcolorsrcValidator()
self._validators["bordercolor"] = v_hoverlabel.BordercolorValidator()
self._validators["bordercolorsrc"] = v_hoverlabel.BordercolorsrcValidator()
self._validators["font"] = v_hoverlabel.FontValidator()
self._validators["namelength"] = v_hoverlabel.NamelengthValidator()
self._validators["namelengthsrc"] = v_hoverlabel.NamelengthsrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
self["align"] = align if align is not None else _v
_v = arg.pop("alignsrc", None)
self["alignsrc"] = alignsrc if alignsrc is not None else _v
_v = arg.pop("bgcolor", None)
self["bgcolor"] = bgcolor if bgcolor is not None else _v
_v = arg.pop("bgcolorsrc", None)
self["bgcolorsrc"] = bgcolorsrc if bgcolorsrc is not None else _v
_v = arg.pop("bordercolor", None)
self["bordercolor"] = bordercolor if bordercolor is not None else _v
_v = arg.pop("bordercolorsrc", None)
self["bordercolorsrc"] = bordercolorsrc if bordercolorsrc is not None else _v
_v = arg.pop("font", None)
self["font"] = font if font is not None else _v
_v = arg.pop("namelength", None)
self["namelength"] = namelength if namelength is not None else _v
_v = arg.pop("namelengthsrc", None)
self["namelengthsrc"] = namelengthsrc if namelengthsrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
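# Usage sketch (editorial addition, commented out): combining the `namelength`
# rule documented above (-1 = full name, n > 3 = truncate with an ellipsis)
# with alignment and colors:
#
#   import plotly.graph_objects as go
#   hoverlabel = go.treemap.Hoverlabel(
#       align="left",
#       bgcolor="white",
#       bordercolor="gray",
#       namelength=-1,  # never truncate the trace name
#   )
#   # passed as go.Treemap(..., hoverlabel=hoverlabel)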
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Domain(_BaseTraceHierarchyType):
# column
# ------
@property
def column(self):
"""
If there is a layout grid, use the domain for this column in
the grid for this treemap trace .
        The 'column' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["column"]
@column.setter
def column(self, val):
self["column"] = val
# row
# ---
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this treemap trace .
        The 'row' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["row"]
@row.setter
def row(self, val):
self["row"] = val
# x
# -
@property
def x(self):
"""
Sets the horizontal domain of this treemap trace (in plot
fraction).
The 'x' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# y
# -
@property
def y(self):
"""
Sets the vertical domain of this treemap trace (in plot
fraction).
The 'y' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "treemap"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this treemap trace .
row
If there is a layout grid, use the domain for this row
in the grid for this treemap trace .
x
Sets the horizontal domain of this treemap trace (in
plot fraction).
y
Sets the vertical domain of this treemap trace (in plot
fraction).
"""
def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.treemap.Domain
column
            If there is a layout grid, use the domain for this
            column in the grid for this treemap trace.
row
            If there is a layout grid, use the domain for this row
            in the grid for this treemap trace.
x
Sets the horizontal domain of this treemap trace (in
plot fraction).
y
Sets the vertical domain of this treemap trace (in plot
fraction).
Returns
-------
Domain
"""
super(Domain, self).__init__("domain")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.treemap.Domain
constructor must be a dict or
an instance of plotly.graph_objs.treemap.Domain"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.treemap import domain as v_domain
# Initialize validators
# ---------------------
self._validators["column"] = v_domain.ColumnValidator()
self._validators["row"] = v_domain.RowValidator()
self._validators["x"] = v_domain.XValidator()
self._validators["y"] = v_domain.YValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("column", None)
self["column"] = column if column is not None else _v
_v = arg.pop("row", None)
self["row"] = row if row is not None else _v
_v = arg.pop("x", None)
self["x"] = x if x is not None else _v
_v = arg.pop("y", None)
self["y"] = y if y is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = [
"Domain",
"Hoverlabel",
"Insidetextfont",
"Marker",
"Outsidetextfont",
"Pathbar",
"Stream",
"Textfont",
"Tiling",
"hoverlabel",
"marker",
"pathbar",
]
from plotly.graph_objs.treemap import pathbar
from plotly.graph_objs.treemap import marker
from plotly.graph_objs.treemap import hoverlabel
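# Hedged usage sketch (not part of this module): placing a treemap in the
# left half of a figure via the Domain class defined above. Assumes the
# public plotly.graph_objects API; the labels/parents values are
# illustrative only.
#
# import plotly.graph_objects as go
# fig = go.Figure(go.Treemap(
#     labels=["root", "A", "B"],
#     parents=["", "root", "root"],
#     domain=go.treemap.Domain(x=[0, 0.5], y=[0, 1]),
# ))
# fig.show()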
|
[
"griffin.harrisonn@gmail.com"
] |
griffin.harrisonn@gmail.com
|
fa8d00db3ea73e911ad4a1363ccf293ffc013c88
|
e7bb312083171ccede50fc52b7e848b19f8649a8
|
/03_recursion/01_fibonacci.py
|
04797a521e0e14fde6a5b895ac49bde90dd9fab9
|
[
"Apache-2.0"
] |
permissive
|
shfscut/grokking
|
74a96657c9c4d5b52d13d4896c6436e71cc05d60
|
53da4768793941a71d6ae2bf0edcf24f3b8a7480
|
refs/heads/master
| 2022-10-03T03:01:56.455327
| 2020-06-08T16:15:22
| 2020-06-08T16:15:22
| 267,741,102
| 0
| 0
|
Apache-2.0
| 2020-06-08T16:15:23
| 2020-05-29T01:55:48
|
Python
|
UTF-8
|
Python
| false
| false
| 464
|
py
|
# coding: utf8
"""
Fibonacci sequence
F(0)=0
F(1)=1
F(n)=F(n-1)+F(n-2)  (n>=2)
Recursion:
1. Base case: when n is 0 or 1, stop recursing and return the value directly
2. Recursive case: when n >= 2, recurse
"""
def fibonacci(n):
if n==0:
return 0
if n==1:
return 1
return fibonacci(n-1)+fibonacci(n-2)
if __name__ == '__main__':
for i in range(10):
print(fibonacci(i))
# Call-tree expansion of fibonacci(4):
# f(3)+f(2)
# f(2)+f(1)+f(2)
# f(1)+f(0)+f(1)+f(1)+f(0)
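# A hedged alternative sketch: the expansion above recomputes the same
# subproblems exponentially many times; memoizing with the standard
# functools.lru_cache makes the identical recursion linear in n.
# fibonacci_memo is a name added here for illustration.
from functools import lru_cache

@lru_cache(maxsize=None)
def fibonacci_memo(n):
    if n < 2:
        return n
    return fibonacci_memo(n - 1) + fibonacci_memo(n - 2)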
|
[
"noreply@github.com"
] |
shfscut.noreply@github.com
|
e8bff6d568827425ea7599f91c89270112c1019a
|
377c0a2a071cc950745f97019ef300258a46ebfe
|
/bankingsys/settings.py
|
145d1cfca02bca08bba97a3b696ec3520fa58e10
|
[] |
no_license
|
valak-me/sparks-foundation-task
|
6acfa0acbd9bd0710bd3f426df4eaae04c3b7def
|
1fee4e004540e3341d84fe467d0691059b403a48
|
refs/heads/master
| 2023-01-11T10:02:25.599526
| 2020-11-21T11:12:28
| 2020-11-21T11:12:28
| 314,787,825
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,128
|
py
|
"""
Django settings for bankingsys project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'y9c0w7j6j=tkp$^dd0qile8z00zoomdq1i5gd716uni@tqaxv9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'banksys'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bankingsys.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bankingsys.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'banksys/static'
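# Hedged production sketch: the hard-coded SECRET_KEY above is fine for
# development, but in production it would typically be read from the
# environment instead (os is already imported above). The variable name
# DJANGO_SECRET_KEY is an assumption, not part of this project.
# SECRET_KEY = os.environ['DJANGO_SECRET_KEY']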
|
[
"rajneesh2399@gmail.com"
] |
rajneesh2399@gmail.com
|
c5db7879da91e3f8ef39214f55aa80dd9bc9d334
|
6005ef3c32a7345a45f2ba93c650fe3a54529b5f
|
/examples/example_37.py
|
89d95631e9596623b295270341b4b6fa85ea0feb
|
[] |
no_license
|
abasllari/SDA_LH1AL
|
b1e52f09aa9fa29cc201e0abba1cdf4f9bade063
|
28a8905420a39d63d4227abcecf8c1a388320b82
|
refs/heads/main
| 2023-07-30T14:00:56.503083
| 2021-09-21T19:40:22
| 2021-09-21T19:40:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 359
|
py
|
import lh1al.test_one
from lh1al.test_one import var1 as variable
from lh1al.test_one import resolve_equation as function
def main():
print("Hello World!")
print(variable)
print(lh1al.test_one.var2)
x = lh1al.test_one.resolve_equation(2, 3, "+")
print(x)
y = function(5, 10, "+")
print(y)
if __name__ == "__main__":
main()
|
[
"patryk.walaszkowski@jeppesen.com"
] |
patryk.walaszkowski@jeppesen.com
|
067109e4d6915c0415fc1c9894fb4c6813711533
|
2daf262c0d2645af4291e704c9efe3b12a62866f
|
/emu_share/pyemu/util.py
|
499ae57b880ebd9358858b08d4babe48e2397194
|
[
"MIT"
] |
permissive
|
SiChiTong/emu_manipulator
|
94cde00e5e1509383f218db512de04c9a6b2da38
|
7046442f9d2a3c617603ebb508e1a95dbb09cce9
|
refs/heads/master
| 2022-11-27T04:54:36.984760
| 2020-08-01T04:57:37
| 2020-08-01T04:57:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,148
|
py
|
import numpy as np
from math import sin, cos, pi
class kinematics:
@staticmethod
def rot(theta, axis):
c = cos(theta)
s = sin(theta)
        if axis == 'x':
            H = np.matrix([[1, 0, 0, 0], [0, c, -s, 0], [0, s, c, 0], [0, 0, 0, 1]])
        elif axis == 'y':
            H = np.matrix([[c, 0, s, 0], [0, 1, 0, 0], [-s, 0, c, 0], [0, 0, 0, 1]])
        elif axis == 'z':
            H = np.matrix([[c, -s, 0, 0], [s, c, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])
        else:
            # raise instead of printing so H is never referenced unbound
            raise ValueError('Incorrect axis of rotation')
        return H
@staticmethod
def transl(dist, axis):
H = np.identity(4)
        if axis == 'x':
            idx = 0
        elif axis == 'y':
            idx = 1
        elif axis == 'z':
            idx = 2
        else:
            # raise instead of printing so idx is never referenced unbound
            raise ValueError('Incorrect axis of translation')
H[idx][3] = dist
return H
@staticmethod
def fk(q, dh, rho):
n = len(rho)
prevTf = np.eye(4)
tf = []
for i in range(6):
tf.append(prevTf*kinematics.rot(dh[i][0]+(rho[i]*q[i]), 'z')*kinematics.transl(dh[i][1]+(-rho[i]+1)*q[i], 'z')*kinematics.transl(dh[i][2], 'x')*kinematics.rot(dh[i][3], 'x'))
prevTf = tf[i]
return tf
@staticmethod
def mj(q, dh, rho):
n = len(rho)
J = []
tf = kinematics.fk(q, dh, rho)
for i in range(n):
o_n = tf[i][0:3, 3]
z_prev = np.matrix('0; 0; 1')
o_prev = np.matrix('0; 0; 0')
Ji = np.matrix([[]]*n)
for j in range(i+1):
Jvi = np.cross(np.array(rho[j]*z_prev).reshape(3), np.array(o_n-o_prev).reshape(3)).reshape(3,1)
Jwi = rho[j]*z_prev
Jj = np.concatenate((Jwi, Jvi),axis=0)
Ji = np.concatenate((Ji, Jj),axis=1)
                z_prev = tf[j][0:3, 2]
                o_prev = tf[j][0:3, 3]
# Ji = np.concatenate((Ji, np.zeros((6,5-i))),axis=1)
J.append(Ji)
# print(J[2].round(decimals = 5))
return J
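# Hedged usage sketch: fk() above iterates exactly 6 joints, so it expects
# six DH rows [theta_offset, d, a, alpha] and a 6-element rho vector
# (1 = revolute, 0 = prismatic). The parameter values below are
# illustrative only, not taken from any real manipulator.
if __name__ == '__main__':
    q = [0.0] * 6
    dh = [[0.0, 0.1, 0.0, pi / 2] for _ in range(6)]
    rho = [1] * 6
    frames = kinematics.fk(q, dh, rho)
    print(frames[-1].round(decimals=5))  # end-effector homogeneous transform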
|
[
"c.thanapong@aol.com"
] |
c.thanapong@aol.com
|
5316355c24bdc0449e789d9042a10d12f9f00214
|
a88a6c94961147c905cde354afd1fafbd9253f77
|
/mysite/mysite/urls.py
|
0127338af13d45f129196b9f067b615b44c60281
|
[] |
no_license
|
WooHee98/Django_INDIMOVIE
|
659d56e8e6f1c588d6c95135d8a82c16cc8d1c72
|
f66696c29ddbf96d55801310df033232c4c26081
|
refs/heads/master
| 2021-01-16T06:55:31.034676
| 2020-02-25T14:12:41
| 2020-02-25T14:12:41
| 243,016,124
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,154
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls import url, include
from mysite.views import HomeView
# admin_logout import added
from mysite.views import CreateUserView, RegisteredView, EnrollView, admin_logout #EnrollmentView
#rest api
from rest_framework import routers
from api import views
app_name = 'indimovie'
router = routers.DefaultRouter()
router.register(r'users', views.UserViewSet)
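# Note: DefaultRouter auto-generates the REST routes for UserViewSet,
# e.g. ^users/$ (list/create) and ^users/{pk}/$ (retrieve/update/delete).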
urlpatterns = [
    # The admin logout URL must always come before the admin/ path so it is matched first and redirects immediately.
url(r'^admin/logout/$', admin_logout),
url(r'^jet/', include('jet.urls', 'jet')), # Django JET URLS
url(r'^jet/dashboard/', include('jet.dashboard.urls', 'jet-dashboard')), # Django JET dashboard URLS
url(r'^admin/', admin.site.urls),
url(r'^$', HomeView.as_view(), name='home'),
    # URLs for Django's built-in login and logout views
url(r'^accs/', include('django.contrib.auth.urls')),
    # URL for registering cinema users
url(r'^accounts/signup$', CreateUserView.as_view(), name = 'signup'),
url(r'^accounts/login/done$', RegisteredView.as_view(), name = 'create_user_done'),
    # URLs for the API
url(r'^movie/', include('api.urls')),
url(r'^', include(router.urls)),
    # URL for submitting a scenario
url(r'^accounts/scenario$', EnrollView.as_view(), name = 'scenario_enroll'),
#url(r'^accounts/scenario/done$', EnrolledView.as_view(), name='scenario_done'),
]
|
[
"kerri981230@naver.com"
] |
kerri981230@naver.com
|
d154cd7f23a2dddb40667b53f77fad534d030f69
|
5d4f7ba238628c03fa39b214639a8de8d3bcd68b
|
/Project1/models/CNN.py
|
d613bd9fd78bc011cf3ea3a658a75aaf22340d3a
|
[] |
no_license
|
chronican/DL_project
|
cc6ec98b29ffb543608ffb7922cdc52b02faeb49
|
65ea6480277a77ea0cfded9f80801ada4d3d2c8c
|
refs/heads/master
| 2023-01-04T11:14:27.160825
| 2020-11-03T18:16:16
| 2020-11-03T18:16:16
| 309,768,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,760
|
py
|
import torch
from torch import nn
class RCL(nn.Module):
"""
Defines recurrent convolutional layer (RCL)
"""
def __init__(self, K, steps):
"""
Initializes RCL
:param K: number of feature maps in convolution
:param steps: number of time steps
"""
super(RCL, self).__init__()
self.steps = steps
self.conv = nn.Conv2d(K, K, kernel_size=3, stride=1, padding=1, bias=False)
self.bnList = nn.ModuleList([nn.BatchNorm2d(K) for i in range(steps)])
self.relu = nn.ReLU(inplace=True)
self.recurr = nn.Conv2d(K, K, kernel_size=3, stride=1, padding=1, bias=False)
def forward(self, x):
rx = x # initialize recurrent state
for i in range(self.steps): # steps <= 3
if i == 0:
x = self.conv(x) # only feed-forward connection at first time step
else:
rx = self.recurr(rx) # recurrent state update
x = self.conv(x) + rx # output in time update
x = self.relu(x)
x = self.bnList[i](x)
return x
class CNN(nn.Module):
"""
(Recurrent) convolutional neural network
"""
def __init__(self, channels, num_classes, weight_sharing, auxiliary_loss, K = 32, steps = 3):
"""
initialize the model
:param channels: input channel number
:param num_classes: output channel number
:param weight_sharing: boolean flag for weight sharing application
:param auxiliary_loss: boolean flag for auxiliary loss application
:param K: number of feature maps in convolution
:param steps: time step for recurrent convolutional layer, also if no weight sharing, number of replacement convolutional layers
"""
super(CNN, self).__init__()
assert channels == 2 # check input channel is 2
self.weight_sharing = weight_sharing
self.auxiliary_loss = auxiliary_loss
self.K = K
self.steps = steps
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm2d(K)
self.bn2 = nn.BatchNorm2d(K)
self.pooling = nn.MaxPool2d(kernel_size = 3, stride = 2, padding = 1)
self.convList1 = nn.ModuleList([nn.Conv2d(K, K, kernel_size=3, stride=1, padding=1, bias = False) for i in range(steps)])
self.bnList1 = nn.ModuleList([nn.BatchNorm2d(K) for i in range(steps)])
self.convList2 = nn.ModuleList([nn.Conv2d(K * 2, K * 2, kernel_size=3, stride=1, padding=1, bias = False) for i in range(steps)])
self.bnList2 = nn.ModuleList([nn.BatchNorm2d(K * 2) for i in range(steps)])
self.layer1 = nn.Conv2d(1, K, kernel_size = 3, padding = 1)
self.layer2 = nn.Conv2d(1, K, kernel_size = 3, padding = 1)
self.rcl1 = RCL(K, steps=steps)
self.rcl2 = RCL(K, steps=steps)
self.rcl3 = RCL(K * 2, steps=steps)
self.fc = nn.Sequential(nn.Linear(K * 2 * 7 * 7, 128, bias = True), nn.ReLU(), nn.Linear(128, num_classes, bias = True))
self.dropout = nn.Dropout(p=0.3)
self.fc_aux = nn.Linear(K * 7 * 7, 10)
def forward(self, x):
# split 2 channel input into two images
x1 = torch.unsqueeze(x[:,0],dim=1)
x2 = torch.unsqueeze(x[:,1],dim=1)
x1 = self.bn1(self.relu(self.layer1(x1)))
x2 = self.bn2(self.relu(self.layer2(x2)))
x1 = self.pooling(x1)
x2 = self.pooling(x2)
x1 = self.dropout(x1)
x2 = self.dropout(x2)
if self.weight_sharing: # weight sharing case: RCNN
x1 = self.rcl1(x1)
x2 = self.rcl2(x2)
else: # no weight sharing case: CNN
for i in range(self.steps):
x1 = self.convList1[i](x1)
x2 = self.convList1[i](x2)
x1 = self.relu(x1)
x2 = self.relu(x2)
x1 = self.bnList1[i](x1)
x2 = self.bnList1[i](x2)
x1 = self.dropout(x1)
x2 = self.dropout(x2)
# concatenate
x = torch.cat((x1, x2), dim = 1)
if self.weight_sharing: # weight sharing case: RCNN
x = self.rcl3(x)
else: # no weight sharing case: CNN
for i in range(self.steps):
x = self.convList2[i](x)
x = self.relu(x)
x = self.bnList2[i](x)
x = x.view(-1, self.K * 2 * 7 * 7)
x = self.dropout(x)
# fully connected layers
x = self.fc(x)
if self.auxiliary_loss: # with auxiliary loss
y1 = x1.view(-1, self.K * 7 * 7)
y2 = x2.view(-1, self.K * 7 * 7)
y1 = self.fc_aux(y1)
y2 = self.fc_aux(y2)
return y1, y2, x
else: # no auxiliary loss
return x
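# Hedged smoke-test sketch: the K * 7 * 7 flattening above implies 2x14x14
# inputs (two stacked 14x14 grayscale images); the batch size of 4 is
# arbitrary and this block is illustrative, not part of the project.
if __name__ == '__main__':
    model = CNN(channels=2, num_classes=2, weight_sharing=True, auxiliary_loss=True)
    x = torch.randn(4, 2, 14, 14)
    y1, y2, out = model(x)
    print(y1.shape, y2.shape, out.shape)  # torch.Size([4, 10]) twice, torch.Size([4, 2])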
|
[
"xiaoqi.ma@epfl.ch"
] |
xiaoqi.ma@epfl.ch
|
2e71a54d819691fa7b42aba1a60e86d16aafbd11
|
9fb4711cc5874756e86adc8db3b65fbac824fd55
|
/sayı-harf.py
|
9c2ac16c9576c551e859c03e93ea61098f14c25f
|
[] |
no_license
|
Ebuyuktas/8.hafta_odevler-Fonksiyonlar
|
d93cec0e0e8c14c72b3fb04eff44718ab5399894
|
3776f4fc52daade336230dc16b458474fa708c94
|
refs/heads/master
| 2020-07-02T08:13:30.333535
| 2019-08-10T10:14:46
| 2019-08-10T10:14:46
| 201,469,153
| 0
| 0
| null | 2019-08-09T13:06:31
| 2019-08-09T13:06:30
| null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
# Homework 6
# Write out a number, given in digits, as words (in Turkish)
print("""Enter, in digits, the number you want written out in words.
You may enter a number of at most two digits\n""")
def oku():
    birler = [" ", "bir", "iki", "uc", "dort", "bes", "alti", "yedi", "sekiz", "dokuz"]
    onlar = ["on", "yirmi", "otuz", "kirk", "elli", "altmis", "yetmis", "seksen", "doksan"]
    sayi = input("Enter a number with at most two digits: ")
    if len(sayi) == 1:  # single digit: no tens word needed
        return birler[int(sayi)]
    return onlar[int(sayi[0]) - 1] + " " + birler[int(sayi[1])]
print(oku())
|
[
"noreply@github.com"
] |
Ebuyuktas.noreply@github.com
|
48e9805d797ec371e1ca1a8a2141d7d9a0146988
|
cb59dbba764a530741174e694cac475c860e120c
|
/TermProj/settings.py
|
2f85b19022148454601202178181913986c990db
|
[] |
no_license
|
EngTurtle/StatsTypingTest
|
75077d159a0b089bbe8ac94012ed29fc89c27cc0
|
193f41346c338dad56819e944f42ec0bccdf5ea4
|
refs/heads/master
| 2016-08-04T06:39:42.294966
| 2013-04-06T02:22:06
| 2013-04-06T02:22:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,872
|
py
|
# Django settings for TermProj project.
import os
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'database.db', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-ca'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(os.path.dirname(__file__), 'statics').replace('\\', '/').replace('TermProj/','')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = 'http://individual.utoronto.ca/oliver_liang/TermProj/'
if DEBUG:
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'bootstrap').replace('\\', '/').replace('TermProj/',''),
os.path.join(os.path.dirname(__file__), 'assets').replace('\\', '/').replace('TermProj/',''),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ymz9y*hnipo2c#y4e3u)x%@)0az57geuvjo&35ych0l^!*_dv-'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'TermProj.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'TermProj.wsgi.application'
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates').replace('\\', '/').replace('TermProj/',''),)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'TypingSpeed',
'gunicorn',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
#heroku settings
import dj_database_url
if os.getenv('DATABASE_URL'):
DATABASES['default'] = dj_database_url.config()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
|
[
"ollie.liang@gmail.com"
] |
ollie.liang@gmail.com
|
96484174d1f096d926cc4c5d36694e4fafa398a8
|
978a1ad70dae800f3ca0eee341a4c72445ac408a
|
/zgadywanka.py
|
a93fc5f523a165a4d85bb80b9c376b5088e7f67b
|
[] |
no_license
|
lwiesek/Python
|
e56f1734a09598fec4bcfc16c7688ffdd9a97d83
|
2120e390c9698e65b84161f55cbc6aac716574d5
|
refs/heads/master
| 2020-09-03T17:13:07.359288
| 2020-04-14T06:47:38
| 2020-04-14T06:47:38
| 219,518,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 843
|
py
|
import random
print('Hi! What is your name?')
mojeImie = input()
liczba = random.randint(1, 20)
print('Listen, ' + mojeImie + ', I am thinking of a number between 1 and 20.')
for wykonaneProby in range(6):
    print('Take a guess.')
    probaOdgadniecia = input()
    probaOdgadniecia = int(probaOdgadniecia)
    if probaOdgadniecia < liczba:
        print('Your number is too low')
    if probaOdgadniecia > liczba:
        print('Your number is too high')
    if probaOdgadniecia == liczba:
        break
if probaOdgadniecia == liczba:
    wykonaneProby = str(wykonaneProby + 1)
    print('Great job, ' + mojeImie + '! You guessed it in ' + wykonaneProby + ' tries!')
if probaOdgadniecia != liczba:
    liczba = str(liczba)
    print('Sorry, no. The number I was thinking of was ' + liczba + '.')
|
[
"noreply@github.com"
] |
lwiesek.noreply@github.com
|
adf92b9853f239378d2b197e43c8d8a74f7187bb
|
a8098dfdd09ae9e56207134e8617807c14c5d5f7
|
/RestBucks/asgi.py
|
660df548f6024e0481bb7dac3e772f23f2be883a
|
[] |
no_license
|
NiloofarShahbaz/DjangoRestBucks
|
be773c2cce0c3daee15a499e2127c46d5f00d3c7
|
5f3e98fb9c91115f2497c4991668b2d3fad81e8f
|
refs/heads/master
| 2023-07-04T20:35:23.821603
| 2021-08-25T10:49:28
| 2021-08-25T10:51:21
| 399,004,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
"""
ASGI config for RestBucks project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "RestBucks.settings")
application = get_asgi_application()
|
[
"niloofar.shahbaz@gmail.com"
] |
niloofar.shahbaz@gmail.com
|
47cb4095a51debdf177b8528a64fbd589e3a7b4c
|
6219d4968f3c17360af1a1fabc1f985f0c8947c4
|
/snake.py
|
82a0a30b2b13758ec2daa18be379b0d2555790f7
|
[] |
no_license
|
apgupta3091/snake_game
|
65c56059930e3650cd74c18f7db42a92feb34f3c
|
1dd14eb707dcfbc9fc22ec0bc0c4b7a4172a01df
|
refs/heads/main
| 2023-07-18T22:28:08.414950
| 2021-09-27T18:38:33
| 2021-09-27T18:38:33
| 410,754,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,602
|
py
|
from turtle import Turtle
STARTING_POSITIONS = [(0, 0), (-20, 0), (-40, 0)]
MOVE_DISTANCE = 20
UP = 90
DOWN = 270
LEFT = 180
RIGHT = 0
class Snake:
def __init__(self):
self.segments = []
self.create_snake()
self.head = self.segments[0]
def create_snake(self):
for position in STARTING_POSITIONS:
self.add_segment(position)
def add_segment(self, position):
new_snake = Turtle("square")
new_snake.color("white")
new_snake.penup()
new_snake.goto(position)
self.segments.append(new_snake)
    def reset(self):
        # park the old segments off-screen, then rebuild a fresh snake
        for seg in self.segments:
            seg.goto(1000, 1000)
        self.segments.clear()
        self.create_snake()
        self.head = self.segments[0]
def extend(self):
self.add_segment(self.segments[-1].position())
    def move(self):
        # from tail to head, each segment steps into the position of the
        # segment in front of it; then the head moves forward
        for seg_num in range(len(self.segments) - 1, 0, -1):
            new_x = self.segments[seg_num - 1].xcor()
            new_y = self.segments[seg_num - 1].ycor()
            self.segments[seg_num].goto(new_x, new_y)
        self.head.forward(MOVE_DISTANCE)
def up(self):
if self.head.heading() != DOWN:
self.head.setheading(UP)
def down(self):
if self.head.heading() != UP:
self.head.setheading(DOWN)
def left(self):
if self.head.heading() != RIGHT:
self.head.setheading(LEFT)
def right(self):
if self.head.heading() != LEFT:
self.head.setheading(RIGHT)
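# Hedged usage sketch: wiring the snake to a turtle Screen. The key names
# and fixed-length game loop below are illustrative; a real game would also
# handle food and collision detection.
if __name__ == '__main__':
    from turtle import Screen
    import time
    screen = Screen()
    screen.setup(width=600, height=600)
    screen.bgcolor('black')
    screen.tracer(0)  # manual redraws for smooth animation
    snake = Snake()
    screen.listen()
    screen.onkey(snake.up, 'Up')
    screen.onkey(snake.down, 'Down')
    screen.onkey(snake.left, 'Left')
    screen.onkey(snake.right, 'Right')
    for _ in range(100):
        screen.update()
        time.sleep(0.1)
        snake.move()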
|
[
"apgupta3091@gmail.com"
] |
apgupta3091@gmail.com
|
1ecfedcd7d9fc961599cc239379cdd6d4486adb3
|
ef6d4d29244c5feda14067d571d2c9b24c7b47a8
|
/vagrant/catalog/handler_utils.py
|
a1056ff896ca3ac9969be478e97b832a0aa5e24d
|
[] |
no_license
|
csarradet/fsndp3
|
9a17204244b85d7506c8d33574a4c520df4f85b1
|
7def768c39c609be79e886378ac68fab69a1821c
|
refs/heads/master
| 2020-07-12T04:13:49.755754
| 2016-03-21T01:11:16
| 2016-03-21T01:11:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,222
|
py
|
"""
This file contains helper functions to make our web handler code
cleaner.
"""
import time
import datetime
from flask import make_response, render_template
import json
from session_utils import get_active_user
from rfc3339 import rfc3339
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
from dal import list_items_by_cat
from entities import Entity
from session_utils import get_current_nonce
def date_to_atom_friendly(date):
"""
Converts dates from our default representation to an Atom-friendly RFC-3339 format.
Uses a third party library released under a free license (see rfc3339.py for details).
Uses code from http://stackoverflow.com/questions/9637838/convert-string-date-to-timestamp-in-python
"""
parsed = time.mktime(datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S").timetuple())
return rfc3339(parsed)
def __create_response(obj, content_type, http_status_code):
""" Creates an HTTP response of the given content_type and status """
response = make_response(obj, http_status_code)
response.headers["Content-Type"] = content_type
return response
def create_atom_response(obj, http_status_code=200):
"""Dumps the provided object into a response with MIME type set for an Atom feed."""
return __create_response(obj, "application/atom+xml", http_status_code)
def create_json_response(obj, http_status_code=200):
"""
Dumps the provided object into a JSON response and returns a success code.
Assumes that obj is already in JSON format.
"""
return __create_response(obj, "application/json", http_status_code)
def create_err_response(message, err_code):
"""
Logs the error and creates a corresponding HTTP error response.
Handlers calling this function (or the xxx_error() functions below)
should stop executing handler logic and return this response immediately.
"""
response = create_json_response(json.dumps(message), http_status_code=err_code)
logger.error("{} error: {}".format(err_code, message))
return response
# Convenience definitions for common HTTP error codes:
def bad_request_error():
return create_err_response("Your request contained invalid data", 400)
def not_authenticated_error():
return create_err_response("You must log in to access the requested resource", 401)
def not_authorized_error():
return create_err_response("You don't have permission to access the requested resource", 403)
def not_found_error():
return create_err_response("The requested resource was not found", 404)
def already_exists_error():
return create_err_response("Unable to create -- that resource already exists", 400)
def internal_error():
return create_err_response("Internal server error", 500)
def render(filename, **kwargs):
"""
Decorator for flask's render_template() function.
Passes along any provided kwargs after adding in a few fields
required by our base template, like info on the logged in user
and sidebar items.
"""
kwargs["current_user"] = get_active_user()
kwargs["items_by_cat"] = list_items_by_cat()
kwargs["state"] = get_current_nonce()
return render_template(filename, **kwargs)
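# Hedged usage sketch in a hypothetical Flask view; 'app', 'dal.get_item'
# and the template name are assumptions, not defined in this module.
#
# @app.route('/catalog/<int:item_id>')
# def show_item(item_id):
#     item = dal.get_item(item_id)
#     if item is None:
#         return not_found_error()
#     return render('item.html', item=item)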
|
[
"csarradet@gmail.com"
] |
csarradet@gmail.com
|
ebabfa199a4b1f5b1f4c7a79f6041c4ebd6e6d34
|
c8326af2c59919b5c09ffb49f425a24ad0a73d59
|
/onderhoud/urls.py
|
77a914218f92681cfe9f7087da57e44c6e05a935
|
[] |
no_license
|
snirp/pressie
|
0dd4aa86ac4351da40f8b656fe671f7b2b17db0c
|
d834a982ef73f11b818eec99518961d8f1831e26
|
refs/heads/master
| 2021-01-18T14:48:53.532328
| 2015-06-17T09:17:39
| 2015-06-17T09:17:39
| 27,434,235
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
from django.conf.urls import patterns, url
from onderhoud import views
urlpatterns = patterns('',
url(r'^complex/$', views.complex_list, name='complex_list'),
url(r'^begroting/$', views.begroting_list, name='begroting-list'),
url(r'^begroting/(?P<pk>\d+)/$', views.begroting, name='begroting'),
url(r'^conditiemeting/$', views.conditiemetingen, name='conditiemeting_list'),
url(r'^conditiemeting/(?P<pk>\d+)/$', views.conditiemeting, name='conditiemeting_detail'),
url(r'^cm/(?P<pk>\d+)/$', views.cm, name='cm_detail'),
url(r'^conditiefoto/(?P<pk>\d+)/$', views.conditiefoto, name='conditiefoto'),
url(r'^gebreken/$', views.gebreken_tabel, name='gebreken_tabel')
)
|
[
"prinsroy@gmail.com"
] |
prinsroy@gmail.com
|
22499ba6ec4ddebf187039f89fa73ea90c0e375e
|
228f0378c71dfe814291f7adf657674a4cdf78ad
|
/tests/components/airzone/util.py
|
8f7b8910c8cf101f65d38432bbec39eccb0dcec8
|
[
"Apache-2.0"
] |
permissive
|
K4rl1Hub/home-assistant
|
cdfc3ad552518ce634d9e8282a86db22cb8dbafe
|
96d64bd6b70c28bbcdcf50ade01fd09f4a1f2e75
|
refs/heads/dev
| 2023-03-10T09:22:18.812076
| 2022-04-25T18:46:37
| 2022-04-25T18:46:37
| 176,307,853
| 0
| 0
|
Apache-2.0
| 2023-02-22T06:21:12
| 2019-03-18T14:49:50
|
Python
|
UTF-8
|
Python
| false
| false
| 6,329
|
py
|
"""Tests for the Airzone integration."""
from unittest.mock import patch
from aioairzone.const import (
API_AIR_DEMAND,
API_COLD_STAGE,
API_COLD_STAGES,
API_DATA,
API_ERRORS,
API_FLOOR_DEMAND,
API_HEAT_STAGE,
API_HEAT_STAGES,
API_HUMIDITY,
API_MAX_TEMP,
API_MIN_TEMP,
API_MODE,
API_MODES,
API_NAME,
API_ON,
API_ROOM_TEMP,
API_SET_POINT,
API_SYSTEM_ID,
API_SYSTEMS,
API_THERMOS_FIRMWARE,
API_THERMOS_RADIO,
API_THERMOS_TYPE,
API_UNITS,
API_ZONE_ID,
)
from aioairzone.exceptions import InvalidMethod, SystemOutOfRange
from homeassistant.components.airzone import DOMAIN
from homeassistant.const import CONF_HOST, CONF_ID, CONF_PORT
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry
CONFIG = {
CONF_HOST: "192.168.1.100",
CONF_PORT: 3000,
CONF_ID: 0,
}
CONFIG_NO_ID = {
CONF_HOST: CONFIG[CONF_HOST],
CONF_PORT: CONFIG[CONF_PORT],
}
CONFIG_ID1 = {
CONF_HOST: CONFIG[CONF_HOST],
CONF_PORT: CONFIG[CONF_PORT],
CONF_ID: 1,
}
HVAC_MOCK = {
API_SYSTEMS: [
{
API_DATA: [
{
API_SYSTEM_ID: 1,
API_ZONE_ID: 1,
API_NAME: "Salon",
API_THERMOS_TYPE: 2,
API_THERMOS_FIRMWARE: "3.51",
API_THERMOS_RADIO: 0,
API_ON: 0,
API_MAX_TEMP: 30,
API_MIN_TEMP: 15,
API_SET_POINT: 19.1,
API_ROOM_TEMP: 19.6,
API_MODES: [1, 4, 2, 3, 5],
API_MODE: 3,
API_COLD_STAGES: 1,
API_COLD_STAGE: 1,
API_HEAT_STAGES: 1,
API_HEAT_STAGE: 1,
API_HUMIDITY: 34,
API_UNITS: 0,
API_ERRORS: [],
API_AIR_DEMAND: 0,
API_FLOOR_DEMAND: 0,
},
{
API_SYSTEM_ID: 1,
API_ZONE_ID: 2,
API_NAME: "Dorm Ppal",
API_THERMOS_TYPE: 4,
API_THERMOS_FIRMWARE: "3.33",
API_THERMOS_RADIO: 1,
API_ON: 1,
API_MAX_TEMP: 30,
API_MIN_TEMP: 15,
API_SET_POINT: 19.2,
API_ROOM_TEMP: 21.1,
API_MODE: 3,
API_COLD_STAGES: 1,
API_COLD_STAGE: 1,
API_HEAT_STAGES: 3,
API_HEAT_STAGE: 3,
API_HUMIDITY: 39,
API_UNITS: 0,
API_ERRORS: [],
API_AIR_DEMAND: 1,
API_FLOOR_DEMAND: 1,
},
{
API_SYSTEM_ID: 1,
API_ZONE_ID: 3,
API_NAME: "Dorm #1",
API_THERMOS_TYPE: 4,
API_THERMOS_FIRMWARE: "3.33",
API_THERMOS_RADIO: 1,
API_ON: 1,
API_MAX_TEMP: 30,
API_MIN_TEMP: 15,
API_SET_POINT: 19.3,
API_ROOM_TEMP: 20.8,
API_MODE: 3,
API_COLD_STAGES: 1,
API_COLD_STAGE: 1,
API_HEAT_STAGES: 2,
API_HEAT_STAGE: 2,
API_HUMIDITY: 35,
API_UNITS: 0,
API_ERRORS: [],
API_AIR_DEMAND: 0,
API_FLOOR_DEMAND: 0,
},
{
API_SYSTEM_ID: 1,
API_ZONE_ID: 4,
API_NAME: "Despacho",
API_THERMOS_TYPE: 4,
API_THERMOS_FIRMWARE: "3.33",
API_THERMOS_RADIO: 1,
API_ON: 0,
API_MAX_TEMP: 86,
API_MIN_TEMP: 59,
API_SET_POINT: 66.92,
API_ROOM_TEMP: 70.16,
API_MODE: 3,
API_COLD_STAGES: 1,
API_COLD_STAGE: 1,
API_HEAT_STAGES: 1,
API_HEAT_STAGE: 1,
API_HUMIDITY: 36,
API_UNITS: 1,
API_ERRORS: [
{
"Zone": "Low battery",
},
],
API_AIR_DEMAND: 0,
API_FLOOR_DEMAND: 0,
},
{
API_SYSTEM_ID: 1,
API_ZONE_ID: 5,
API_NAME: "Dorm #2",
API_THERMOS_TYPE: 4,
API_THERMOS_FIRMWARE: "3.33",
API_THERMOS_RADIO: 1,
API_ON: 0,
API_MAX_TEMP: 30,
API_MIN_TEMP: 15,
API_SET_POINT: 19.5,
API_ROOM_TEMP: 20.5,
API_MODE: 3,
API_COLD_STAGES: 1,
API_COLD_STAGE: 1,
API_HEAT_STAGES: 1,
API_HEAT_STAGE: 1,
API_HUMIDITY: 40,
API_UNITS: 0,
API_ERRORS: [],
API_AIR_DEMAND: 0,
API_FLOOR_DEMAND: 0,
},
]
}
]
}
async def async_init_integration(
hass: HomeAssistant,
) -> None:
"""Set up the Airzone integration in Home Assistant."""
entry = MockConfigEntry(domain=DOMAIN, data=CONFIG)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.airzone.AirzoneLocalApi.get_hvac",
return_value=HVAC_MOCK,
), patch(
"homeassistant.components.airzone.AirzoneLocalApi.get_hvac_systems",
side_effect=SystemOutOfRange,
), patch(
"homeassistant.components.airzone.AirzoneLocalApi.get_webserver",
side_effect=InvalidMethod,
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
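# Hedged usage sketch of the helper above in a pytest-style test; the
# entity id queried here is an assumption based on the "Salon" zone mock.
#
# async def test_async_setup(hass: HomeAssistant) -> None:
#     await async_init_integration(hass)
#     assert hass.states.get("climate.salon") is not None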
|
[
"noreply@github.com"
] |
K4rl1Hub.noreply@github.com
|