Dataset schema (the rows that follow conform to it; ⌀ marks a nullable column):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 7 to 1.04M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 247 |
| max_stars_repo_name | string | length 4 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 368k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | length 24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | length 24 ⌀ |
| max_issues_repo_path | string | length 4 to 247 |
| max_issues_repo_name | string | length 4 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | length 24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | length 24 ⌀ |
| max_forks_repo_path | string | length 4 to 247 |
| max_forks_repo_name | string | length 4 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | length 24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | length 24 ⌀ |
| content | string | length 1 to 1.04M |
| avg_line_length | float64 | 1.77 to 618k |
| max_line_length | int64 | 1 to 1.02M |
| alphanum_fraction | float64 | 0 to 1 |
| original_content | string | length 7 to 1.04M |
| filtered:remove_function_no_docstring | int64 | -102 to 942k |
| filtered:remove_class_no_docstring | int64 | -354 to 977k |
| filtered:remove_delete_markers | int64 | 0 to 60.1k |
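Each row below follows this schema. For inspecting such a dump programmatically, here is a minimal sketch; it assumes the table has been exported as JSON Lines to a hypothetical file named `rows.jsonl`:

```python
# Minimal inspection sketch. "rows.jsonl" is a hypothetical JSON Lines export
# of this table; only the standard library is used.
import json

with open("rows.jsonl", "r", encoding="utf-8") as fh:
    rows = [json.loads(line) for line in fh]

for row in rows:
    # Repo path, raw size, and the three per-row filter statistics.
    print(row["max_stars_repo_path"], row["size"],
          row["filtered:remove_function_no_docstring"],
          row["filtered:remove_class_no_docstring"],
          row["filtered:remove_delete_markers"])
```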
hexsha: f77ea59bf822539953a84da90bca3bded5b1d71d | size: 4,051 | ext: py | lang: Python
max_stars: spider/wenkubaidu/wenku.py @ JackyYuanjie/python-scripts (490eb9668bda6db004ae87d204588fb6ffe56051) | licenses: ["Apache-2.0"] | count: 1 | events: 2021-07-08T05:09:38.000Z to 2021-07-08T05:09:38.000Z
max_issues: same path/repo/head/licenses | count: null | events: null
max_forks: same path/repo/head/licenses | count: 1 | events: 2020-01-09T07:29:17.000Z to 2020-01-09T07:29:17.000Z
content:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import urllib
import requests
from bs4 import BeautifulSoup
"""
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=22&rn=1&type=ppt&callback=bd__cbs__s5lw72
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=23&rn=1&type=ppt&callback=bd__cbs__coo5j5
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=21&rn=1&type=ppt&callback=bd__cbs__2hc9ds
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=5&rn=1&type=ppt&callback=bd__cbs__nh2gao
"""
linkfiles = "F:\\PythonProject\\python-scripts\\spider\\wenkubaidu\\odnimages\\"
class WK():
'''
    Baidu Wenku (百度文库) document downloader.
'''
if __name__=="__main__":
wk = WK()
for pn in range(1,26):
url = 'https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn={}&rn=1&type=ppt&callback=bd__cbs__nh2gao'.format(pn)
print(url,"下载完成")
wk.spyder(url)
"""
with open(linkfiles + "wenkulink.txt",'a+') as fw:
# fw.write(url) # page-index links, from which the image URLs can be obtained
# fw.write("\n")
"""
# wk.spyder(wk.baseUrl)
"""
Note: this URL can be opened directly in a browser, but in code the backslash characters must be removed first, otherwise the request fails.
https:\/\/wkretype.bdimg.com\/retype\/zoom\/6a30bde2f8c75fbfc77db23c?pn=4&raww=1080&rawh=810&o=jpg_6&md5sum=f9ace759cd13bfd0f9ad186d77af05fa&sign=0756077547&png=41164-280359&jpg=227559-365825
"""
avg_line_length: 44.032609 | max_line_length: 883 | alphanum_fraction: 0.689459
original_content:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import urllib
import requests
from bs4 import BeautifulSoup
"""
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=22&rn=1&type=ppt&callback=bd__cbs__s5lw72
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=23&rn=1&type=ppt&callback=bd__cbs__coo5j5
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=21&rn=1&type=ppt&callback=bd__cbs__2hc9ds
https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn=5&rn=1&type=ppt&callback=bd__cbs__nh2gao
"""
linkfiles = "F:\\PythonProject\\python-scripts\\spider\\wenkubaidu\\odnimages\\"
class WK():
'''
    Baidu Wenku (百度文库) document downloader.
'''
def __init__(self):
self.baseUrl = "https://wenku.baidu.com/view/564fc70a77a20029bd64783e0912a21615797ff7.html"
self.header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) \
AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}
def getResponse(self,url):
try:
req = urllib.request.Request(url,headers = self.header)
response = urllib.request.urlopen(req,timeout = 10)
        except Exception:
            print("page request failed")
else:
return response.read().decode('gb2312')
def spyder(self,url):
html = self.getResponse(url)
# print(html)
start_index = html.find("https:")
# print(start_index)
print('-'*30)
end_index = html.find('","')
# print(end_index)
print(html[start_index:end_index])
"""
with open(linkfiles + "wenkucontent.txt",'a+') as fa:
fa.write(html)
fa.write("\n")
"""
header = self.header
header['Cookie'] = 'BAIDUID=2CC737B4D3E3D51EA7529F8065A8B708:FG=1; PSTM=1553749648; BIDUPSID=36D49C7DE8F84F920A6D6ADE0E719043; _click_param_pc_rec_doc_2017_testid=4; ZD_ENTRY=bing; cflag=13%3A3; session_name=cn.bing.com; isJiaoyuVip=1; wk_shifen_pop_window=7765_1_1567070315751; Hm_lvt_d8bfb560f8d03bbefc9bdecafc4a4bf6=1566318226,1566571568,1567070267,1567070708; session_id=1567070708094; BCLID=11327784929476180808; BDSFRCVID=aD0OJeC624LjSNrwjvtqhFVMiLK2tRQTH6055tzl7cu_UIsP_XwLEG0PDM8g0Ku-5SOpogKK0mOTHv-F_2uxOjjg8UtVJeC6EG0P3J; H_BDCLCKID_SF=JJ-qVCPbtDvbfP0kb-r_bPk0hNLHJK62aKDs3l-MBhcqEIL4jMv80UCX5U6q-no33HcuBlRcttbCVfbSj60hjJ0hhaJ2-lRPW67TMMn5Bp5nhMJeXj7JDMP0qHogWbOy523ion6vQpn-KqQ3DRoWXPIqbN7P-p5Z5mAqKl0MLIOkbRO4-TFaejOQDfK; userFirstTime=true; ___wk_scode_token=XdTTTDexiuWKJhoY9dcpx3hQOGs%2Bniyz9YrLayUnQsQ%3D; Hm_lpvt_d8bfb560f8d03bbefc9bdecafc4a4bf6=1567072063'
# print(header)
urlrep = html[start_index:end_index].replace('\\','')
# print(urlrep)
# req = requests.get('https://wkretype.bdimg.com//retype//zoom//6a30bde2f8c75fbfc77db23c?pn=4&raww=1080&rawh=810&o=jpg_6&md5sum=f9ace759cd13bfd0f9ad186d77af05fa&sign=0756077547&png=41164-280359&jpg=227559-365825')
req = requests.get(urlrep,headers = header)
"""
with open(linkfiles + "b.png",'wb') as fb:
fb.write(req.content)
"""
p_index = html.find('"page":')
p_end = html.find('}]')
pag = html[p_index+7:p_end]
with open(linkfiles + pag + ".png",'wb') as fb:
fb.write(req.content)
if __name__=="__main__":
wk = WK()
for pn in range(1,26):
url = 'https://wenku.baidu.com/browse/getrequest?doc_id=a1eec6289b6648d7c1c7468f&pn={}&rn=1&type=ppt&callback=bd__cbs__nh2gao'.format(pn)
print(url,"下载完成")
wk.spyder(url)
"""
with open(linkfiles + "wenkulink.txt",'a+') as fw:
# fw.write(url) # page-index links, from which the image URLs can be obtained
# fw.write("\n")
"""
# wk.spyder(wk.baseUrl)
"""
Note: this URL can be opened directly in a browser, but in code the backslash characters must be removed first, otherwise the request fails.
https:\/\/wkretype.bdimg.com\/retype\/zoom\/6a30bde2f8c75fbfc77db23c?pn=4&raww=1080&rawh=810&o=jpg_6&md5sum=f9ace759cd13bfd0f9ad186d77af05fa&sign=0756077547&png=41164-280359&jpg=227559-365825
"""
filtered:remove_function_no_docstring: 2,563 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 80
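The note at the end of wenku.py explains that the zoom URL comes back JSON-escaped and must have its backslashes stripped before it can be requested. A minimal sketch of just that step (URL shortened for illustration):

```python
# The escaped URL as it appears in the fetched page source (raw string keeps
# the backslash-slash sequences literal).
escaped = r"https:\/\/wkretype.bdimg.com\/retype\/zoom\/6a30bde2f8c75fbfc77db23c?pn=4"
clean = escaped.replace("\\", "")  # the same call spyder() uses
print(clean)  # https://wkretype.bdimg.com/retype/zoom/6a30bde2f8c75fbfc77db23c?pn=4
```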
hexsha: dc824dbc29f0b42ffaa3b7d3fe8147c1f7a32031 | size: 18,983 | ext: py | lang: Python
max_stars: source/xgm_mod_options.py @ Omni-9/warband_mod_source (c9737d7793ccdb185d8d3caedda0da915104e405) | licenses: ["BSD-Source-Code"] | count: 14 | events: 2018-09-20T23:01:27.000Z to 2021-05-25T11:05:09.000Z
max_issues: same path/repo/head/licenses | count: 44 | events: 2018-09-15T03:05:50.000Z to 2022-03-22T02:46:24.000Z
max_forks: same path/repo/head/licenses | count: 13 | events: 2018-10-02T11:45:24.000Z to 2021-08-22T18:41:44.000Z
content:
from header_common import *
from header_presentations import *
from header_mission_templates import *
from ID_meshes import *
from header_operations import *
from header_triggers import *
from module_constants import *
#import string
from xgm_mod_options_header import *
############################################################################
## 0) overlay id (not used atm, but can allow searches in future. just put something unique)
## 1) overlay type (defined in xgm_mod_options_header)
## 2) overlay type specific parameters (e.g. for number box, it can be lower/upper range, for cbobox, it would be the cbo items etc)
## a) xgm_ov_numberbox : lower_bound(inclusive), upper_bound(exclusive). e.g. [0,101] for range of values from 0-100
## b) xgm_ov_combolabel/xgm_ov_combobutton : list of combo items. e.g. ["option1", "option2", "option3"]
## c) xgm_ov_slider : lower_bound(inclusive), upper_bound(exclusive). e.g. [0,101] for range of values from 0-100
## d) xgm_ov_checkbox : not used fttb. just leave empty. e.g. []
## 3) text label
## 4) reserved for text label flags
## 5) description (unused for now. may be used for stuff like tooltip in future)
## 6) reserved for description flags
## 7) initialization op block. Used for updating the overlay values from game values. Must assign the desired value to reg1.
## 8) update op block. Used for updating game values from overlay values. The overlay value is in reg1.
## 9) optional. reserved for option page id. unused for now. leave out for options using general page.
############################################################################
mod_options = [
("camp_fuck_setting", xgm_ov_combolabel, ["Disabled", "Consensual Only", "All Enabled"], "Sexual Content:", 0,
"Settings for sexual content in game.", 0,
[(try_begin),
(eq, "$g_sexual_content", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_sexual_content", 1),
(assign, reg1, 1),
(else_try),
(eq, "$g_sexual_content", 2),
(assign, reg1, 2),
(try_end),],
[(try_begin),
(eq, reg1, 0),
(assign, "$g_sexual_content", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_sexual_content", 1),
(else_try),
(eq, reg1, 2),
(assign, "$g_sexual_content", 2),
(try_end),
],
),
("dplmc_woman_prejudice", xgm_ov_combolabel, ["Historical", "Tolerant", "Utopian"], "Diplomacy - Prejudice:", 0,
"Setting for Diplomacy's prejudice changes.", 0,
[
(assign, reg1, "$g_disable_condescending_comments"),
],
[
(assign, "$g_disable_condescending_comments", reg1),
],
),
("camp_polygamy", xgm_ov_checkbox, [], "Polygamy:", 0,
"Toggles polygamy settings", 0,
[(try_begin),
(eq, "$g_polygamy", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_polygamy", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_polygamy", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_polygamy", 1),
(try_end),
],
),
( "camp_nohomobro", xgm_ov_checkbox , [],
"Disable Gay:", 0,
"Disables gay scenes.", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_nohomo"),
],
[ # update block (value is in reg1)
(assign, "$g_nohomo", reg1),
],
),
( "camp_no_dancers", xgm_ov_checkbox , [],
"Feast Dancers:", 0,
"Toggles dancers during feasts.", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_feast_dancers"),
],
[ # update block (value is in reg1)
(assign, "$g_feast_dancers", reg1),
],
),
("camp_dark_hunters", xgm_ov_checkbox, [], "Black Khergits and Dark Hunters:", 0,
"Settings for Dark Hunters and Black Khergits.", 0,
[
(try_begin),
(eq, "$g_dark_hunters_enabled", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_dark_hunters_enabled", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_dark_hunters_enabled", 0),
(assign, ":removed", 0),
(try_for_parties, ":party_no"),
(party_get_template_id, ":ptid", ":party_no"),
(this_or_next|eq, ":ptid", "pt_dark_hunters"),
(eq, ":ptid", "pt_black_khergit_raiders"),
(remove_party, ":party_no"),
(val_add, ":removed", 1),
(try_end),
(assign, reg0, ":removed"),
(display_message, "@{reg0} parties removed from the map."),
(else_try),
(eq, reg1, 1),
(assign, "$g_dark_hunters_enabled", 1),
(try_end),
],
),
( "keep_companions", xgm_ov_checkbox , [],
"Keep Companions:", 0,
"Setting for keeping companions after defeat", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_keep_companions"),
],
[ # update block (value is in reg1)
(assign, "$g_keep_companions", reg1),
],
),
( "disable_complaints", xgm_ov_checkbox , [],
"Disable Complaints:", 0,
"Setting for disabling companion complaints", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$disable_npc_complaints"),
],
[ # update block (value is in reg1)
(assign, "$disable_npc_complaints", reg1),
],
),
( "disable_bodyguard", xgm_ov_checkbox , [],
"Disable Bodyguards:", 0,
"Setting for disabling companions as bodyguards", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$disable_bodyguards"),
],
[ # update block (value is in reg1)
(assign, "$disable_bodyguards", reg1),
],
),
("camp_realistic_wounding", xgm_ov_checkbox, [], "Realistic Casualties:", 0,
"Toggles realistic wounding for other damage types", 0,
[(try_begin),
(eq, "$g_realistic_wounding", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_realistic_wounding", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_realistic_wounding", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_realistic_wounding", 1),
(try_end),
],
),
("enable_shield_bash", xgm_ov_combolabel, ["Disabled", "Player Only", "All Combatants"], "Shield Bash:", 0,
"Setting for Diplomacy's prejudice changes.", 0,
[
(assign, reg1, "$g_enable_shield_bash"),
],
[
(assign, "$g_enable_shield_bash", reg1),
],
),
("horizontal_divide", xgm_ov_line, [], "", 0,"", 0,[],[],),
( "dplmc_horsespeed", xgm_ov_checkbox , [],
"Diplomacy - Horse Speed:", 0,
"Setting for Diplomacy's horse speed changes", 0,
[ # initialization block (set value in reg1)
(store_sub,reg1,1,"$g_dplmc_horse_speed"),
],
[ # update block (value is in reg1)
(store_sub,"$g_dplmc_horse_speed",1,reg1),
],
),
( "dplmc_battlecontinue", xgm_ov_checkbox , [],
"Diplomacy - Battle Continuation:", 0,
"Setting for Diplomacy's battle continuation", 0,
[ # initialization block (set value in reg1)
(store_sub,reg1,1,"$g_dplmc_battle_continuation"),
],
[ # update block (value is in reg1)
(store_sub,"$g_dplmc_battle_continuation",1,reg1),
],
),
( "dplmc_disguise", xgm_ov_checkbox , [],
"Diplomacy - Disguise System:", 0,
"Setting for Diplomacy's disguise system", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_dplmc_player_disguise"),
],
[ # update block (value is in reg1)
(assign, "$g_dplmc_player_disguise", reg1),
],
),
( "dplmc_terrain_advantage", xgm_ov_checkbox , [],
"Diplomacy - Autocalc Terrain Advantage:", 0,
"Setting for Diplomacy's terrain advantage.", 0,
[ # initialization block (set value in reg1)
(try_begin),
(eq, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_ENABLE),
(assign, reg1, 1),
(try_end),
],
[ # update block (value is in reg1)
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_ENABLE),
(try_end),
],
),
( "dplmc_lord_recycling", xgm_ov_checkbox , [],
"Diplomacy - Returning From Exile:", 0,
"Setting for Diplomacy's terrain advantage.", 0,
[ # initialization block (set value in reg1)
(try_begin),
(eq, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_ENABLE),
(assign, reg1, 1),
(try_end),
],
[ # update block (value is in reg1)
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_ENABLE),
(try_end),
],
),
("dplmc_ai_changes_a", xgm_ov_combolabel, ["Disabled", "Low", "Medium", "High"], "Diplomacy - AI Changes:", 0,
"Setting for Diplomacy's AI changes.", 0,
[
(try_begin),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_LOW),
(assign, reg1, 1),
(else_try),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_MEDIUM),
(assign, reg1, 2),
(else_try),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_HIGH),
(assign, reg1, 3),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_LOW),
(else_try),
(eq, reg1, 2),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_MEDIUM),
(else_try),
(eq, reg1, 3),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_HIGH),
(try_end),
],
),
("dplmc_gold_changes", xgm_ov_combolabel, ["Disabled", "Low", "Medium", "High"], "Diplomacy - Economy Changes:", 0,
"Setting for Diplomacy's economy changes.", 0,
[
(try_begin),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_LOW),
(assign, reg1, 1),
(else_try),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_MEDIUM),
(assign, reg1, 2),
(else_try),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_HIGH),
(assign, reg1, 3),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_LOW),
(else_try),
(eq, reg1, 2),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_MEDIUM),
(else_try),
(eq, reg1, 3),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_HIGH),
(try_end),
],
),
("horizontal_divide", xgm_ov_line, [], "", 0,"", 0,[],[],),
("minimap_setting", xgm_ov_combolabel, ["Compass Style", "Small Minimap", "Medium Minimap", "Large Minimap", "Disabled"], "Battle Minimap Overlay:", 0,
"Setting for the minimap.", 0,
[
(try_begin),
(eq, "$g_minimap_style", -1),
(assign, reg1, 4),
(else_try),
(assign, reg1, "$g_minimap_style"),
(try_end),
],
[
(try_begin),
(eq, reg1, 4),
(assign, "$g_minimap_style", -1),
(else_try),
(assign, "$g_minimap_style", reg1),
(try_end),
],
),
("minimap_setting", xgm_ov_combolabel, ["Disabled", "Only Allies", "Only Enemies", "All Troops"], "Troop HP Bars:", 0,
"Setting for troop HP bars.", 0,
[
(try_begin), # Ally
(eq, "$g_hp_bar_enemy", 0),
(eq, "$g_hp_bar_ally", 1),
(assign, reg1, 1),
(else_try), # Enemy
(eq, "$g_hp_bar_enemy", 1),
(eq, "$g_hp_bar_ally", 0),
(assign, reg1, 2),
(else_try), # Both
(eq, "$g_hp_bar_enemy", 1),
(eq, "$g_hp_bar_ally", 1),
(assign, reg1, 3),
(else_try), # None
(assign, reg1, 0),
(try_end),
],
[
(try_begin), # Ally
(eq, reg1, 1),
(assign, "$g_hp_bar_enemy", 0),
(assign, "$g_hp_bar_ally", 1),
(else_try), # Enemy
(eq, reg1, 2),
(assign, "$g_hp_bar_enemy", 1),
(assign, "$g_hp_bar_ally", 0),
(else_try), # Both
(eq, reg1, 3),
(assign, "$g_hp_bar_enemy", 1),
(assign, "$g_hp_bar_ally", 1),
(else_try), # None
(assign, "$g_hp_bar_enemy", 0),
(assign, "$g_hp_bar_ally", 0),
(try_end),
],
),
("minimap_setting", xgm_ov_numberbox, [3,81], "HP Bar Distance Limit:", 0,
"Setting for the HP Bars.", 0,
[
(assign, reg1, "$g_hp_bar_dis_limit"),
],
[
(assign, "$g_hp_bar_dis_limit", reg1),
],
),
("camp_troop_ratio_bar", xgm_ov_checkbox, [], "Troop ratio bar:", 0,
"Toggles troop ratio bar", 0,
[(try_begin),
(eq, "$g_troop_ratio_bar", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_troop_ratio_bar", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_troop_ratio_bar", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_troop_ratio_bar", 1),
(try_end),
],
),
("camp_decapitation", xgm_ov_checkbox, [], "Decapitation:", 0,
"Toggles Decapitation", 0,
[(try_begin),
(eq, "$g_decapitation_enabled", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_decapitation_enabled", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_decapitation_enabled", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_decapitation_enabled", 1),
(try_end),
],
),
("horizontal_divide", xgm_ov_line, [], "", 0,"", 0,[],[],),
( "op_cheatmode", xgm_ov_checkbox , [],
"Cheat mode:", 0,
"This sets the in-game cheat mode", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$cheat_mode"),
],
[ # update block (value is in reg1)
(assign, "$cheat_mode", reg1),
],
),
] # mod_options
# TODO: add option pages here
# collation of all *_mod_options.py from active mods
# import and merge related variables from all {active_mod}_mod_options.py for all active mods
#try:
# from modmerger_options import options, mods_active
# from modmerger import mod_get_process_order, mod_is_active
# from util_common import add_objects
# modcomp_name = "mod_options"
# var_list = ["mod_options",]
#from modmerger import modmerge
#modmerge(var_set)
# mod_process_order = mod_get_process_order(modcomp_name)
# vars_to_import= ["mod_options"]
# for x in mod_process_order:
# if(mod_is_active(x) and x <> "xgm_mod_options"): # must exclude this file since we are using this file as base
# try:
#mergefn_name = "modmerge_%s"%(modcomp_name)
# target_module_name = "%s_%s"%(x,modcomp_name)
# _temp = __import__( target_module_name , globals(), locals(), vars_to_import,-1)
# logger.info("Merging objects for component \"%s\" from mod \"%s\"..."%(modcomp_name,x))
#
# add_objects(mod_options, _temp.mod_options) # import from target module.
#
# # TODO: collect option pages
# except ImportError:
# errstring = "Failed importing for component \"%s\" for mod \"%s\"." % (modcomp_name, x)
# logger.debug(errstring)
# else:
# errstring = "Mod \"%s\" not active for Component \"%s\"." % (x, modcomp_name)
# logger.debug(errstring)
#except:
# raise
# collation end
# At this point, mod_options will contain the list of all mod_options specified.
## utility functions
from util_wrappers import *
# helper wrapper to access mod_options
## class ModOptionWrapper
# this function will compute the total height required for a list of mod_options.
## mod_options_get_total_height
avg_line_length: 30.716828 | max_line_length: 155 | alphanum_fraction: 0.566928
original_content:
from header_common import *
from header_presentations import *
from header_mission_templates import *
from ID_meshes import *
from header_operations import *
from header_triggers import *
from module_constants import *
#import string
from xgm_mod_options_header import *
############################################################################
## 0) overlay id (not used atm, but can allow searches in future. just put something unique)
## 1) overlay type (defined in xgm_mod_options_header)
## 2) overlay type specific parameters (e.g. for number box, it can be lower/upper range, for cbobox, it would be the cbo items etc)
## a) xgm_ov_numberbox : lower_bound(inclusive), upper_bound(exclusive). e.g. [0,101] for range of values from 0-100
## b) xgm_ov_combolabel/xgm_ov_combobutton : list of combo items. e.g. ["option1", "option2", "option3"]
## c) xgm_ov_slider : lower_bound(inclusive), upper_bound(exclusive). e.g. [0,101] for range of values from 0-100
## d) xgm_ov_checkbox : not used fttb. just leave empty. e.g. []
## 3) text label
## 4) reserved for text label flags
## 5) description (unused for now. may be used for stuff like tooltip in future)
## 6) reserved for description flags
## 7) initialization op block. Used for updating the overlay values from game values. Must assign the desired value to reg1.
## 8) update op block. Used for updating game values from overlay values. The overlay value is in reg1.
## 9) optional. reserved for option page id. unused for now. leave out for options using general page.
############################################################################
mod_options = [
("camp_fuck_setting", xgm_ov_combolabel, ["Disabled", "Consensual Only", "All Enabled"], "Sexual Content:", 0,
"Settings for sexual content in game.", 0,
[(try_begin),
(eq, "$g_sexual_content", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_sexual_content", 1),
(assign, reg1, 1),
(else_try),
(eq, "$g_sexual_content", 2),
(assign, reg1, 2),
(try_end),],
[(try_begin),
(eq, reg1, 0),
(assign, "$g_sexual_content", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_sexual_content", 1),
(else_try),
(eq, reg1, 2),
(assign, "$g_sexual_content", 2),
(try_end),
],
),
("dplmc_woman_prejudice", xgm_ov_combolabel, ["Historical", "Tolerant", "Utopian"], "Diplomacy - Prejudice:", 0,
"Setting for Diplomacy's prejudice changes.", 0,
[
(assign, reg1, "$g_disable_condescending_comments"),
],
[
(assign, "$g_disable_condescending_comments", reg1),
],
),
("camp_polygamy", xgm_ov_checkbox, [], "Polygamy:", 0,
"Toggles polygamy settings", 0,
[(try_begin),
(eq, "$g_polygamy", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_polygamy", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_polygamy", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_polygamy", 1),
(try_end),
],
),
( "camp_nohomobro", xgm_ov_checkbox , [],
"Disable Gay:", 0,
"Disables gay scenes.", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_nohomo"),
],
[ # update block (value is in reg1)
(assign, "$g_nohomo", reg1),
],
),
( "camp_no_dancers", xgm_ov_checkbox , [],
"Feast Dancers:", 0,
"Toggles dancers during feasts.", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_feast_dancers"),
],
[ # update block (value is in reg1)
(assign, "$g_feast_dancers", reg1),
],
),
("camp_dark_hunters", xgm_ov_checkbox, [], "Black Khergits and Dark Hunters:", 0,
"Settings for Dark Hunters and Black Khergits.", 0,
[
(try_begin),
(eq, "$g_dark_hunters_enabled", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_dark_hunters_enabled", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_dark_hunters_enabled", 0),
(assign, ":removed", 0),
(try_for_parties, ":party_no"),
(party_get_template_id, ":ptid", ":party_no"),
(this_or_next|eq, ":ptid", "pt_dark_hunters"),
(eq, ":ptid", "pt_black_khergit_raiders"),
(remove_party, ":party_no"),
(val_add, ":removed", 1),
(try_end),
(assign, reg0, ":removed"),
(display_message, "@{reg0} parties removed from the map."),
(else_try),
(eq, reg1, 1),
(assign, "$g_dark_hunters_enabled", 1),
(try_end),
],
),
( "keep_companions", xgm_ov_checkbox , [],
"Keep Companions:", 0,
"Setting for keeping companions after defeat", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_keep_companions"),
],
[ # update block (value is in reg1)
(assign, "$g_keep_companions", reg1),
],
),
( "disable_complaints", xgm_ov_checkbox , [],
"Disable Complaints:", 0,
"Setting for disabling companion complaints", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$disable_npc_complaints"),
],
[ # update block (value is in reg1)
(assign, "$disable_npc_complaints", reg1),
],
),
( "disable_bodyguard", xgm_ov_checkbox , [],
"Disable Bodyguards:", 0,
"Setting for disabling companions as bodyguards", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$disable_bodyguards"),
],
[ # update block (value is in reg1)
(assign, "$disable_bodyguards", reg1),
],
),
("camp_realistic_wounding", xgm_ov_checkbox, [], "Realistic Casualties:", 0,
"Toggles realistic wounding for other damage types", 0,
[(try_begin),
(eq, "$g_realistic_wounding", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_realistic_wounding", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_realistic_wounding", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_realistic_wounding", 1),
(try_end),
],
),
("enable_shield_bash", xgm_ov_combolabel, ["Disabled", "Player Only", "All Combatants"], "Shield Bash:", 0,
"Setting for Diplomacy's prejudice changes.", 0,
[
(assign, reg1, "$g_enable_shield_bash"),
],
[
(assign, "$g_enable_shield_bash", reg1),
],
),
("horizontal_divide", xgm_ov_line, [], "", 0,"", 0,[],[],),
( "dplmc_horsespeed", xgm_ov_checkbox , [],
"Diplomacy - Horse Speed:", 0,
"Setting for Diplomacy's horse speed changes", 0,
[ # initialization block (set value in reg1)
(store_sub,reg1,1,"$g_dplmc_horse_speed"),
],
[ # update block (value is in reg1)
(store_sub,"$g_dplmc_horse_speed",1,reg1),
],
),
( "dplmc_battlecontinue", xgm_ov_checkbox , [],
"Diplomacy - Battle Continuation:", 0,
"Setting for Diplomacy's battle continuation", 0,
[ # initialization block (set value in reg1)
(store_sub,reg1,1,"$g_dplmc_battle_continuation"),
],
[ # update block (value is in reg1)
(store_sub,"$g_dplmc_battle_continuation",1,reg1),
],
),
( "dplmc_disguise", xgm_ov_checkbox , [],
"Diplomacy - Disguise System:", 0,
"Setting for Diplomacy's disguise system", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$g_dplmc_player_disguise"),
],
[ # update block (value is in reg1)
(assign, "$g_dplmc_player_disguise", reg1),
],
),
( "dplmc_terrain_advantage", xgm_ov_checkbox , [],
"Diplomacy - Autocalc Terrain Advantage:", 0,
"Setting for Diplomacy's terrain advantage.", 0,
[ # initialization block (set value in reg1)
(try_begin),
(eq, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_ENABLE),
(assign, reg1, 1),
(try_end),
],
[ # update block (value is in reg1)
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_terrain_advantage", DPLMC_TERRAIN_ADVANTAGE_ENABLE),
(try_end),
],
),
( "dplmc_lord_recycling", xgm_ov_checkbox , [],
"Diplomacy - Returning From Exile:", 0,
"Setting for Diplomacy's terrain advantage.", 0,
[ # initialization block (set value in reg1)
(try_begin),
(eq, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_ENABLE),
(assign, reg1, 1),
(try_end),
],
[ # update block (value is in reg1)
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_lord_recycling", DPLMC_LORD_RECYCLING_ENABLE),
(try_end),
],
),
("dplmc_ai_changes_a", xgm_ov_combolabel, ["Disabled", "Low", "Medium", "High"], "Diplomacy - AI Changes:", 0,
"Setting for Diplomacy's AI changes.", 0,
[
(try_begin),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_LOW),
(assign, reg1, 1),
(else_try),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_MEDIUM),
(assign, reg1, 2),
(else_try),
(eq, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_HIGH),
(assign, reg1, 3),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_LOW),
(else_try),
(eq, reg1, 2),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_MEDIUM),
(else_try),
(eq, reg1, 3),
(assign, "$g_dplmc_ai_changes", DPLMC_AI_CHANGES_HIGH),
(try_end),
],
),
("dplmc_gold_changes", xgm_ov_combolabel, ["Disabled", "Low", "Medium", "High"], "Diplomacy - Economy Changes:", 0,
"Setting for Diplomacy's economy changes.", 0,
[
(try_begin),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_DISABLE),
(assign, reg1, 0),
(else_try),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_LOW),
(assign, reg1, 1),
(else_try),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_MEDIUM),
(assign, reg1, 2),
(else_try),
(eq, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_HIGH),
(assign, reg1, 3),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_DISABLE),
(else_try),
(eq, reg1, 1),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_LOW),
(else_try),
(eq, reg1, 2),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_MEDIUM),
(else_try),
(eq, reg1, 3),
(assign, "$g_dplmc_gold_changes", DPLMC_GOLD_CHANGES_HIGH),
(try_end),
],
),
("horizontal_divide", xgm_ov_line, [], "", 0,"", 0,[],[],),
("minimap_setting", xgm_ov_combolabel, ["Compass Style", "Small Minimap", "Medium Minimap", "Large Minimap", "Disabled"], "Battle Minimap Overlay:", 0,
"Setting for the minimap.", 0,
[
(try_begin),
(eq, "$g_minimap_style", -1),
(assign, reg1, 4),
(else_try),
(assign, reg1, "$g_minimap_style"),
(try_end),
],
[
(try_begin),
(eq, reg1, 4),
(assign, "$g_minimap_style", -1),
(else_try),
(assign, "$g_minimap_style", reg1),
(try_end),
],
),
("minimap_setting", xgm_ov_combolabel, ["Disabled", "Only Allies", "Only Enemies", "All Troops"], "Troop HP Bars:", 0,
"Setting for troop HP bars.", 0,
[
(try_begin), # Ally
(eq, "$g_hp_bar_enemy", 0),
(eq, "$g_hp_bar_ally", 1),
(assign, reg1, 1),
(else_try), # Enemy
(eq, "$g_hp_bar_enemy", 1),
(eq, "$g_hp_bar_ally", 0),
(assign, reg1, 2),
(else_try), # Both
(eq, "$g_hp_bar_enemy", 1),
(eq, "$g_hp_bar_ally", 1),
(assign, reg1, 3),
(else_try), # None
(assign, reg1, 0),
(try_end),
],
[
(try_begin), # Ally
(eq, reg1, 1),
(assign, "$g_hp_bar_enemy", 0),
(assign, "$g_hp_bar_ally", 1),
(else_try), # Enemy
(eq, reg1, 2),
(assign, "$g_hp_bar_enemy", 1),
(assign, "$g_hp_bar_ally", 0),
(else_try), # Both
(eq, reg1, 3),
(assign, "$g_hp_bar_enemy", 1),
(assign, "$g_hp_bar_ally", 1),
(else_try), # None
(assign, "$g_hp_bar_enemy", 0),
(assign, "$g_hp_bar_ally", 0),
(try_end),
],
),
("minimap_setting", xgm_ov_numberbox, [3,81], "HP Bar Distance Limit:", 0,
"Setting for the HP Bars.", 0,
[
(assign, reg1, "$g_hp_bar_dis_limit"),
],
[
(assign, "$g_hp_bar_dis_limit", reg1),
],
),
("camp_troop_ratio_bar", xgm_ov_checkbox, [], "Troop ratio bar:", 0,
"Toggles troop ratio bar", 0,
[(try_begin),
(eq, "$g_troop_ratio_bar", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_troop_ratio_bar", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_troop_ratio_bar", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_troop_ratio_bar", 1),
(try_end),
],
),
("camp_decapitation", xgm_ov_checkbox, [], "Decapitation:", 0,
"Toggles Decapitation", 0,
[(try_begin),
(eq, "$g_decapitation_enabled", 0),
(assign, reg1, 0),
(else_try),
(eq, "$g_decapitation_enabled", 1),
(assign, reg1, 1),
(try_end),
],
[
(try_begin),
(eq, reg1, 0),
(assign, "$g_decapitation_enabled", 0),
(else_try),
(eq, reg1, 1),
(assign, "$g_decapitation_enabled", 1),
(try_end),
],
),
("horizontal_divide", xgm_ov_line, [], "", 0,"", 0,[],[],),
( "op_cheatmode", xgm_ov_checkbox , [],
"Cheat mode:", 0,
"This sets the in-game cheat mode", 0,
[ # initialization block (set value in reg1)
(assign, reg1, "$cheat_mode"),
],
[ # update block (value is in reg1)
(assign, "$cheat_mode", reg1),
],
),
] # mod_options
# TODO: add option pages here
# collation of all *_mod_options.py from active mods
# import and merge related variables from all {active_mod}_mod_options.py for all active mods
#try:
# from modmerger_options import options, mods_active
# from modmerger import mod_get_process_order, mod_is_active
# from util_common import add_objects
# modcomp_name = "mod_options"
# var_list = ["mod_options",]
#from modmerger import modmerge
#modmerge(var_set)
# mod_process_order = mod_get_process_order(modcomp_name)
# vars_to_import= ["mod_options"]
# for x in mod_process_order:
# if(mod_is_active(x) and x <> "xgm_mod_options"): # must exclude this file since we are using this file as base
# try:
#mergefn_name = "modmerge_%s"%(modcomp_name)
# target_module_name = "%s_%s"%(x,modcomp_name)
# _temp = __import__( target_module_name , globals(), locals(), vars_to_import,-1)
# logger.info("Merging objects for component \"%s\" from mod \"%s\"..."%(modcomp_name,x))
#
# add_objects(mod_options, _temp.mod_options) # import from target module.
#
# # TODO: collect option pages
# except ImportError:
# errstring = "Failed importing for component \"%s\" for mod \"%s\"." % (modcomp_name, x)
# logger.debug(errstring)
# else:
# errstring = "Mod \"%s\" not active for Component \"%s\"." % (x, modcomp_name)
# logger.debug(errstring)
#except:
# raise
# collation end
# At this point, mod_options will contain the list of all mod_options specified.
## utility functions
from util_wrappers import *
# helper wrapper to access mod_options
class ModOptionWrapper(BaseWrapper):
def __init__(self, _data):
# verify _data
if( not isinstance(_data,TupleType) or (len(_data)<2)):
raise ValueError("ItemSetWrapper: Wrapped must be a tuple.")
BaseWrapper.__init__(self,_data)
def GetId(self):
return self.data[0]
def GetType(self):
return self.data[1]
def GetParameters(self):
if len(self.data) >2:
return self.data[2]
return None
def GetParameter(self, i):
if len(self.data) >2:
return self.data[2][i]
return None
def GetTextLabel(self):
if len(self.data) >3:
return self.data[3]
return None
def GetTextLabelFlags(self):
if len(self.data) >4:
return self.data[4]
return None
def GetDescription(self):
if len(self.data) >5:
return self.data[5]
return None
def GetDescriptionFlags(self):
if len(self.data) >6:
return self.data[6]
return None
def GetInitializeBlock(self):
if len(self.data) >7:
return OpBlockWrapper(self.data[7])
return None
def GetUpdateBlock(self):
if len(self.data) >8:
return OpBlockWrapper(self.data[8])
return None
def GetHeight(self):
if self.GetType() == xgm_ov_line:
return xgm_mod_options_line_height
elif self.GetType() in [xgm_ov_checkbox, xgm_ov_numberbox, xgm_ov_combolabel]:
return xgm_mod_options_property_height
return 0 # no other types supported
## class ModOptionWrapper
# this function will compute the total height required for a list of mod_options.
def mod_options_get_total_height(_mod_options = mod_options):
height = 0
for x in _mod_options:
aModOption = ModOptionWrapper(x)
height += aModOption.GetHeight()
# for x in _mod_options:
    return height
## mod_options_get_total_height
filtered:remove_function_no_docstring: 1,469 | filtered:remove_class_no_docstring: 15 | filtered:remove_delete_markers: 368
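The comment block at the top of xgm_mod_options.py documents the tuple layout field by field. As a compact illustration, here is what a minimal checkbox entry would look like under that layout; the game variable `$g_example_toggle` is hypothetical and not part of the original file:

```python
# A minimal mod_options entry following the documented field order (fields 0-8).
example_option = (
    "example_toggle",               # 0) overlay id (should be unique)
    xgm_ov_checkbox,                # 1) overlay type
    [],                             # 2) type-specific parameters (empty for checkboxes)
    "Example Toggle:", 0,           # 3) text label, 4) label flags
    "An illustrative option.", 0,   # 5) description, 6) description flags
    [(assign, reg1, "$g_example_toggle")],  # 7) init block: game value -> reg1
    [(assign, "$g_example_toggle", reg1)],  # 8) update block: reg1 -> game value
)
```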
hexsha: 8abd70df157d14db679e659f636c0cd688861cb3 | size: 6,182 | ext: py | lang: Python
max_stars: examples/pde/utilities3.py @ mkhodak/relax (f6b5a318d74fc1209ba67ec95d2118698194f9c5) | licenses: ["MIT"] | count: 11 | events: 2021-10-01T17:23:18.000Z to 2022-03-31T22:10:36.000Z
max_issues / max_forks: same path/repo/head/licenses | counts: null | events: null
content:
import torch
import numpy as np
import scipy.io
import h5py
import torch.nn as nn
#################################################
#
# Utilities
#
#################################################
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# reading data
# normalization, pointwise gaussian
# normalization, Gaussian
# normalization, scaling by range
#loss function with rel/abs Lp loss
# A simple feedforward neural network
avg_line_length: 26.761905 | max_line_length: 113 | alphanum_fraction: 0.550793
original_content:
import torch
import numpy as np
import scipy.io
import h5py
import torch.nn as nn
#################################################
#
# Utilities
#
#################################################
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# reading data
class MatReader(object):
def __init__(self, file_path, to_torch=True, to_cuda=False, to_float=True):
super(MatReader, self).__init__()
self.to_torch = to_torch
self.to_cuda = to_cuda
self.to_float = to_float
self.file_path = file_path
self.data = None
self.old_mat = None
self._load_file()
def _load_file(self):
try:
self.data = scipy.io.loadmat(self.file_path)
self.old_mat = True
except ValueError:
self.data = h5py.File(self.file_path)
self.old_mat = False
def load_file(self, file_path):
self.file_path = file_path
self._load_file()
def read_field(self, field):
x = self.data[field]
if not self.old_mat:
x = x[()]
x = np.transpose(x, axes=range(len(x.shape) - 1, -1, -1))
if self.to_float:
x = x.astype(np.float32)
if self.to_torch:
x = torch.from_numpy(x)
if self.to_cuda:
x = x.cuda()
return x
def set_cuda(self, to_cuda):
self.to_cuda = to_cuda
def set_torch(self, to_torch):
self.to_torch = to_torch
def set_float(self, to_float):
self.to_float = to_float
# normalization, pointwise gaussian
class UnitGaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(UnitGaussianNormalizer, self).__init__()
# x could be in shape of ntrain*n or ntrain*T*n or ntrain*n*T
self.mean = torch.mean(x, 0)
self.std = torch.std(x, 0)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
if sample_idx is None:
std = self.std + self.eps # n
mean = self.mean
else:
if len(self.mean.shape) == len(sample_idx[0].shape):
std = self.std[sample_idx] + self.eps # batch*n
mean = self.mean[sample_idx]
if len(self.mean.shape) > len(sample_idx[0].shape):
std = self.std[:,sample_idx]+ self.eps # T*batch*n
mean = self.mean[:,sample_idx]
# x is in shape of batch*n or T*batch*n
x = (x * std) + mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, Gaussian
class GaussianNormalizer(object):
def __init__(self, x, eps=0.00001):
super(GaussianNormalizer, self).__init__()
self.mean = torch.mean(x)
self.std = torch.std(x)
self.eps = eps
def encode(self, x):
x = (x - self.mean) / (self.std + self.eps)
return x
def decode(self, x, sample_idx=None):
x = (x * (self.std + self.eps)) + self.mean
return x
def cuda(self):
self.mean = self.mean.cuda()
self.std = self.std.cuda()
def cpu(self):
self.mean = self.mean.cpu()
self.std = self.std.cpu()
# normalization, scaling by range
class RangeNormalizer(object):
def __init__(self, x, low=0.0, high=1.0):
super(RangeNormalizer, self).__init__()
mymin = torch.min(x, 0)[0].view(-1)
mymax = torch.max(x, 0)[0].view(-1)
self.a = (high - low)/(mymax - mymin)
self.b = -self.a*mymax + high
def encode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = self.a*x + self.b
x = x.view(s)
return x
def decode(self, x):
s = x.size()
x = x.view(s[0], -1)
x = (x - self.b)/self.a
x = x.view(s)
return x
#loss function with rel/abs Lp loss
class LpLoss(object):
def __init__(self, d=2, p=2, size_average=True, reduction=True):
super(LpLoss, self).__init__()
        # Dimension and Lp-norm type are positive
assert d > 0 and p > 0
self.d = d
self.p = p
self.reduction = reduction
self.size_average = size_average
def abs(self, x, y):
num_examples = x.size()[0]
#Assume uniform mesh
h = 1.0 / (x.size()[1] - 1.0)
all_norms = (h**(self.d/self.p))*torch.norm(x.view(num_examples,-1) - y.view(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(all_norms)
else:
return torch.sum(all_norms)
return all_norms
def rel(self, x, y):
num_examples = x.size()[0]
diff_norms = torch.norm(x.reshape(num_examples,-1) - y.reshape(num_examples,-1), self.p, 1)
y_norms = torch.norm(y.reshape(num_examples,-1), self.p, 1)
if self.reduction:
if self.size_average:
return torch.mean(diff_norms/y_norms)
else:
return torch.sum(diff_norms/y_norms)
return diff_norms/y_norms
def __call__(self, x, y):
return self.rel(x, y)
# A simple feedforward neural network
class DenseNet(torch.nn.Module):
def __init__(self, layers, nonlinearity, out_nonlinearity=None, normalize=False):
super(DenseNet, self).__init__()
self.n_layers = len(layers) - 1
assert self.n_layers >= 1
self.layers = nn.ModuleList()
for j in range(self.n_layers):
self.layers.append(nn.Linear(layers[j], layers[j+1]))
if j != self.n_layers - 1:
if normalize:
self.layers.append(nn.BatchNorm1d(layers[j+1]))
self.layers.append(nonlinearity())
if out_nonlinearity is not None:
self.layers.append(out_nonlinearity())
def forward(self, x):
for _, l in enumerate(self.layers):
x = l(x)
return x
filtered:remove_function_no_docstring: 4,841 | filtered:remove_class_no_docstring: 51 | filtered:remove_delete_markers: 828
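A quick usage sketch for the LpLoss class above, assuming utilities3.py is importable as a module; the tensors are random illustration data:

```python
# LpLoss.__call__ dispatches to rel(), the relative Lp error.
import torch
from utilities3 import LpLoss

loss_fn = LpLoss(d=2, p=2)            # relative L2 error, averaged over the batch
x = torch.rand(8, 64, 64)             # a batch of 8 predicted fields
y = x + 0.01 * torch.randn_like(x)    # targets with a small perturbation
print(loss_fn(x, y))                  # mean over the batch of ||x - y||_p / ||y||_p
```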
hexsha: 15b531407df3e093f666b046edd03aed1f14e76a | size: 4,874 | ext: py | lang: Python
max_stars / max_issues / max_forks: book_search/models.py @ drogers141/book-search (0745eb3b25023a44da4c6e7fc4d96de086549f04) | licenses: ["MIT"] | counts: null | events: null
content:
from io import StringIO
import re
from pathlib import Path
import logging
from django.db import models
from django.conf import settings
from bs4 import BeautifulSoup
from tika import parser
logger = logging.getLogger(__name__)
class TikaParseError(RuntimeError):
"""Raised when the conversion of a document into html by Tika fails."""
def extract_author_and_title(metadata: dict) -> (str, str):
"""Try to get the author and title from the metadata.
Return empty strings if not found."""
author, title = '', ''
for key in ('Author', 'author', 'dc:creator', 'creator', 'meta:author'):
if key in metadata:
author = metadata[key]
break
for key in ('Title', 'title', 'dc:title', 'meta:title'):
if key in metadata:
title = metadata[key]
break
return author, title
class ParentDocument(models.Model):
"""Each book/file is represented here.
"""
# source document's full path
filepath = models.CharField(unique=True, max_length=1024)
# try to get the author and title from the document metadata
# but it's not always there
author = models.CharField(max_length=512, blank=True, default='')
title = models.CharField(max_length=512, blank=True, default='')
def convert_to_html_child_pages(self, clean=True):
"""Convert book/file at filepath to html pages.
This constructs a ChildPage object for each page of the document.
Pages are determined by Tika's parsing.
Populates author and title if available in the metadata.
:param clean - if True clean non-ascii whitespace
"""
        try_count, successful_parse = 0, False
        while try_count < settings.TIKA_PARSE_MAX_RETRY:
            if settings.TIKA_CONFIG_FILE:
                data = parser.from_file(str(self.filepath), xmlContent=True, config_path=settings.TIKA_CONFIG_FILE)
            else:
                data = parser.from_file(str(self.filepath), xmlContent=True)
            if data['status'] == 200:
                successful_parse = True
                break
            try_count += 1  # count the failed attempt so the retry loop terminates
        if not successful_parse:
            logger.error('Failed to parse file: %s', self.filepath)
            raise TikaParseError('Tika failed to parse file: %s' % self.filepath)
author, title = extract_author_and_title(data['metadata'])
self.author, self.title = author, title
self.save()
soup = BeautifulSoup(data['content'], features='lxml')
# convert all pages successfully before creating children
pages = []
for i, content in enumerate(soup.find_all('div', attrs={'class': 'page'})):
_buffer = StringIO()
_buffer.write(str(content))
parsed_content = parser.from_buffer(_buffer.getvalue(), xmlContent=True)
text = parsed_content['content'].strip()
if clean:
text = re.sub(r' +\n', '\n', parsed_content['content'].strip().replace('\xa0', ' '))
# remove the html head from the doc so it doesn't cause any garbage in ES highlights
page_soup = BeautifulSoup(text, features='lxml')
page_soup.head.extract()
pages.append(page_soup.prettify())
for i, html in enumerate(pages):
child = ChildPage(parent=self, page_number=i+1, html_content=html,
author=self.author, title=self.title,
parent_doc_id=self.id)
if i == len(pages) - 1:
child.is_last_page = True
child.save()
class ChildPage(models.Model):
"""Each page of a book/file is represented by a ChildPage.
With the initial implementation, this model will also have the html_content
field filled with the full text of the page. This is very inefficient
space-wise as you are storing the full text in the database as well as in
Elasticsearch. But it allows reading the text online and being able to
navigate directly from the search to the location in the text.
The reason that it is mandatory now is due to using django-elasticsearch-dsl.
In the future, we can get rid of django-es-dsl and then allow an option to
not store the full text to save space.
"""
parent = models.ForeignKey(ParentDocument, on_delete=models.CASCADE)
page_number = models.IntegerField()
html_content = models.TextField()
is_last_page = models.BooleanField(default=False)
# need to duplicate keys from parent so django-elasticsearch-dsl can access them
author = models.CharField(max_length=512)
title = models.CharField(max_length=512)
parent_doc_id = models.IntegerField()
avg_line_length: 38.078125 | max_line_length: 115 | alphanum_fraction: 0.649569
original_content:
from io import StringIO
import re
from pathlib import Path
import logging
from django.db import models
from django.conf import settings
from bs4 import BeautifulSoup
from tika import parser
logger = logging.getLogger(__name__)
class TikaParseError(RuntimeError):
"""Raised when the conversion of a document into html by Tika fails."""
def extract_author_and_title(metadata: dict) -> (str, str):
"""Try to get the author and title from the metadata.
Return empty strings if not found."""
author, title = '', ''
for key in ('Author', 'author', 'dc:creator', 'creator', 'meta:author'):
if key in metadata:
author = metadata[key]
break
for key in ('Title', 'title', 'dc:title', 'meta:title'):
if key in metadata:
title = metadata[key]
break
return author, title
class ParentDocument(models.Model):
"""Each book/file is represented here.
"""
# source document's full path
filepath = models.CharField(unique=True, max_length=1024)
# try to get the author and title from the document metadata
# but it's not always there
author = models.CharField(max_length=512, blank=True, default='')
title = models.CharField(max_length=512, blank=True, default='')
def __str__(self):
return f"id: {self.id} {Path(self.filepath).name}"
def convert_to_html_child_pages(self, clean=True):
"""Convert book/file at filepath to html pages.
This constructs a ChildPage object for each page of the document.
Pages are determined by Tika's parsing.
Populates author and title if available in the metadata.
:param clean - if True clean non-ascii whitespace
"""
        try_count, successful_parse = 0, False
        while try_count < settings.TIKA_PARSE_MAX_RETRY:
            if settings.TIKA_CONFIG_FILE:
                data = parser.from_file(str(self.filepath), xmlContent=True, config_path=settings.TIKA_CONFIG_FILE)
            else:
                data = parser.from_file(str(self.filepath), xmlContent=True)
            if data['status'] == 200:
                successful_parse = True
                break
            try_count += 1  # count the failed attempt so the retry loop terminates
        if not successful_parse:
            logger.error('Failed to parse file: %s', self.filepath)
            raise TikaParseError('Tika failed to parse file: %s' % self.filepath)
author, title = extract_author_and_title(data['metadata'])
self.author, self.title = author, title
self.save()
soup = BeautifulSoup(data['content'], features='lxml')
# convert all pages successfully before creating children
pages = []
for i, content in enumerate(soup.find_all('div', attrs={'class': 'page'})):
_buffer = StringIO()
_buffer.write(str(content))
parsed_content = parser.from_buffer(_buffer.getvalue(), xmlContent=True)
text = parsed_content['content'].strip()
if clean:
text = re.sub(r' +\n', '\n', parsed_content['content'].strip().replace('\xa0', ' '))
# remove the html head from the doc so it doesn't cause any garbage in ES highlights
page_soup = BeautifulSoup(text, features='lxml')
page_soup.head.extract()
pages.append(page_soup.prettify())
for i, html in enumerate(pages):
child = ChildPage(parent=self, page_number=i+1, html_content=html,
author=self.author, title=self.title,
parent_doc_id=self.id)
if i == len(pages) - 1:
child.is_last_page = True
child.save()
class ChildPage(models.Model):
"""Each page of a book/file is represented by a ChildPage.
With the initial implementation, this model will also have the html_content
field filled with the full text of the page. This is very inefficient
space-wise as you are storing the full text in the database as well as in
Elasticsearch. But it allows reading the text online and being able to
navigate directly from the search to the location in the text.
The reason that it is mandatory now is due to using django-elasticsearch-dsl.
In the future, we can get rid of django-es-dsl and then allow an option to
not store the full text to save space.
"""
parent = models.ForeignKey(ParentDocument, on_delete=models.CASCADE)
page_number = models.IntegerField()
html_content = models.TextField()
is_last_page = models.BooleanField(default=False)
# need to duplicate keys from parent so django-elasticsearch-dsl can access them
author = models.CharField(max_length=512)
title = models.CharField(max_length=512)
parent_doc_id = models.IntegerField()
def url(self):
return f"/{self.parent_doc_id}/{self.page_number}/"
def __str__(self):
return (f"{self.author} - {self.title} - page {self.page_number}")
filtered:remove_function_no_docstring: 182 | filtered:remove_class_no_docstring: 0 | filtered:remove_delete_markers: 81
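A hypothetical usage sketch for these models, assuming a configured Django project with migrations applied and a reachable Tika server; the file path and ids are illustrative:

```python
# Ingest one book and read back its first page.
doc = ParentDocument.objects.create(filepath="/data/books/example.epub")
doc.convert_to_html_child_pages(clean=True)   # builds one ChildPage per Tika page

first = ChildPage.objects.get(parent=doc, page_number=1)
print(first.url())        # e.g. "/1/1/" -> /<parent_doc_id>/<page_number>/
print(first.is_last_page)
```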
hexsha: 7b9c55eaa5d05bc09b14fe1a2ce8e97213b9c0ef | size: 2,284 | ext: py | lang: Python
max_stars: bminf/core/context.py @ AdamBear/BMInf (8e650dc30e3ed9d7d628153b0a4dbd76d97ea948) | licenses: ["Apache-2.0"] | count: 206 | events: 2021-09-23T08:55:29.000Z to 2022-03-26T13:15:41.000Z
max_issues: same path/repo/head/licenses | count: 24 | events: 2021-09-24T05:54:39.000Z to 2022-03-25T01:44:49.000Z
max_forks: same path/repo/head/licenses | count: 34 | events: 2021-09-26T02:17:29.000Z to 2022-03-28T07:01:54.000Z
content:
from typing import List, Tuple, Type
from .tensor import Tensor
from .device import Device
from .allocator import Allocator
from cpm_kernels.library import cudart
import numpy as np
import logging
logger = logging.getLogger(__name__)
avg_line_length: 30.453333 | max_line_length: 91 | alphanum_fraction: 0.612522
original_content:
from typing import List, Tuple, Type
from .tensor import Tensor
from .device import Device
from .allocator import Allocator
from cpm_kernels.library import cudart
import numpy as np
import logging
logger = logging.getLogger(__name__)
class Context:
def __init__(self,
device_idx : List[int],
allocators : List[Allocator]
) -> None:
assert len(device_idx) > 0, "device_idx must be a non-empty list"
assert len(device_idx) == len(allocators)
self.__devices = [
Device(idx) for idx in device_idx
]
self.__calc_streams = {}
for d in self.__devices:
with d:
self.__calc_streams[d.idx] = cudart.cudaStreamCreate().value
self.__allocators = {
device_idx : allocator for device_idx, allocator in zip(device_idx, allocators)
}
    def allocate(self, shape : Tuple[int, ...], dtype : np.dtype) -> Tensor:
device = Device(cudart.cudaGetDevice())
allocator = self.__allocators[device.idx]
dtype = np.dtype(dtype)
itemsize = dtype.itemsize
nbytes = int(np.prod(shape) * itemsize)
mem = allocator.allocate(nbytes, self.__calc_streams[device.idx])
return Tensor(mem, shape, dtype)
def free(self, tensor : Tensor):
allocator = self.__allocators[tensor.device_id]
tensor._released = True
allocator.free(tensor._memory)
def device(self, device_idx : int) -> Device:
return self.__devices[device_idx]
@property
def current_stream(self):
device_idx = cudart.cudaGetDevice()
return self.__calc_streams[device_idx]
def memory_stats(self):
ret = {}
for device_idx, allocator in self.__allocators.items():
ret[device_idx] = allocator.memory_stats()
return ret
def free_all(self):
for _, allocator in self.__allocators.items():
allocator.free_all()
def __del__(self):
try:
self.free_all()
for stream in self.__calc_streams.values():
cudart.cudaStreamDestroy(stream)
except Exception:
# logger.exception("Exception in Context.__del__")
pass
filtered:remove_function_no_docstring: 1,785 | filtered:remove_class_no_docstring: 243 | filtered:remove_delete_markers: 23
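The allocation size in Context.allocate is just the element count times the dtype's item size; a stand-alone check of that arithmetic, with no CUDA required:

```python
# Reproduces the nbytes computation from Context.allocate.
import numpy as np

shape, dtype = (4, 1024), np.dtype(np.float16)
nbytes = int(np.prod(shape) * dtype.itemsize)
print(nbytes)  # 4 * 1024 elements * 2 bytes each = 8192
```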
hexsha: d79f6521598d0b35ad0abac23c970dfac3a65db6 | size: 3,999 | ext: py | lang: Python
max_stars / max_issues / max_forks: code/python_scripts/dlinked_list.py @ lukaschoebel/LUMOS (5d084e487d937957896a58ef3ab719f86074fa9a) | licenses: ["MIT"] | counts: null | events: null
content:
if __name__ == "__main__":
dlinkedList = DoublyLinkedList(10)
dlinkedList.append(20)
dlinkedList.append(30)
dlinkedList.prepend(-5)
dlinkedList.prepend(-8)
dlinkedList.insert(value=12, index=2)
dlinkedList.print_list()
dlinkedList.remove(index=5)
dlinkedList.insert(value=30, index=4)
dlinkedList.append(55)
dlinkedList.print_list()
dlinkedList.print_head()
dlinkedList.print_tail()
avg_line_length: 26.483444 | max_line_length: 85 | alphanum_fraction: 0.523631
original_content:
class Node:
def __init__(self, value):
self.value = value
self.prev = None
self.next = None
class DoublyLinkedList:
def __init__(self, value):
self.head = Node(value)
self.tail = self.head
self.length = 1
def append(self, value):
''' Adds a value to the end of a doubly linked list
type: value
'''
self.length += 1
postNode = Node(value)
# Wire the postNode
self.tail.next = postNode
postNode.prev = self.tail
# Sets new tail node
self.tail = postNode
def prepend(self, value):
''' Adds a value to the beginning of a doubly linked list
type: value
'''
self.length += 1
preNode = Node(value)
# Wire the preNode
preNode.next = self.head
self.head.prev = preNode
# Sets new head node
self.head = preNode
def insert(self, value, index):
''' Inserts a value in the DLL at a provided index position
type: value
type: index: str
'''
        if index not in range(self.length):
print("ERROR! This index does not exist!")
return
elif index == 0:
self.prepend(value)
else:
self.length += 1
insertNode = Node(value)
currentNode = self.head
for position in range(self.length - 1):
if position == index - 1:
insertNode.next = currentNode.next
currentNode.next.prev = insertNode
insertNode.prev = currentNode
currentNode.next = insertNode
break
currentNode = currentNode.next
def remove(self, index):
''' Removes a node from a given index
type: index: int
'''
        if index not in range(self.length):
print("ERROR! This index does not exist!")
return
if index == 0:
# Remove head of the DLL
self.head = self.head.next
self.head.prev = None
elif index == self.length - 1:
# Remove tail of the DLL
self.tail = self.tail.prev
self.tail.next = None
else:
# Introduce a temporary node for
# traversing through the list
currentNode = self.head
for position in range(self.length - 1):
if position == index:
currentNode.prev.next = currentNode.next
currentNode.next.prev = currentNode.prev
break
currentNode = currentNode.next
# Decrease length of the list
self.length -= 1
def print_list(self):
'''
Print the linked list
'''
currentNode = self.head
print(f"<<<<<<< {self.length} >>>>>>>")
for index in range(self.length):
nextValue = currentNode.next.value if currentNode.next else 'None'
print(f"{index}: {currentNode.value} <-> {nextValue}")
currentNode = currentNode.next
print(f"<<<<<<<<.>>>>>>>>")
def print_head(self):
print(f">> head: {self.head.value}") if self.head else print(">> head: None")
def print_tail(self):
print(f">> tail: {self.tail.value}") if self.tail else print(">> tail: None")
if __name__ == "__main__":
dlinkedList = DoublyLinkedList(10)
dlinkedList.append(20)
dlinkedList.append(30)
dlinkedList.prepend(-5)
dlinkedList.prepend(-8)
dlinkedList.insert(value=12, index=2)
dlinkedList.print_list()
dlinkedList.remove(index=5)
dlinkedList.insert(value=30, index=4)
dlinkedList.append(55)
dlinkedList.print_list()
dlinkedList.print_head()
dlinkedList.print_tail()
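
For tests it is often handier to compare the list contents against a plain Python list than to eyeball printed output. A small helper sketch (to_list is a hypothetical addition, not part of the class above):

def to_list(dll):
    # Walk from head to tail, collecting node values into a plain list.
    values, node = [], dll.head
    while node is not None:
        values.append(node.value)
        node = node.next
    return values

dll = DoublyLinkedList(1)
dll.append(2)
dll.prepend(0)
assert to_list(dll) == [0, 1, 2]
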
| 345
| 3,131
| 71
|
7893b475e4bb1bb6f28c83e8b1af171635285c0f
| 843
|
py
|
Python
|
setup.py
|
BOLD-lab/abbreviator
|
aca379362f04033c7cd1c62ca50b68280f3799c7
|
[
"MIT"
] | null | null | null |
setup.py
|
BOLD-lab/abbreviator
|
aca379362f04033c7cd1c62ca50b68280f3799c7
|
[
"MIT"
] | null | null | null |
setup.py
|
BOLD-lab/abbreviator
|
aca379362f04033c7cd1c62ca50b68280f3799c7
|
[
"MIT"
] | null | null | null |
import setuptools
import os
with open("README.md", "r") as fh:
long_description = fh.read()
if os.environ.get('CI_COMMIT_TAG'):
version = os.environ['CI_COMMIT_TAG']
else:
version = "0.0.4"
setuptools.setup(
name="abbreviator",
version=version,
author="Stephanie Wagenaar",
author_email="stephanie.wagenaar@boldcm.eu",
description="Abbreviate Long Sentences/Names based on hyphenation",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/BOLD-lab/abbreviator",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=['pyphen>=0.11.0']
)
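
The version block lets a GitLab CI pipeline stamp tagged releases: CI_COMMIT_TAG is GitLab's predefined tag variable, and the hard-coded string is only a fallback for local builds. The selection logic in isolation (a sketch, not part of the package):

import os

def resolve_version(default="0.0.4"):
    # Mirrors the logic in setup.py: prefer the CI-provided tag, else fall back.
    return os.environ.get("CI_COMMIT_TAG") or default

os.environ["CI_COMMIT_TAG"] = "v1.2.3"   # simulate a tagged pipeline
assert resolve_version() == "v1.2.3"
del os.environ["CI_COMMIT_TAG"]
assert resolve_version() == "0.0.4"
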
| 28.1
| 71
| 0.679715
|
import setuptools
import os
with open("README.md", "r") as fh:
long_description = fh.read()
if os.environ.get('CI_COMMIT_TAG'):
version = os.environ['CI_COMMIT_TAG']
else:
version = "0.0.4"
setuptools.setup(
name="abbreviator",
version=version,
author="Stephanie Wagenaar",
author_email="stephanie.wagenaar@boldcm.eu",
description="Abbreviate Long Sentences/Names based on hyphenation",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/BOLD-lab/abbreviator",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=['pyphen>=0.11.0']
)
| 0
| 0
| 0
|
9c86682e5fb8a773190f40daabb31d80b79ab5ec
| 750
|
py
|
Python
|
week3/array_partition1.py
|
ravichalla/wallbreaker
|
0d587f12c60df5e4bca47f9183484a69d284d1f5
|
[
"MIT"
] | null | null | null |
week3/array_partition1.py
|
ravichalla/wallbreaker
|
0d587f12c60df5e4bca47f9183484a69d284d1f5
|
[
"MIT"
] | null | null | null |
week3/array_partition1.py
|
ravichalla/wallbreaker
|
0d587f12c60df5e4bca47f9183484a69d284d1f5
|
[
"MIT"
] | null | null | null |
'''
QUESTION:
561. Array Partition I
Given an array of 2n integers, your task is to group these integers into n pairs of integers, say (a1, b1), (a2, b2), ..., (an, bn), which makes the sum of min(ai, bi) for all i from 1 to n as large as possible.
Example 1:
Input: [1,4,3,2]
Output: 4
Explanation: n is 2, and the maximum sum of pairs is 4 = min(1, 2) + min(3, 4).
Note:
n is a positive integer, which is in the range of [1, 10000].
All the integers in the array will be in the range of [-10000, 10000].
'''
'''
Ideas/thoughts:
sort the array, then sum the elements at even indices (0, 2, 4, ...);
pairing adjacent values in sorted order maximizes the sum of pair minimums
'''
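
A minimal sketch of that note: after sorting, adjacent elements pair up and each pair's minimum sits at an even index, so summing those indices realizes the maximum.

def array_pair_sum(nums):
    # Sort, then sum every second element: each is the smaller
    # member of its adjacent pair in sorted order.
    return sum(sorted(nums)[::2])

assert array_pair_sum([1, 4, 3, 2]) == 4   # min(1, 2) + min(3, 4)
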
| 25.862069
| 205
| 0.64
|
'''
QUESTION:
561. Array Partition I
Given an array of 2n integers, your task is to group these integers into n pairs of integers, say (a1, b1), (a2, b2), ..., (an, bn), which makes the sum of min(ai, bi) for all i from 1 to n as large as possible.
Example 1:
Input: [1,4,3,2]
Output: 4
Explanation: n is 2, and the maximum sum of pairs is 4 = min(1, 2) + min(3, 4).
Note:
n is a positive integer, which is in the range of [1, 10000].
All the integers in the array will be in the range of [-10000, 10000].
'''
class Solution(object):
    def arrayPairSum(self, nums):
        # Sort, then sum every second element: each is the smaller
        # member of its adjacent pair in sorted order.
        total = 0
        nums = sorted(nums)
        for i in range(0, len(nums), 2):
            total += nums[i]
        return total
'''
Ideas/thoughts:
sort the array, then sum the elements at even indices (0, 2, 4, ...);
pairing adjacent values in sorted order maximizes the sum of pair minimums
'''
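
A quick check of the class against the example in the problem statement:

assert Solution().arrayPairSum([1, 4, 3, 2]) == 4   # min(1, 2) + min(3, 4)
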
| 141
| 2
| 49
|
ab350b87bb10980d6dc5033bd97c6e224e09e86b
| 30
|
py
|
Python
|
account_payment_fix/models/__init__.py
|
odoo-mastercore/odoo-argentina
|
58cdfe8610bae42f69ddb9d652a28eb3245f6a04
|
[
"MIT"
] | 1
|
2021-01-25T15:57:58.000Z
|
2021-01-25T15:57:58.000Z
|
account_payment_fix/models/__init__.py
|
odoo-mastercore/odoo-argentina
|
58cdfe8610bae42f69ddb9d652a28eb3245f6a04
|
[
"MIT"
] | null | null | null |
account_payment_fix/models/__init__.py
|
odoo-mastercore/odoo-argentina
|
58cdfe8610bae42f69ddb9d652a28eb3245f6a04
|
[
"MIT"
] | 2
|
2020-10-17T16:36:02.000Z
|
2021-01-24T10:20:05.000Z
|
from . import account_payment
| 15
| 29
| 0.833333
|
from . import account_payment
| 0
| 0
| 0
|
96c66bbd32ce6b5cd183eb7717b9022db143812a
| 4,881
|
py
|
Python
|
cisco_dnac_mac_lookup_runner.py
|
sarar0sa/Cisco_Mac_Lookup
|
b657b9ed0ecc60df008e02b6e008b09914cf07bf
|
[
"Apache-2.0"
] | null | null | null |
cisco_dnac_mac_lookup_runner.py
|
sarar0sa/Cisco_Mac_Lookup
|
b657b9ed0ecc60df008e02b6e008b09914cf07bf
|
[
"Apache-2.0"
] | null | null | null |
cisco_dnac_mac_lookup_runner.py
|
sarar0sa/Cisco_Mac_Lookup
|
b657b9ed0ecc60df008e02b6e008b09914cf07bf
|
[
"Apache-2.0"
] | null | null | null |
from time import sleep
import csv
from datetime import datetime
import mac_vendor_lookup
import cisco_service
if __name__ == "__main__":
# Cool banner ofc
print("""
╔═╗╦╔═╗╔═╗╔═╗ ╔╦╗╔╗╔╔═╗╔═╗ ╔╦╗╔═╗╔═╗ ╦ ╔═╗╔═╗╦╔═╦ ╦╔═╗
║ ║╚═╗║ ║ ║ ║║║║║╠═╣║ ║║║╠═╣║ ║ ║ ║║ ║╠╩╗║ ║╠═╝
╚═╝╩╚═╝╚═╝╚═╝ ═╩╝╝╚╝╩ ╩╚═╝ ╩ ╩╩ ╩╚═╝ ╩═╝╚═╝╚═╝╩ ╩╚═╝╩
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWKKNMMMMMMMMMMMMMMMMMMMMWWWMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMXl,co0NWMMMMMMMMMMMMMMXxc:xWMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNd''',;cdkKNNNNNNWNKko,...oWMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMO;''.....';ccllc:,. ...'kMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMWXOxdllllldxOXWMMMMMMMWNd'........ .... ..lNMMMMMMMMMMMMMMMMMMM
MMMMMMMMMN0o:,;;:clllc:;,';oONMMMMMWd'',,,'. ..... .dWMMMMMMMMMMMMMMMMMMM
MMMMMWWWO:,cdOO0K0O0K0K0klc:';dXMMMXl,'',;;. .'''''.lXMMMMMMMMMMMMMMMMMMM
MMMMMMXo;oKWM0dkkdddoo0xddkW0o',kWM0c...,lol;. . .ccoc..;cdXMMMMMMMMMMMMMMMMMMM
MMMMMXo:0MMMMWK0KXXKKKKX00NMMWK:'dWO,....';;' .. .;::,'',,lKMMMMMMMMMMMMMMMMMMM
MMMMWxc0MMMMWW0kOxxkKkk0OXWWWMMNl'kO:'........,:'........,,cKMMMMMMMMMMMMMMMMMMM
MMMMNdxWMMMMMWOxkdddxxdxkKNWWWWMK;cXd'........,,'''.....',,:kXMMMMMMMMMMMMMMMMMM
MMMMXokMMMMMMMNXXXNNXNX0KXWWWWWWNlcXXd,.'......'..'.','.'',;:oKWMMMMMMMMMMMMMMMM
MMMMXoxWMMMMMMM0olxkoxxkXWMMMMMMNloNWNd... ..................:0WMMMMMMMMMMMMMMM
MMMMNxcOWMMMMMMKkkkOOkOOXWMMMMMMO:kMMNl.. .. .l0WMMMMMMMMMMMMMM
MMMMM0:;kNWXXNKO0K0000KKXK0OONWKlcOWNd' .,oKWMMMMMMMMMMMMM
MMMMMWO;'lOxxOddooddlcdxxxlox0Oolo0W0,. .,;oKMMMMMMMMMMMMM
MMMMMMWKc..';dkOKX0KXXXK00Oxdl:;,,oOo. .'',oKWMMMMMMMMMMM
MMMMMMMMWOl,..';coddxxdol:,..,;:;..':;.. .. ..''';dKWWMMMMMMMM
MMMMMMMMMMMN0dl:;''.'',:cokO0KNWW0l..''. ... ..,,'':xXWMMMMMMM
MMMMMMMMMMMMMMMWWNXKKXXWMMMMMMMMMMNl... . ..,'',,:xNWMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM0;.. .. .,;::,'cKMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWx' .,;'. ....... ..','.lXMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMK:. . .',. .. .. ....dWMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMk. .. ...cXMMM
""")
print("Starting script..")
CiscoDnacMacLookupRunner().main()
| 59.52439
| 110
| 0.562795
|
from time import sleep
import csv
from datetime import datetime
import mac_vendor_lookup
import cisco_service
class CiscoDnacMacLookupRunner():
headers = {'Content-Type': 'application/json'}
def __init__(self):
self.cisco = cisco_service.CiscoService()
self.mac_lookup = mac_vendor_lookup.MacLookup()
self.today = datetime.now()
self.filename = "mac_address_lookup_{}T{}Z.csv".format(str(self.today.date()), str(self.today.time()))
def main(self):
print("Obtaining token..")
token = self.cisco.get_dnac_jwt_token()
self.headers["X-Auth-Token"] = token
print("Fetching network devices..")
devices = self.cisco.get_network_devices(self.headers)
        with open(self.filename, 'w', newline='') as csvfile:
print("MAC lookup as begun. This may take a while..")
print("Estimated run time: {} min".format(int(363/5)))
csvwriter = csv.writer(csvfile)
counter_rate_limit = 0
for item in devices:
if(counter_rate_limit == 5):
sleep(60)
counter_rate_limit = 0
details = self.cisco.get_device_enrichment_details(self.headers, item['macAddress'])
counter_rate_limit += 1
if 'links' in details['deviceDetails']['neighborTopology'][0]:
for detail in details['deviceDetails']['neighborTopology'][0]['links']:
if 'interfaceDetails' in detail and detail['id'] == "CLIENTS":
for client in detail['interfaceDetails']:
mac_address = client['clientMacAddress']
manufacturer = self.mac_lookup.lookup_mac_vendor(mac_address)
                                csvwriter.writerow([mac_address, manufacturer])
print("Ending script..")
print("See the result in {}".format(self.filename))
if __name__ == "__main__":
# Cool banner ofc
print("""
╔═╗╦╔═╗╔═╗╔═╗ ╔╦╗╔╗╔╔═╗╔═╗ ╔╦╗╔═╗╔═╗ ╦ ╔═╗╔═╗╦╔═╦ ╦╔═╗
║ ║╚═╗║ ║ ║ ║║║║║╠═╣║ ║║║╠═╣║ ║ ║ ║║ ║╠╩╗║ ║╠═╝
╚═╝╩╚═╝╚═╝╚═╝ ═╩╝╝╚╝╩ ╩╚═╝ ╩ ╩╩ ╩╚═╝ ╩═╝╚═╝╚═╝╩ ╩╚═╝╩
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWKKNMMMMMMMMMMMMMMMMMMMMWWWMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMXl,co0NWMMMMMMMMMMMMMMXxc:xWMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMNd''',;cdkKNNNNNNWNKko,...oWMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMO;''.....';ccllc:,. ...'kMMMMMMMMMMMMMMMMMMM
MMMMMMMMMMMMWXOxdllllldxOXWMMMMMMMWNd'........ .... ..lNMMMMMMMMMMMMMMMMMMM
MMMMMMMMMN0o:,;;:clllc:;,';oONMMMMMWd'',,,'. ..... .dWMMMMMMMMMMMMMMMMMMM
MMMMMWWWO:,cdOO0K0O0K0K0klc:';dXMMMXl,'',;;. .'''''.lXMMMMMMMMMMMMMMMMMMM
MMMMMMXo;oKWM0dkkdddoo0xddkW0o',kWM0c...,lol;. . .ccoc..;cdXMMMMMMMMMMMMMMMMMMM
MMMMMXo:0MMMMWK0KXXKKKKX00NMMWK:'dWO,....';;' .. .;::,'',,lKMMMMMMMMMMMMMMMMMMM
MMMMWxc0MMMMWW0kOxxkKkk0OXWWWMMNl'kO:'........,:'........,,cKMMMMMMMMMMMMMMMMMMM
MMMMNdxWMMMMMWOxkdddxxdxkKNWWWWMK;cXd'........,,'''.....',,:kXMMMMMMMMMMMMMMMMMM
MMMMXokMMMMMMMNXXXNNXNX0KXWWWWWWNlcXXd,.'......'..'.','.'',;:oKWMMMMMMMMMMMMMMMM
MMMMXoxWMMMMMMM0olxkoxxkXWMMMMMMNloNWNd... ..................:0WMMMMMMMMMMMMMMM
MMMMNxcOWMMMMMMKkkkOOkOOXWMMMMMMO:kMMNl.. .. .l0WMMMMMMMMMMMMMM
MMMMM0:;kNWXXNKO0K0000KKXK0OONWKlcOWNd' .,oKWMMMMMMMMMMMMM
MMMMMWO;'lOxxOddooddlcdxxxlox0Oolo0W0,. .,;oKMMMMMMMMMMMMM
MMMMMMWKc..';dkOKX0KXXXK00Oxdl:;,,oOo. .'',oKWMMMMMMMMMMM
MMMMMMMMWOl,..';coddxxdol:,..,;:;..':;.. .. ..''';dKWWMMMMMMMM
MMMMMMMMMMMN0dl:;''.'',:cokO0KNWW0l..''. ... ..,,'':xXWMMMMMMM
MMMMMMMMMMMMMMMWWNXKKXXWMMMMMMMMMMNl... . ..,'',,:xNWMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM0;.. .. .,;::,'cKMMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMWx' .,;'. ....... ..','.lXMMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMK:. . .',. .. .. ....dWMMM
MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMk. .. ...cXMMM
""")
print("Starting script..")
CiscoDnacMacLookupRunner().main()
| 1,719
| 118
| 23
|
5c91270d3182c380f67eb1e558dd0deceb956262
| 1,354
|
py
|
Python
|
dependencies/scons-config/build/lib.linux-x86_64-2.7/sconsconfig/tools/llvm.py
|
maierbn/opendihu
|
577650e2f6b36a7306766b0f4176f8124458cbf0
|
[
"MIT"
] | 17
|
2018-11-25T19:29:34.000Z
|
2021-09-20T04:46:22.000Z
|
dependencies/scons-config/build/lib.linux-x86_64-2.7/sconsconfig/tools/llvm.py
|
maierbn/opendihu
|
577650e2f6b36a7306766b0f4176f8124458cbf0
|
[
"MIT"
] | 1
|
2020-11-12T15:15:58.000Z
|
2020-12-29T15:29:24.000Z
|
dependencies/scons-config/build/lib.linux-x86_64-2.7/sconsconfig/tools/llvm.py
|
maierbn/opendihu
|
577650e2f6b36a7306766b0f4176f8124458cbf0
|
[
"MIT"
] | 4
|
2018-10-17T12:18:10.000Z
|
2021-05-28T13:24:20.000Z
|
from SCons.Script import *
| 35.631579
| 111
| 0.643279
|
from SCons.Script import *
def exists(env):
return env.Detect('llvm-gcc') and env.Detect('llvm-ld')
def generate(env):
env.SetDefault(LLVMCC='llvm-gcc')
env.SetDefault(LLVMLINK='llvm-ld')
if not exists(env):
        print('Error: Could not find either or both of %s and %s.' % (repr(env['LLVMCC']), repr(env['LLVMLINK'])))
env.Exit(1)
return
env['BUILDERS']['LlvmObject'] = SCons.Builder.Builder(
action=SCons.Action.Action("$LLVMCCCOM", "$LLVMCCCOMSTR"),
emitter=SCons.Defaults.StaticObjectEmitter,
prefix='$LLVMOBJPREFIX',
suffix='$LLVMOBJSUFFIX',
src_builder=['CFile', 'CXXFile'],
source_scanner=SourceFileScanner,
single_source=1)
env.SetDefault(LLVMOBJSUFFIX='.bc')
env['LLVMCCCOM'] = '$LLVMCC -o $TARGET -c $CFLAGS $CCFLAGS $_CCCOMCOM $SOURCES'
env['BUILDERS']['LlvmProgram'] = SCons.Builder.Builder(
action=SCons.Action.Action("$LLVMLINKCOM", "$LLVMLINKCOMSTR"),
emitter='$PROGEMITTER',
prefix='$PROGPREFIX',
suffix='$LLVMPROGSUFFIX',
src_suffix='$LLVMOBJSUFFIX',
src_builder='LlvmObject',
target_scanner=ProgramScanner)
env['LLVMLINKCOM'] = '$LLVMLINK -o $TARGET $LLVMLINKFLAGS $SOURCES $_LIBDIRFLAGS $_LIBFLAGS'
env['LLVMLINKFLAGS'] = []
env['LLVMPROGSUFFIX'] = '.llvm'
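
In an SConstruct, the two builders registered above would be driven roughly like this (a sketch; it assumes this module is discoverable by SCons as a tool named 'llvm'):

# SConstruct sketch -- assumes this file is on the toolpath as 'llvm'.
env = Environment(tools=['default', 'llvm'])
bc = env.LlvmObject('main.bc', 'main.c')   # C source -> LLVM bitcode ($LLVMCC)
env.LlvmProgram('app.llvm', [bc])          # bitcode -> program ($LLVMLINK)
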
| 1,281
| 0
| 46
|
5c0946952b71037bb1f97ce65af023f47196a25c
| 35,474
|
py
|
Python
|
files/runs_small/cores_2/ocean.cont/power.py
|
ST4NSB/sniper-simulator-predictions
|
1f0fe2a10fda55fceea053464ea202bfe2effafc
|
[
"MIT"
] | 1
|
2021-03-08T03:39:23.000Z
|
2021-03-08T03:39:23.000Z
|
files/runs_small/cores_2/ocean.cont/power.py
|
ST4NSB/sniper-simulator-predictions
|
1f0fe2a10fda55fceea053464ea202bfe2effafc
|
[
"MIT"
] | null | null | null |
files/runs_small/cores_2/ocean.cont/power.py
|
ST4NSB/sniper-simulator-predictions
|
1f0fe2a10fda55fceea053464ea202bfe2effafc
|
[
"MIT"
] | null | null | null |
power = {'BUSES': {'Area': 1.08752,
'Bus/Area': 1.08752,
'Bus/Gate Leakage': 0.00541455,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0564625,
'Bus/Subthreshold Leakage with power gating': 0.0211734,
'Gate Leakage': 0.00541455,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0564625,
'Subthreshold Leakage with power gating': 0.0211734},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0955308,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.277723,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.852868,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.679223,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.337297,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.584077,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.377493,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.29887,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.202647,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.319665,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.59121,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.161125,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0122273,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.110483,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0904283,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.271608,
'Execution Unit/Register Files/Runtime Dynamic': 0.102656,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.293143,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.835198,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.51333,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000781008,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000781008,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000675581,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000258971,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00129901,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00353661,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0076553,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0869311,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.52956,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.213019,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.295257,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.02054,
'Instruction Fetch Unit/Runtime Dynamic': 0.606399,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.152811,
'L2/Runtime Dynamic': 0.0364529,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.72689,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.24846,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.08055,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0805499,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.10881,
'Load Store Unit/Runtime Dynamic': 1.72626,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.198623,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.397245,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0704918,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0727723,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.343808,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0347031,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.624173,
'Memory Management Unit/Runtime Dynamic': 0.107475,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 24.0592,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.562129,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0240118,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.163866,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.750006,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 6.73993,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0955837,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.277764,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.853885,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.679669,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.337724,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.584816,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.377927,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.30047,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.202914,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.319906,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.59314,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.161317,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0122428,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.110585,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0905428,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.271902,
'Execution Unit/Register Files/Runtime Dynamic': 0.102786,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.293405,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.836102,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.5167,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000782126,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000782126,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000676533,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000259327,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00130065,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00354144,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0076668,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0870411,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.53656,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.212864,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.295631,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.02789,
'Instruction Fetch Unit/Runtime Dynamic': 0.606744,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.152816,
'L2/Runtime Dynamic': 0.036542,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.73104,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.25059,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0806842,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0806842,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.1136,
'Load Store Unit/Runtime Dynamic': 1.72918,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.198953,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.397907,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0706092,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0728902,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.344243,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0346814,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.624811,
'Memory Management Unit/Runtime Dynamic': 0.107572,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 24.0739,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.562798,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0240417,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.164074,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.750914,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 6.74765,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 3.72117540729286,
'Runtime Dynamic': 3.72117540729286,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.569896,
'Runtime Dynamic': 0.377251,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 128.211,
'Gate Leakage': 0.799822,
'Peak Dynamic': 48.7031,
'Peak Power': 68.7978,
'Runtime Dynamic': 13.8648,
'Subthreshold Leakage': 19.2949,
'Subthreshold Leakage with power gating': 8.76959,
'Total Cores/Area': 65.2164,
'Total Cores/Gate Leakage': 0.745993,
'Total Cores/Peak Dynamic': 48.1332,
'Total Cores/Runtime Dynamic': 13.4876,
'Total Cores/Subthreshold Leakage': 12.4375,
'Total Cores/Subthreshold Leakage with power gating': 5.16621,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.569896,
'Total L3s/Runtime Dynamic': 0.377251,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 20.0947,
'Total NoCs/Area': 1.08752,
'Total NoCs/Gate Leakage': 0.00541455,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0564625,
'Total NoCs/Subthreshold Leakage with power gating': 0.0211734}}
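
Component names in the dict are '/'-separated paths, so figures roll up with plain dict access. For example, the per-core runtime dynamic numbers sum to the processor-level total (checked against the values above):

core_runtime = sum(core['Runtime Dynamic'] for core in power['Core'])
print(round(core_runtime, 4))   # 6.73993 + 6.74765 = 13.48758 -> 13.4876,
                                # matching 'Total Cores/Runtime Dynamic'
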
| 73.59751
| 124
| 0.677398
|
power = {'BUSES': {'Area': 1.08752,
'Bus/Area': 1.08752,
'Bus/Gate Leakage': 0.00541455,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0564625,
'Bus/Subthreshold Leakage with power gating': 0.0211734,
'Gate Leakage': 0.00541455,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0564625,
'Subthreshold Leakage with power gating': 0.0211734},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0955308,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.277723,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.852868,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.679223,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.337297,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.584077,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.377493,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.29887,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.202647,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.319665,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.59121,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.161125,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0122273,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.110483,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0904283,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.271608,
'Execution Unit/Register Files/Runtime Dynamic': 0.102656,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.293143,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.835198,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.51333,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000781008,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000781008,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000675581,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000258971,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00129901,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00353661,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0076553,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0869311,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.52956,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.213019,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.295257,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.02054,
'Instruction Fetch Unit/Runtime Dynamic': 0.606399,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.152811,
'L2/Runtime Dynamic': 0.0364529,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.72689,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.24846,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.08055,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0805499,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.10881,
'Load Store Unit/Runtime Dynamic': 1.72626,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.198623,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.397245,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0704918,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0727723,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.343808,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0347031,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.624173,
'Memory Management Unit/Runtime Dynamic': 0.107475,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 24.0592,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.562129,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0240118,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.163866,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.750006,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 6.73993,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0955837,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.277764,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.853885,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.679669,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.337724,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.584816,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.377927,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 1.30047,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.202914,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.319906,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 6.59314,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.161317,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0122428,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.110585,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0905428,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.271902,
'Execution Unit/Register Files/Runtime Dynamic': 0.102786,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.293405,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.836102,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 3.5167,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000782126,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000782126,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000676533,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000259327,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00130065,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00354144,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0076668,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0870411,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 5.53656,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.212864,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.295631,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 8.02789,
'Instruction Fetch Unit/Runtime Dynamic': 0.606744,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.152816,
'L2/Runtime Dynamic': 0.036542,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.73104,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.25059,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0806842,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0806842,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.1136,
'Load Store Unit/Runtime Dynamic': 1.72918,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.198953,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.397907,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0706092,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0728902,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.344243,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0346814,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.624811,
'Memory Management Unit/Runtime Dynamic': 0.107572,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 24.0739,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.562798,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.0240417,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.164074,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.750914,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 6.74765,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 3.72117540729286,
'Runtime Dynamic': 3.72117540729286,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.569896,
'Runtime Dynamic': 0.377251,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 128.211,
'Gate Leakage': 0.799822,
'Peak Dynamic': 48.7031,
'Peak Power': 68.7978,
'Runtime Dynamic': 13.8648,
'Subthreshold Leakage': 19.2949,
'Subthreshold Leakage with power gating': 8.76959,
'Total Cores/Area': 65.2164,
'Total Cores/Gate Leakage': 0.745993,
'Total Cores/Peak Dynamic': 48.1332,
'Total Cores/Runtime Dynamic': 13.4876,
'Total Cores/Subthreshold Leakage': 12.4375,
'Total Cores/Subthreshold Leakage with power gating': 5.16621,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.569896,
'Total L3s/Runtime Dynamic': 0.377251,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 20.0947,
'Total NoCs/Area': 1.08752,
'Total NoCs/Gate Leakage': 0.00541455,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0564625,
'Total NoCs/Subthreshold Leakage with power gating': 0.0211734}}
| 0
| 0
| 0
|
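The per-core dictionary above flattens McPAT's component hierarchy into slash-separated keys. A minimal sketch, assuming the dump is loaded into a Python dict named stats (the variable name and the three sample keys are illustrative stand-ins, not from the source), of pulling out each top-level unit's 'Runtime Dynamic' figure:

# Illustrative only: extract each top-level unit's 'Runtime Dynamic' entry
# from a per-core dict shaped like the dump above.
stats = {
    'L2/Runtime Dynamic': 0.036542,
    'Load Store Unit/Runtime Dynamic': 1.72918,
    'Load Store Unit/Data Cache/Runtime Dynamic': 1.25059,
}  # stand-in for the full per-core dict

top_level = {
    key.split('/')[0]: value
    for key, value in stats.items()
    if key.count('/') == 1 and key.endswith('/Runtime Dynamic')
}
print(top_level)  # {'L2': 0.036542, 'Load Store Unit': 1.72918}

Keys with more than one slash are nested sub-components, so filtering on a single slash keeps only the unit-level totals.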
e9a2f6e36e21d2f812a566f6b88b2d9f4025924d
| 1,890
|
py
|
Python
|
app/main.py
|
alf1e/CHUM-Package-manager
|
814290e344c82a8e0fb48435a745b15ae178eefb
|
[
"MIT"
] | null | null | null |
app/main.py
|
alf1e/CHUM-Package-manager
|
814290e344c82a8e0fb48435a745b15ae178eefb
|
[
"MIT"
] | null | null | null |
app/main.py
|
alf1e/CHUM-Package-manager
|
814290e344c82a8e0fb48435a745b15ae178eefb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#########
#LICENSE#
#########
'''
MIT License
Copyright (c) 2021 ItsMeAlfie0
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
#########
#IMPORTS#
#########
import os
import sys
import urllib.request
import json
######
#CODE#
######
arg = sys.argv
if arg[1] == "--add-host":
    with open("conf/hosts.json", "r") as f: data = json.load(f)
    data[arg[2]] = arg[3]
    with open("conf/hosts.json", "w") as e: json.dump(data, e)  # dump the mapping, not the file handle
    print(f"Added host '{arg[2]}' '{arg[3]}'")
elif arg[1] == "install":
    with open("conf/hosts.json", "r") as f: data = json.load(f)
    host = data[arg[2]]
    setup_sh = urllib.request.urlopen(f"{host}?repo={arg[3]}").read().decode()  # decode bytes before the text-mode write
    os.system(f"mkdir /etc/chum/{arg[3]}")
    with open(f"/etc/chum/{arg[3]}/setup.sh", "w") as f:
        f.write(setup_sh)  # the with-block closes the file; no explicit close() needed
    os.system(f"sh /etc/chum/{arg[3]}/setup.sh")  # was 'chumj', a path typo
    print("Package installed!")
| 29.076923
| 78
| 0.691534
|
#!/usr/bin/env python
#########
#LICENSE#
#########
'''
MIT License
Copyright (c) 2021 ItsMeAlfie0
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
#########
#IMPORTS#
#########
import os
import sys
import urllib.request
import json
######
#CODE#
######
arg = sys.argv
if arg[1] == "--add-host":
    with open("conf/hosts.json", "r") as f: data = json.load(f)
    data[arg[2]] = arg[3]
    with open("conf/hosts.json", "w") as e: json.dump(data, e)  # dump the mapping, not the file handle
    print(f"Added host '{arg[2]}' '{arg[3]}'")
elif arg[1] == "install":
    with open("conf/hosts.json", "r") as f: data = json.load(f)
    host = data[arg[2]]
    setup_sh = urllib.request.urlopen(f"{host}?repo={arg[3]}").read().decode()  # decode bytes before the text-mode write
    os.system(f"mkdir /etc/chum/{arg[3]}")
    with open(f"/etc/chum/{arg[3]}/setup.sh", "w") as f:
        f.write(setup_sh)  # the with-block closes the file; no explicit close() needed
    os.system(f"sh /etc/chum/{arg[3]}/setup.sh")  # was 'chumj', a path typo
    print("Package installed!")
| 0
| 0
| 0
|
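The --add-host branch above is a read-modify-write of a JSON mapping. A minimal standalone sketch of that pattern, assuming a conf/hosts.json file that maps host names to URLs (the path and the sample arguments in the commented call are illustrative):

import json

def add_host(path, name, url):
    # Read the existing host mapping, add one entry, and write it back.
    with open(path, "r") as f:
        hosts = json.load(f)
    hosts[name] = url
    with open(path, "w") as f:
        json.dump(hosts, f, indent=2)  # json.dump takes the object and the file handle

# add_host("conf/hosts.json", "main", "https://example.com/chum")  # example call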
10235f4c22917028f59e78a277404007dacc9d74
| 1,058
|
py
|
Python
|
pin ponge.py
|
glebyad/ping-pong
|
2fabfa00b51f5c50686f8c6de10864722f3d3968
|
[
"CC0-1.0"
] | null | null | null |
pin ponge.py
|
glebyad/ping-pong
|
2fabfa00b51f5c50686f8c6de10864722f3d3968
|
[
"CC0-1.0"
] | null | null | null |
pin ponge.py
|
glebyad/ping-pong
|
2fabfa00b51f5c50686f8c6de10864722f3d3968
|
[
"CC0-1.0"
] | null | null | null |
from pygame import *
# create the game window
window = display.set_mode((1000, 700))
display.set_caption('догонялки')  # window title; Russian for 'tag'
# set the scene background
background = transform.scale(image.load('ping.jpg'), (1000, 700))
# create two sprites and place them on the scene
x1 = 0
y1 = 300
x2 = 900
y2 = 300
sprite1 = transform.scale(image.load('raketka1.png'), (100, 100))
sprite2 = transform.scale(image.load('raketka2.jpg'), (100, 100))
run = True
clock = time.Clock()
FPS = 60
while run:
window.blit(background,(0, 0))
window.blit(sprite1, (x1, y1))
window.blit(sprite2, (x2, y2))
for e in event.get():
if e.type == QUIT:
run = False
speed = 4
keys_pressed = key.get_pressed()
if keys_pressed[K_w] and y1 > 5:
y1 -= speed
if keys_pressed[K_s] and y1 < 600:
y1 += speed
if keys_pressed[K_UP] and y2 > 5:
y2 -= speed
if keys_pressed[K_DOWN] and y2 < 600:
y2 += speed
display.update()
clock.tick(FPS)
| 19.592593
| 66
| 0.571834
|
from pygame import *
# create the game window
window = display.set_mode((1000, 700))
display.set_caption('догонялки')  # window title; Russian for 'tag'
# set the scene background
background = transform.scale(image.load('ping.jpg'), (1000, 700))
# create two sprites and place them on the scene
x1 = 0
y1 = 300
x2 = 900
y2 = 300
sprite1 = transform.scale(image.load('raketka1.png'), (100, 100))
sprite2 = transform.scale(image.load('raketka2.jpg'), (100, 100))
run = True
clock = time.Clock()
FPS = 60
while run:
window.blit(background,(0, 0))
window.blit(sprite1, (x1, y1))
window.blit(sprite2, (x2, y2))
for e in event.get():
if e.type == QUIT:
run = False
speed = 4
keys_pressed = key.get_pressed()
if keys_pressed[K_w] and y1 > 5:
y1 -= speed
if keys_pressed[K_s] and y1 < 600:
y1 += speed
if keys_pressed[K_UP] and y2 > 5:
y2 -= speed
if keys_pressed[K_DOWN] and y2 < 600:
y2 += speed
display.update()
clock.tick(FPS)
| 0
| 0
| 0
|
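The key-handling block above repeats one bounds check per key. A small sketch of the same logic factored into a helper, reusing the 5/600 limits and the speed of 4 from the listing (the helper name and the commented calls are illustrative):

def move_paddle(y, up_pressed, down_pressed, speed=4, top=5, bottom=600):
    # Move one paddle vertically while keeping it inside the playfield,
    # mirroring the per-key checks in the loop above.
    if up_pressed and y > top:
        y -= speed
    if down_pressed and y < bottom:
        y += speed
    return y

# y1 = move_paddle(y1, keys_pressed[K_w], keys_pressed[K_s])
# y2 = move_paddle(y2, keys_pressed[K_UP], keys_pressed[K_DOWN])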
4095a34c413d03e43c4c7d0136819b20e9686d8b
| 3,010
|
py
|
Python
|
containerchaos/measure_response_time.py
|
containerchaos/containerchaos
|
3e44c9587542678d6563b3f07299fb33c88a1f3e
|
[
"MIT"
] | null | null | null |
containerchaos/measure_response_time.py
|
containerchaos/containerchaos
|
3e44c9587542678d6563b3f07299fb33c88a1f3e
|
[
"MIT"
] | 9
|
2019-02-15T16:59:39.000Z
|
2019-02-26T22:42:10.000Z
|
containerchaos/measure_response_time.py
|
containerchaos/containerchaos
|
3e44c9587542678d6563b3f07299fb33c88a1f3e
|
[
"MIT"
] | 1
|
2019-07-31T13:38:51.000Z
|
2019-07-31T13:38:51.000Z
|
import csv
import datetime
import matplotlib.pyplot as plt
import pandas as pd
import requests
import seaborn as sns
def measure_response_time(url, criteria, write=True):
'''
Measures and saves an API request's response time to a CSV file
:param url: The URL for API request
:param criteria: The criteria in effect
    :param write: When True, append the measurement to the CSV file
    :return: Path to a CSV file with timestamp, response time in seconds, and criteria columns
'''
response = requests.get(url)
response_time = response.elapsed.total_seconds()
date_time = datetime.datetime.now()
fieldnames = ['timestamp', 'responseTime', 'criteria'] # Headers of the CSV file
out_path = 'Response-Times.csv'
if write:
with open(out_path, 'a') as csvFile:
writer = csv.DictWriter(csvFile, fieldnames=fieldnames)
if csvFile.tell() == 0:
writer.writeheader()
            writer.writerow({'timestamp': date_time, 'responseTime': response_time, 'criteria': criteria})
            # the with-block closes the file; no explicit close() needed
    return out_path
def generate_histogram(path, title):
'''
    Saves a histogram of response times, one series per criteria
    :param path: Path to a CSV file produced by measure_response_time
    :param title: Title used for the plot and the saved image
'''
response_times = pd.read_csv(path)
criteria_dict = response_times.groupby("criteria")["responseTime"].apply(list).to_dict()
    criteria_keys = list(criteria_dict.keys())
    criteria_values = list(criteria_dict.values())
    plt.style.use("seaborn-deep")  # apply the style before drawing
    plt.title(title)
    plt.hist(x=criteria_values, bins=30, label=criteria_keys)
plt.legend(loc="upper right")
plt.xlabel("Response Time in Seconds")
plt.ylabel("Number of Requests")
plt.savefig(title + " Histogram")
plt.show()
def generate_density_plot(path, title):
'''
    Saves a kernel density plot of response times, one curve per criteria
    :param path: Path to a CSV file produced by measure_response_time
    :param title: Title used for the plot and the saved image
'''
response_times = pd.read_csv(path)
criteria_dict = response_times.groupby("criteria")["responseTime"].apply(list).to_dict()
    criteria_keys = list(criteria_dict.keys())
    for criteria in criteria_keys:
subset = response_times[response_times["criteria"] == criteria]
sns.distplot(subset["responseTime"], hist=False, kde=True, kde_kws={"linewidth": 3}, label=criteria)
plt.title(title)
plt.legend(loc="upper right")
plt.xlabel("Response Time in Seconds")
plt.ylabel("Density")
plt.savefig(title + " Density Plot")
plt.show()
local_simple_csv = "output/local/simple/Response-Times.csv"
local_complex_csv = "output/local/complex/Response-Times.csv"
cloud_simple_csv = "output/gcloud/simple/Response-Times.csv"
cloud_complex_csv = "output/gcloud/complex/Response-Times.csv"
generate_histogram(local_simple_csv, "Local Machine Simple Task")
generate_density_plot(local_complex_csv, "Local Machine Complex Task")
generate_density_plot(cloud_simple_csv, "Cloud Simple Task")
generate_histogram(cloud_complex_csv, "Cloud Complex Task")
| 32.021277
| 108
| 0.707641
|
import csv
import datetime
import matplotlib.pyplot as plt
import pandas as pd
import requests
import seaborn as sns
def measure_response_time(url, criteria, write=True):
'''
Measures and saves an API request's response time to a CSV file
:param url: The URL for API request
:param criteria: The criteria in effect
    :param write: When True, append the measurement to the CSV file
    :return: Path to a CSV file with timestamp, response time in seconds, and criteria columns
'''
response = requests.get(url)
response_time = response.elapsed.total_seconds()
date_time = datetime.datetime.now()
fieldnames = ['timestamp', 'responseTime', 'criteria'] # Headers of the CSV file
out_path = 'Response-Times.csv'
if write:
with open(out_path, 'a') as csvFile:
writer = csv.DictWriter(csvFile, fieldnames=fieldnames)
if csvFile.tell() == 0:
writer.writeheader()
            writer.writerow({'timestamp': date_time, 'responseTime': response_time, 'criteria': criteria})
            # the with-block closes the file; no explicit close() needed
    return out_path
def generate_histogram(path, title):
'''
    Saves a histogram of response times, one series per criteria
    :param path: Path to a CSV file produced by measure_response_time
    :param title: Title used for the plot and the saved image
'''
response_times = pd.read_csv(path)
criteria_dict = response_times.groupby("criteria")["responseTime"].apply(list).to_dict()
    criteria_keys = list(criteria_dict.keys())
    criteria_values = list(criteria_dict.values())
    plt.style.use("seaborn-deep")  # apply the style before drawing
    plt.title(title)
    plt.hist(x=criteria_values, bins=30, label=criteria_keys)
plt.legend(loc="upper right")
plt.xlabel("Response Time in Seconds")
plt.ylabel("Number of Requests")
plt.savefig(title + " Histogram")
plt.show()
def generate_density_plot(path, title):
'''
    Saves a kernel density plot of response times, one curve per criteria
    :param path: Path to a CSV file produced by measure_response_time
    :param title: Title used for the plot and the saved image
'''
response_times = pd.read_csv(path)
criteria_dict = response_times.groupby("criteria")["responseTime"].apply(list).to_dict()
    criteria_keys = list(criteria_dict.keys())
    for criteria in criteria_keys:
subset = response_times[response_times["criteria"] == criteria]
sns.distplot(subset["responseTime"], hist=False, kde=True, kde_kws={"linewidth": 3}, label=criteria)
plt.title(title)
plt.legend(loc="upper right")
plt.xlabel("Response Time in Seconds")
plt.ylabel("Density")
plt.savefig(title + " Density Plot")
plt.show()
local_simple_csv = "output/local/simple/Response-Times.csv"
local_complex_csv = "output/local/complex/Response-Times.csv"
cloud_simple_csv = "output/gcloud/simple/Response-Times.csv"
cloud_complex_csv = "output/gcloud/complex/Response-Times.csv"
generate_histogram(local_simple_csv, "Local Machine Simple Task")
generate_density_plot(local_complex_csv, "Local Machine Complex Task")
generate_density_plot(cloud_simple_csv, "Cloud Simple Task")
generate_histogram(cloud_complex_csv, "Cloud Complex Task")
| 0
| 0
| 0
|
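The docstrings above describe measure_response_time appending one CSV row per request. A hedged usage sketch, assuming the two functions above are defined in the same file and that the endpoint URL (a placeholder here, not from the source) is reachable:

# Illustrative driver, assuming measure_response_time and generate_histogram
# from the record above are in scope; the URL and title are examples.
if __name__ == "__main__":
    url = "http://localhost:8080/api"   # placeholder endpoint
    for _ in range(10):                 # collect a handful of samples
        path = measure_response_time(url, criteria="baseline")
    generate_histogram(path, "Baseline Run")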
821041c230e611989e036de3de8d4f9ba908a39e
| 1,620
|
py
|
Python
|
tracking/main.py
|
chan-w/vaccine-text-signup
|
f926aa76724ffd5fe1d473fd6cdb70ed50ee982d
|
[
"MIT"
] | null | null | null |
tracking/main.py
|
chan-w/vaccine-text-signup
|
f926aa76724ffd5fe1d473fd6cdb70ed50ee982d
|
[
"MIT"
] | null | null | null |
tracking/main.py
|
chan-w/vaccine-text-signup
|
f926aa76724ffd5fe1d473fd6cdb70ed50ee982d
|
[
"MIT"
] | null | null | null |
api_key = "AIzaSyAedPSTmyoW1ejPtwG_cSu7fEjLxOOUrXg"
# Uses the Geocode API
import requests
from urllib.parse import urlencode
def extract_lat_lng(address_or_postalcode, data_type='json'):
    # Geocode an address via the Geocoding API and return a (lat, lng) pair
    endpoint = f"https://maps.googleapis.com/maps/api/geocode/{data_type}"
    params = {"address": address_or_postalcode, "key": api_key}
    url_params = urlencode(params)
    url = f"{endpoint}?{url_params}"
    r = requests.get(url)
    if r.status_code not in range(200, 299):
        return None, None  # keep the two-value shape the caller unpacks
    latlng = {}
    try:
        latlng = r.json()['results'][0]['geometry']['location']
    except (KeyError, IndexError):
        pass
    return latlng.get("lat"), latlng.get("lng")
# Input address here!
lat, lng = extract_lat_lng("1600 Amphitheatre Parkway, Mountain View, CA")
places_endpoint_2 = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
params_2 = {
"key": api_key,
"location": f"{lat},{lng}",
"radius": "1500",
"keyword": "pharmacy"
}
params_2_encoded = urlencode(params_2)
places_url=f"{places_endpoint_2}?{params_2_encoded}"
r2 = requests.get(places_url)
# Returns the first 3 closest locations and stores it in variables within a 1500 meter radius
try:
nameVicinity0 = r2.json()['results'][0]
name0 = nameVicinity0.get('name')
vicinity0 = nameVicinity0.get('vicinity')
except:
pass
try:
nameVicinity1 = r2.json()['results'][1]
name1 = nameVicinity1.get('name')
vicinity1 = nameVicinity1.get('vicinity')
except:
pass
try:
nameVicinity2 = r2.json()['results'][2]
name2 = nameVicinity2.get('name')
vicinity2 = nameVicinity2.get('vicinity')
except:
pass
| 27.931034
| 93
| 0.683951
|
api_key = "AIzaSyAedPSTmyoW1ejPtwG_cSu7fEjLxOOUrXg"
# Uses the Geocode API
import requests
from urllib.parse import urlencode
def extract_lat_lng(address_or_postalcode, data_type='json'):
    # Geocode an address via the Geocoding API and return a (lat, lng) pair
endpoint = f"https://maps.googleapis.com/maps/api/geocode/{data_type}"
params = {"address": address_or_postalcode, "key": api_key}
url_params = urlencode(params)
url = f"{endpoint}?{url_params}"
r = requests.get(url)
    if r.status_code not in range(200, 299):
        return None, None  # returning {} would break the two-value unpacking at the call site
latlng = {}
try:
latlng = r.json()['results'][0]['geometry']['location']
except:
pass
return latlng.get("lat"), latlng.get("lng")
#Input address here!
lat, lng = extract_lat_lng("1600 Amphitheatre Parkway, Mountain View, CA")
places_endpoint_2 = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
params_2 = {
"key": api_key,
"location": f"{lat},{lng}",
"radius": "1500",
"keyword": "pharmacy"
}
params_2_encoded = urlencode(params_2)
places_url=f"{places_endpoint_2}?{params_2_encoded}"
r2 = requests.get(places_url)
# Returns the first 3 closest locations and stores it in variables within a 1500 meter radius
try:
nameVicinity0 = r2.json()['results'][0]
name0 = nameVicinity0.get('name')
vicinity0 = nameVicinity0.get('vicinity')
except:
pass
try:
nameVicinity1 = r2.json()['results'][1]
name1 = nameVicinity1.get('name')
vicinity1 = nameVicinity1.get('vicinity')
except:
pass
try:
nameVicinity2 = r2.json()['results'][2]
name2 = nameVicinity2.get('name')
vicinity2 = nameVicinity2.get('vicinity')
except:
pass
| 504
| 0
| 23
|
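The record above builds its Places request by URL-encoding a parameter dict. The same step in isolation, with placeholder values (the key and coordinates are examples, not from the source):

from urllib.parse import urlencode

params = {
    "key": "YOUR_API_KEY",            # placeholder, not a real key
    "location": "37.4221,-122.0841",  # example coordinates
    "radius": "1500",
    "keyword": "pharmacy",
}
endpoint = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
url = f"{endpoint}?{urlencode(params)}"
print(url)  # fully encoded query string, ready for requests.get(url)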
f74c328b4e8be5db4ab0478db22db83a43dfc36e
| 38,645
|
py
|
Python
|
petitions/migrations/01000_add_counties_subcounties_courts_prisons_offences.py
|
DavidWaichari/pomac
|
79273c34dc54a301ed9fd802b0c2c487b2ac5d92
|
[
"MIT"
] | null | null | null |
petitions/migrations/01000_add_counties_subcounties_courts_prisons_offences.py
|
DavidWaichari/pomac
|
79273c34dc54a301ed9fd802b0c2c487b2ac5d92
|
[
"MIT"
] | null | null | null |
petitions/migrations/01000_add_counties_subcounties_courts_prisons_offences.py
|
DavidWaichari/pomac
|
79273c34dc54a301ed9fd802b0c2c487b2ac5d92
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.1 on 2018-01-28 19:30
from django.db import migrations
| 56.25182
| 112
| 0.73712
|
# Generated by Django 2.0.1 on 2018-01-28 19:30
from django.db import migrations
def add_initial_data(apps, schema_editor):
    """Seed counties, sub-counties, courts, prisons and offences."""
County = apps.get_model('petitions', 'County')
Court = apps.get_model('petitions', 'Court')
SubCounty = apps.get_model('petitions', 'SubCounty')
Prison = apps.get_model('petitions', 'Prison')
Offence = apps.get_model('petitions', 'Offence')
baringo = County.objects.create(name='BARINGO')
SubCounty.objects.create(name='BARINGO EAST', county=baringo)
SubCounty.objects.create(name='BARINGO WEST', county=baringo)
SubCounty.objects.create(name='BARINGO CENTRAL', county=baringo)
SubCounty.objects.create(name='MOCHONGOI', county=baringo)
SubCounty.objects.create(name='MOGOTIO', county=baringo)
SubCounty.objects.create(name='ELDAMA RAVINE', county=baringo)
bomet = County.objects.create(name='BOMET')
SubCounty.objects.create(name='SOTIK', county=bomet)
SubCounty.objects.create(name='CHEPALUNGU', county=bomet)
SubCounty.objects.create(name='BOMET EAST', county=bomet)
SubCounty.objects.create(name='BOMET CENTRAL', county=bomet)
SubCounty.objects.create(name='KONOIN', county=bomet)
bungoma = County.objects.create(name='BUNGOMA')
SubCounty.objects.create(name='MT ELGON', county=bungoma)
SubCounty.objects.create(name='SIRISIA', county=bungoma)
SubCounty.objects.create(name='KABUCHIA', county=bungoma)
SubCounty.objects.create(name='BUMULA', county=bungoma)
SubCounty.objects.create(name='KANDUNYI', county=bungoma)
SubCounty.objects.create(name='WEBUYE', county=bungoma)
SubCounty.objects.create(name='BOKOLI', county=bungoma)
SubCounty.objects.create(name='KIMILILI', county=bungoma)
SubCounty.objects.create(name='TONGAREN', county=bungoma)
busia = County.objects.create(name='BUSIA')
SubCounty.objects.create(name='TESO NORTH', county=busia)
SubCounty.objects.create(name='TESO SOUTH', county=busia)
SubCounty.objects.create(name='NAMBALE', county=busia)
SubCounty.objects.create(name='MATAYOS', county=busia)
SubCounty.objects.create(name='BUTULA', county=busia)
SubCounty.objects.create(name='FUNYULA', county=busia)
SubCounty.objects.create(name='BUDALANGI', county=busia)
elgeiyomarakwet = County.objects.create(name='ELGEYO MARAKWET')
SubCounty.objects.create(name='MARAKWET EAST', county=elgeiyomarakwet)
SubCounty.objects.create(name='MARAKWET WEST', county=elgeiyomarakwet)
SubCounty.objects.create(name='KEIYO EAST', county=elgeiyomarakwet)
SubCounty.objects.create(name='KEIYO SOUTH', county=elgeiyomarakwet)
embu = County.objects.create(name='EMBU')
SubCounty.objects.create(name='MANYATTA', county=embu)
SubCounty.objects.create(name='RUNYENJES', county=embu)
SubCounty.objects.create(name='GACHOKA', county=embu)
SubCounty.objects.create(name='SIAKAGO', county=embu)
garissa = County.objects.create(name='GARISSA')
SubCounty.objects.create(name='TAVEDUJIS', county=garissa)
SubCounty.objects.create(name='BALAMBALA', county=garissa)
SubCounty.objects.create(name='LAGDERA', county=garissa)
SubCounty.objects.create(name='DADAAB', county=garissa)
SubCounty.objects.create(name='FAFI', county=garissa)
SubCounty.objects.create(name='IJARA', county=garissa)
homabay = County.objects.create(name='HOMA BAY')
SubCounty.objects.create(name='KASIPUL', county=homabay)
SubCounty.objects.create(name='KABONDO', county=homabay)
SubCounty.objects.create(name='KARACHUONYO', county=homabay)
SubCounty.objects.create(name='RANGWE', county=homabay)
SubCounty.objects.create(name='HOMABAY TOWN', county=homabay)
SubCounty.objects.create(name='NDHIWA', county=homabay)
SubCounty.objects.create(name='MBITA', county=homabay)
SubCounty.objects.create(name='GWASSI', county=homabay)
isiolo = County.objects.create(name='ISIOLO')
SubCounty.objects.create(name='ISIOLO NORTH', county=isiolo)
SubCounty.objects.create(name='ISIOLO SOUTH', county=isiolo)
kajiado = County.objects.create(name='KAJIADO')
SubCounty.objects.create(name='KAJIADO CENTRAL', county=kajiado)
SubCounty.objects.create(name='KAJIADO NORTH', county=kajiado)
SubCounty.objects.create(name='KAJIADO SOUTH', county=kajiado)
kakamega = County.objects.create(name='KAKAMEGA')
SubCounty.objects.create(name='LUGARI', county=kakamega)
SubCounty.objects.create(name='LIKUYANI', county=kakamega)
SubCounty.objects.create(name='MALAVA', county=kakamega)
SubCounty.objects.create(name='LURAMBI', county=kakamega)
SubCounty.objects.create(name='MAKHOLO', county=kakamega)
SubCounty.objects.create(name='MUMIAS', county=kakamega)
SubCounty.objects.create(name='MUMIAS EAST', county=kakamega)
SubCounty.objects.create(name='MATUNGU', county=kakamega)
SubCounty.objects.create(name='BUTERE', county=kakamega)
SubCounty.objects.create(name='KHWISERO', county=kakamega)
SubCounty.objects.create(name='SHINYALU', county=kakamega)
SubCounty.objects.create(name='IKOLOMANI', county=kakamega)
kericho = County.objects.create(name='KERICHO')
SubCounty.objects.create(name='AINAMOI', county=kericho)
SubCounty.objects.create(name='BELGUT', county=kericho)
SubCounty.objects.create(name='KIPKELION', county=kericho)
kiambu = County.objects.create(name='KIAMBU')
SubCounty.objects.create(name='GATUNDU SOUTH', county=kiambu)
SubCounty.objects.create(name='GATUNDU NORTH', county=kiambu)
SubCounty.objects.create(name='JUJA', county=kiambu)
SubCounty.objects.create(name='THIKA TOWN', county=kiambu)
SubCounty.objects.create(name='RUIRU GITHUNGURI', county=kiambu)
SubCounty.objects.create(name='KIAMBU', county=kiambu)
SubCounty.objects.create(name='KIAMBAA', county=kiambu)
SubCounty.objects.create(name='KABETE', county=kiambu)
SubCounty.objects.create(name='KIKUYU', county=kiambu)
SubCounty.objects.create(name='LIMURU', county=kiambu)
SubCounty.objects.create(name='LARI', county=kiambu)
kilifi = County.objects.create(name='KILIFI')
SubCounty.objects.create(name='KILIFI NORTH', county=kilifi)
SubCounty.objects.create(name='KILIFI SOUTH', county=kilifi)
SubCounty.objects.create(name='KALOLENI', county=kilifi)
SubCounty.objects.create(name='RABAI', county=kilifi)
SubCounty.objects.create(name='GANZE', county=kilifi)
SubCounty.objects.create(name='MALINDI', county=kilifi)
SubCounty.objects.create(name='MAGARINI', county=kilifi)
kirinyaga = County.objects.create(name='KIRINYAGA')
SubCounty.objects.create(name='MWEA', county=kirinyaga)
SubCounty.objects.create(name='GICHUGU', county=kirinyaga)
SubCounty.objects.create(name='NDIA', county=kirinyaga)
SubCounty.objects.create(name='KIRINYAGA CENTRAL', county=kirinyaga)
kisii = County.objects.create(name='KISII')
SubCounty.objects.create(name='BONCHARI', county=kisii)
SubCounty.objects.create(name='SOUTH MUGIRANGO', county=kisii)
SubCounty.objects.create(name='BOMACHOGE', county=kisii)
SubCounty.objects.create(name='BOBASI', county=kisii)
SubCounty.objects.create(name='GUCHA', county=kisii)
SubCounty.objects.create(name='NYARIBARI MASABA', county=kisii)
SubCounty.objects.create(name='NYARIBARI CHACHE', county=kisii)
SubCounty.objects.create(name='MATRANI', county=kisii)
SubCounty.objects.create(name='MOSOCHO', county=kisii)
kisumu = County.objects.create(name='KISUMU')
SubCounty.objects.create(name='KISUMU EAST', county=kisumu)
SubCounty.objects.create(name='KISUMU WEST', county=kisumu)
SubCounty.objects.create(name='KISUMU CENTRAL', county=kisumu)
SubCounty.objects.create(name='SEME', county=kisumu)
SubCounty.objects.create(name='NYANDO', county=kisumu)
SubCounty.objects.create(name='MUHORONI', county=kisumu)
SubCounty.objects.create(name='NYAKACH', county=kisumu)
kitui = County.objects.create(name='KITUI')
SubCounty.objects.create(name='MWINGI NORTH', county=kitui)
SubCounty.objects.create(name='MWINGI CENTRAL', county=kitui)
SubCounty.objects.create(name='MWINGI SOUTH', county=kitui)
SubCounty.objects.create(name='KITUI WEST', county=kitui)
SubCounty.objects.create(name='KITUI RURAL', county=kitui)
SubCounty.objects.create(name='KITUI TOWN', county=kitui)
SubCounty.objects.create(name='MUTITU', county=kitui)
SubCounty.objects.create(name='KITUI SOUTH', county=kitui)
kwale = County.objects.create(name='KWALE')
SubCounty.objects.create(name='MSAMBWENI', county=kwale)
SubCounty.objects.create(name='LUNGA LUNGA', county=kwale)
SubCounty.objects.create(name='MATUGA', county=kwale)
SubCounty.objects.create(name='KINANGO', county=kwale)
laikipia = County.objects.create(name='LAIKIPIA')
SubCounty.objects.create(name='LAIKIPIA WEST', county=laikipia)
SubCounty.objects.create(name='LAIKIPIA EAST', county=laikipia)
SubCounty.objects.create(name='LAIKIPIA NORTH', county=laikipia)
lamu = County.objects.create(name='LAMU')
SubCounty.objects.create(name='LAMU EAST', county=lamu)
SubCounty.objects.create(name='LAMU WEST', county=lamu)
machakos = County.objects.create(name='MACHAKOS')
SubCounty.objects.create(name='MASINGA', county=machakos)
SubCounty.objects.create(name='YATTA', county=machakos)
SubCounty.objects.create(name='KANGUNDO', county=machakos)
SubCounty.objects.create(name='MATUNGULU', county=machakos)
SubCounty.objects.create(name='KATHIANI', county=machakos)
SubCounty.objects.create(name='MAVOKO', county=machakos)
SubCounty.objects.create(name='MACHAKOS TOWN', county=machakos)
SubCounty.objects.create(name='MWALA', county=machakos)
makueni = County.objects.create(name='MAKUENI')
SubCounty.objects.create(name='MBOONI', county=makueni)
SubCounty.objects.create(name='KILOME', county=makueni)
SubCounty.objects.create(name='KAITI', county=makueni)
SubCounty.objects.create(name='MAKUENI', county=makueni)
SubCounty.objects.create(name='KIBWEZI WEST', county=makueni)
SubCounty.objects.create(name='KIBWEZI EAST', county=makueni)
mandera = County.objects.create(name='MANDERA')
SubCounty.objects.create(name='MANDERA WEST', county=mandera)
SubCounty.objects.create(name='BANISA', county=mandera)
SubCounty.objects.create(name='MANDERA NORTH', county=mandera)
SubCounty.objects.create(name='MANDERA EAST', county=mandera)
SubCounty.objects.create(name='LAFEY', county=mandera)
marsabit = County.objects.create(name='MARSABIT')
SubCounty.objects.create(name='MOYALE', county=marsabit)
SubCounty.objects.create(name='NORTH HORR', county=marsabit)
SubCounty.objects.create(name='SAKU', county=marsabit)
SubCounty.objects.create(name='LAISAMIS', county=marsabit)
meru = County.objects.create(name='MERU')
SubCounty.objects.create(name='IGEMBE SOUTH', county=meru)
SubCounty.objects.create(name='IGEMBE CENTRAL', county=meru)
SubCounty.objects.create(name='IGEMBE NORTH', county=meru)
SubCounty.objects.create(name='TIGANIA WEST', county=meru)
SubCounty.objects.create(name='TIGANIA EAST', county=meru)
SubCounty.objects.create(name='NORTH IMENTI', county=meru)
SubCounty.objects.create(name='BUURI', county=meru)
SubCounty.objects.create(name='CENTRAL IMENTI', county=meru)
SubCounty.objects.create(name='SOUTH IMENTI', county=meru)
migori = County.objects.create(name='MIGORI')
SubCounty.objects.create(name='RONGO', county=migori)
SubCounty.objects.create(name='AWENDO', county=migori)
SubCounty.objects.create(name='MIGORI EAST', county=migori)
SubCounty.objects.create(name='MIGORI WEST', county=migori)
SubCounty.objects.create(name='URIRI', county=migori)
SubCounty.objects.create(name='NYATIKE', county=migori)
SubCounty.objects.create(name='KURIA EAST', county=migori)
SubCounty.objects.create(name='KURIA WEST', county=migori)
mombasa = County.objects.create(name='MOMBASA')
SubCounty.objects.create(name='CHANGAMWE', county=mombasa)
SubCounty.objects.create(name='JOMVU', county=mombasa)
SubCounty.objects.create(name='KISAUNI', county=mombasa)
SubCounty.objects.create(name='NYALI', county=mombasa)
SubCounty.objects.create(name='LIKONI', county=mombasa)
SubCounty.objects.create(name='MVITA', county=mombasa)
muranga = County.objects.create(name='MURANGA')
SubCounty.objects.create(name='KANGEMA', county=muranga)
SubCounty.objects.create(name='MATHIOYA', county=muranga)
SubCounty.objects.create(name='KIHARU', county=muranga)
SubCounty.objects.create(name='KIGUMO', county=muranga)
SubCounty.objects.create(name='MARAGWA', county=muranga)
SubCounty.objects.create(name='KANDARA', county=muranga)
SubCounty.objects.create(name='GATANGA', county=muranga)
nairobi = County.objects.create(name='NAIROBI')
SubCounty.objects.create(name='WESTLANDS', county=nairobi)
SubCounty.objects.create(name='PARKLANDS', county=nairobi)
SubCounty.objects.create(name='DAGORETTI', county=nairobi)
SubCounty.objects.create(name='KAREN / LANGATA', county=nairobi)
SubCounty.objects.create(name='KIBIRA', county=nairobi)
SubCounty.objects.create(name='ROYSAMBU', county=nairobi)
SubCounty.objects.create(name='KASARANI', county=nairobi)
SubCounty.objects.create(name='RUARAKA', county=nairobi)
SubCounty.objects.create(name='KARIOBANGI', county=nairobi)
SubCounty.objects.create(name='KAYOLE', county=nairobi)
SubCounty.objects.create(name='EMBAKASI', county=nairobi)
SubCounty.objects.create(name='MIHANG’O', county=nairobi)
SubCounty.objects.create(name='NAIROBI WEST', county=nairobi)
SubCounty.objects.create(name='MAKADARA', county=nairobi)
SubCounty.objects.create(name='KAMUKUNJI', county=nairobi)
SubCounty.objects.create(name='STAREHE', county=nairobi)
SubCounty.objects.create(name='MATHARE', county=nairobi)
nakuru = County.objects.create(name='NAKURU')
SubCounty.objects.create(name='MOLO', county=nakuru)
SubCounty.objects.create(name='NJORO', county=nakuru)
SubCounty.objects.create(name='NAIVASHA', county=nakuru)
SubCounty.objects.create(name='GILGIL', county=nakuru)
SubCounty.objects.create(name='KURESOI SOUTH', county=nakuru)
SubCounty.objects.create(name='KURESOI NORTH', county=nakuru)
SubCounty.objects.create(name='SUBUKIA', county=nakuru)
SubCounty.objects.create(name='RONGAI', county=nakuru)
SubCounty.objects.create(name='BAHATI', county=nakuru)
SubCounty.objects.create(name='NAKURU TOWN WEST', county=nakuru)
SubCounty.objects.create(name='NAKURU TOWN EAST', county=nakuru)
nandi = County.objects.create(name='NANDI')
SubCounty.objects.create(name='TINDERET', county=nandi)
SubCounty.objects.create(name='ALDAI', county=nandi)
SubCounty.objects.create(name='NANDI HILLS', county=nandi)
SubCounty.objects.create(name='EMGWEN NORTH', county=nandi)
SubCounty.objects.create(name='EMGWEN SOUTH', county=nandi)
SubCounty.objects.create(name='MOSOP', county=nandi)
narok = County.objects.create(name='NAROK')
SubCounty.objects.create(name='KILGORIS', county=narok)
SubCounty.objects.create(name='EMURUA DIKIRR', county=narok)
SubCounty.objects.create(name='NAROK NORTH', county=narok)
SubCounty.objects.create(name='KAJIADO EAST', county=narok)
SubCounty.objects.create(name='KAJIADO WEST', county=narok)
nyamira = County.objects.create(name='NYAMIRA')
SubCounty.objects.create(name='KITUTU MASABA', county=nyamira)
SubCounty.objects.create(name='NORTH MUGIRANGO', county=nyamira)
SubCounty.objects.create(name='WEST MUGIRANGO', county=nyamira)
nyandarua = County.objects.create(name='NYANDARUA')
SubCounty.objects.create(name='KINANGOP', county=nyandarua)
SubCounty.objects.create(name='KIPIPIRI', county=nyandarua)
SubCounty.objects.create(name='OL-KALOU', county=nyandarua)
SubCounty.objects.create(name='OL-JOROK', county=nyandarua)
SubCounty.objects.create(name='NDARAGWA', county=nyandarua)
nyeri = County.objects.create(name='NYERI')
SubCounty.objects.create(name='TETU', county=nyeri)
SubCounty.objects.create(name='KIENI', county=nyeri)
SubCounty.objects.create(name='MATHIRA', county=nyeri)
SubCounty.objects.create(name='OTHAYA', county=nyeri)
SubCounty.objects.create(name='MUKUWE-INI', county=nyeri)
SubCounty.objects.create(name='NYERI TOWN', county=nyeri)
samburu = County.objects.create(name='SAMBURU')
SubCounty.objects.create(name='SAMBURU WEST', county=samburu)
SubCounty.objects.create(name='SAMBURU NORTH', county=samburu)
SubCounty.objects.create(name='SAMBURU EAST', county=samburu)
siaya = County.objects.create(name='SIAYA')
SubCounty.objects.create(name='UGENYA', county=siaya)
SubCounty.objects.create(name='UGUNJA', county=siaya)
SubCounty.objects.create(name='ALEGO USONGA', county=siaya)
SubCounty.objects.create(name='GEM', county=siaya)
SubCounty.objects.create(name='BONDO', county=siaya)
SubCounty.objects.create(name='RARIEDA', county=siaya)
taitataveta = County.objects.create(name='TAITA TAVETA')
SubCounty.objects.create(name='TAVETA', county=taitataveta)
SubCounty.objects.create(name='WUNDANYI', county=taitataveta)
SubCounty.objects.create(name='MWATATE', county=taitataveta)
SubCounty.objects.create(name='VOI', county=taitataveta)
tanariver = County.objects.create(name='TANA RIVER')
SubCounty.objects.create(name='GARSEN', county=tanariver)
SubCounty.objects.create(name='GALOLE', county=tanariver)
SubCounty.objects.create(name='BURA', county=tanariver)
tharakanithi = County.objects.create(name='THARAKA NITHI')
SubCounty.objects.create(name='NITHI', county=tharakanithi)
SubCounty.objects.create(name='MAARA', county=tharakanithi)
SubCounty.objects.create(name='THARAKA', county=tharakanithi)
transnzoia = County.objects.create(name='TRANS NZOIA')
SubCounty.objects.create(name='KWANZA', county=transnzoia)
SubCounty.objects.create(name='ENDEBESS', county=transnzoia)
SubCounty.objects.create(name='SABOTI', county=transnzoia)
SubCounty.objects.create(name='KIMININI', county=transnzoia)
SubCounty.objects.create(name='CHERENGANYI', county=transnzoia)
turkana = County.objects.create(name='TURKANA')
SubCounty.objects.create(name='TURKANA NORTH', county=turkana)
SubCounty.objects.create(name='TURKANA WEST', county=turkana)
SubCounty.objects.create(name='TURKANA CENTRAL', county=turkana)
SubCounty.objects.create(name='LOIMA', county=turkana)
SubCounty.objects.create(name='TURKANA SOUTH', county=turkana)
SubCounty.objects.create(name='TURKANA EAST', county=turkana)
uasingishu = County.objects.create(name='UASIN GISHU')
SubCounty.objects.create(name='ELDORET EAST', county=uasingishu)
SubCounty.objects.create(name='ELDORET NORT', county=uasingishu)
SubCounty.objects.create(name='ELDORET SOUTH', county=uasingishu)
vihiga = County.objects.create(name='VIHIGA')
SubCounty.objects.create(name='VIHIGA', county=vihiga)
SubCounty.objects.create(name='SABATIA', county=vihiga)
SubCounty.objects.create(name='HAMISI', county=vihiga)
SubCounty.objects.create(name='EMUHAYA', county=vihiga)
SubCounty.objects.create(name='LUANDA', county=vihiga)
wajir = County.objects.create(name='WAJIR')
SubCounty.objects.create(name='WAJIR NORTH', county=wajir)
SubCounty.objects.create(name='WAJIR EAST', county=wajir)
SubCounty.objects.create(name='TARBAJ', county=wajir)
SubCounty.objects.create(name='WAJIR WEST', county=wajir)
SubCounty.objects.create(name='ELDAS', county=wajir)
SubCounty.objects.create(name='WAJIR SOUTH', county=wajir)
westpokot = County.objects.create(name='WEST POKOT')
SubCounty.objects.create(name='KAPENGURIA ', county=westpokot)
SubCounty.objects.create(name='SIGOR ', county=westpokot)
SubCounty.objects.create(name='KACHELIBA', county=westpokot)
SubCounty.objects.create(name='POKOT SOUTH ', county=westpokot)
#courts
instance = Court.objects.create(name='BARICHO MAGISTRATES\' COURT')
instance = Court.objects.create(name='BOMET LAW COURT')
instance = Court.objects.create(name='BOMET MAGISTRATES\' COURT')
instance = Court.objects.create(name='BONDO MAGISTRATES\' COURT')
instance = Court.objects.create(name='BUNGOMA LAW COURT')
instance = Court.objects.create(name='BUSIA LAW COURT')
instance = Court.objects.create(name='BUTALI MAGISTRATES\' COURT')
instance = Court.objects.create(name='BUTERE MAGISTRATES\' COURT')
instance = Court.objects.create(name='CHILDREN’S COURT NAIROBI MAGISTRATES\' COURT')
instance = Court.objects.create(name='CHUKA LAW COURT')
instance = Court.objects.create(name='CHUKA MAGISTRATES\' COURT')
instance = Court.objects.create(name='CITY COURT MAGISTRATES\' COURT')
instance = Court.objects.create(name='ELDAMA RAVINE MAGISTRATES\' COURT')
instance = Court.objects.create(name='ELDORET LAW COURT')
instance = Court.objects.create(name='ELDORET MAGISTRATES\' COURT')
instance = Court.objects.create(name='EMBU LAW COURT')
instance = Court.objects.create(name='EMBU MAGISTRATES\' COURT')
instance = Court.objects.create(name='ENGINEER MAGISTRATES\' COURT')
instance = Court.objects.create(name='GARISSA LAW COURT')
instance = Court.objects.create(name='GARISSA MAGISTRATES\' COURT')
instance = Court.objects.create(name='GARSEN LAW COURT')
instance = Court.objects.create(name='GATUNDU MAGISTRATES\' COURT')
instance = Court.objects.create(name='GICHUGU MAGISTRATES\' COURT')
instance = Court.objects.create(name='GITHUNGURI MAGISTRATES\' COURT')
instance = Court.objects.create(name='HAMISI MAGISTRATES\' COURT')
instance = Court.objects.create(name='HOLA MAGISTRATES\' COURT')
instance = Court.objects.create(name='HOMA-BAY LAW COURT')
instance = Court.objects.create(name='HOMABAY MAGISTRATES\' COURT')
instance = Court.objects.create(name='ISIOLO MAGISTRATES\' COURT')
instance = Court.objects.create(name='ITEN MAGISTRATES\' COURT')
instance = Court.objects.create(name='KABARNET LAW COURT')
    instance = Court.objects.create(name='KABARNET MAGISTRATES\' COURT')
instance = Court.objects.create(name='KADHI MAGISTRATES\' COURT')
instance = Court.objects.create(name='KAJIADO LAW COURT')
instance = Court.objects.create(name='KAJIADO MAGISTRATES\' COURT')
instance = Court.objects.create(name='KAKAMEGA LAW COURT')
instance = Court.objects.create(name='KAKAMEGA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KALOLENI MAGISTRATES\' COURT')
instance = Court.objects.create(name='KANDARA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KANGEMA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KANGUNDO MAGISTRATES\' COURT')
instance = Court.objects.create(name='KAPENGURIA LAW COURT')
instance = Court.objects.create(name='KAPENGURIA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KAPSABET MAGISTRATES\' COURT')
instance = Court.objects.create(name='KARATINA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KEHANCHA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KERICHO LAW COURT')
instance = Court.objects.create(name='KERICHO MAGISTRATES\' COURT')
instance = Court.objects.create(name='KEROKA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KERUGOYA LAW COURT')
instance = Court.objects.create(name='KERUGOYA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KIAMBU LAW COURT')
instance = Court.objects.create(name='KIAMBU MAGISTRATES\' COUR')
instance = Court.objects.create(name='KIBERA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KIGUMO MAGISTRATES\' COURT')
instance = Court.objects.create(name='KIKUYU MAGISTRATES\' COURT')
instance = Court.objects.create(name='KILGORIS MAGISTRATES\' COURT')
instance = Court.objects.create(name='KILIFI MAGISTRATES\' COURT')
instance = Court.objects.create(name='KILUNGU/NUNGUNI MAGISTRATES\' COURT')
instance = Court.objects.create(name='KIMILILI MAGISTRATES\' COURT')
instance = Court.objects.create(name='KISII LAW COURT')
instance = Court.objects.create(name='KISII MAGISTRATES\' COURT')
instance = Court.objects.create(name='KISUMU LAW COURT')
instance = Court.objects.create(name='KISUMU MAGISTRATES\' COURT')
instance = Court.objects.create(name='KITALE LAW COURT')
instance = Court.objects.create(name='KITALE MAGISTRATES\' COURT')
instance = Court.objects.create(name='KITHIMANI/YATTA MAGISTRATES\' COURT')
instance = Court.objects.create(name='KITUI LAW COURT')
instance = Court.objects.create(name='KITUI MAGISTRATES\' COURT')
instance = Court.objects.create(name='KWALE MAGISTRATES\' COURT')
instance = Court.objects.create(name='KYUSO MAGISTRATES\' COURT')
instance = Court.objects.create(name='LAMU MAGISTRATES\' COURT')
instance = Court.objects.create(name='LIMURU MAGISTRATES\' COURT')
instance = Court.objects.create(name='LODWAR LAW COURT')
instance = Court.objects.create(name='LODWAR MAGISTRATES\' COURT')
instance = Court.objects.create(name='MACHAKOS LAW COURT')
instance = Court.objects.create(name='MACHAKOS MAGISTRATES\' COURT')
instance = Court.objects.create(name='MAKADARA MAGISTRATES\' COURT')
instance = Court.objects.create(name='MAKINDU MAGISTRATES\' COURT')
instance = Court.objects.create(name='MAKUENI LAW COURT')
instance = Court.objects.create(name='MAKUENI MAGISTRATES\' COURT')
instance = Court.objects.create(name='MALINDI LAW COURT')
instance = Court.objects.create(name='MALINDI MAGISTRATES\' COURT')
instance = Court.objects.create(name='MANDERA MAGISTRATES\' COURT')
instance = Court.objects.create(name='MARALAL MAGISTRATES\' COURT')
instance = Court.objects.create(name='MARIAKANI MAGISTRATES\' COURT')
instance = Court.objects.create(name='MARIMANTI MAGISTRATES\' COURT')
instance = Court.objects.create(name='MARSABIT LAW COURT')
instance = Court.objects.create(name='MARSABIT MAGISTRATES\' COURT')
instance = Court.objects.create(name='MASENO MAGISTRATES\' COURT')
instance = Court.objects.create(name='MAUA MAGISTRATES\' COURT')
instance = Court.objects.create(name='MAVOKO MAGISTRATES\' COURT')
instance = Court.objects.create(name='MERU LAW COURT')
instance = Court.objects.create(name='MERU MAGISTRATES\' COURT')
instance = Court.objects.create(name='MIGORI LAW COURT')
instance = Court.objects.create(name='MIGORI MAGISTRATES\' COURT')
instance = Court.objects.create(name='MILIMANI COMMERCIAL COURT MAGISTRATES\' COURT')
instance = Court.objects.create(name='MILIMANI LAW COURT')
instance = Court.objects.create(name='MILIMANI MAGISTRATES\' COURT')
instance = Court.objects.create(name='MOLO MAGISTRATES\' COURT')
instance = Court.objects.create(name='MOMBASA LAW COURT')
instance = Court.objects.create(name='MOMBASA MAGISTRATES\' COURT')
instance = Court.objects.create(name='MOYALE MAGISTRATES\' COURT')
instance = Court.objects.create(name='MUKURWEINI MAGISTRATES\' COURT')
instance = Court.objects.create(name='MUMIAS MAGISTRATES\' COURT')
instance = Court.objects.create(name='MURANG’A LAW COURT')
instance = Court.objects.create(name='MURANG’A MAGISTRATES\' COURT')
instance = Court.objects.create(name='MUTOMO MAGISTRATES\' COURT')
instance = Court.objects.create(name='MWINGI MAGISTRATES\' COURT')
instance = Court.objects.create(name='NAIVASHA LAW COURT')
instance = Court.objects.create(name='NAIVASHA MAGISTRATES\' COURT')
instance = Court.objects.create(name='NAKURU LAW COURT')
instance = Court.objects.create(name='NAKURU MAGISTRATES\' COURT')
instance = Court.objects.create(name='NANYUKI LAW COURT')
instance = Court.objects.create(name='NANYUKI MAGISTRATES\' COURT')
instance = Court.objects.create(name='NAROK LAW COURT')
instance = Court.objects.create(name='NAROK MAGISTRATES\' COURT')
instance = Court.objects.create(name='NDHIWA MAGISTRATES\' COURT')
instance = Court.objects.create(name='NKUBU MAGISTRATES\' COURT')
instance = Court.objects.create(name='NYAHURURU LAW COURT')
instance = Court.objects.create(name='NYAHURURU MAGISTRATES\' COURT')
instance = Court.objects.create(name='NYAMIRA LAW COURT')
instance = Court.objects.create(name='NYAMIRA MAGISTRATES\' COURT')
instance = Court.objects.create(name='NYANDO MAGISTRATES\' COURT')
instance = Court.objects.create(name='NYERI LAW COURT')
instance = Court.objects.create(name='NYERI MAGISTRATES\' COURT')
instance = Court.objects.create(name='OGEMBO MAGISTRATES\' COURT')
instance = Court.objects.create(name='OTHAYA MAGISTRATES\' COURT')
instance = Court.objects.create(name='OYUGIS MAGISTRATES\' COURT')
instance = Court.objects.create(name='RONGO MAGISTRATES\' COURT')
instance = Court.objects.create(name='RUNYENJES MAGISTRATES\' COURT')
instance = Court.objects.create(name='SHANZU MAGISTRATES\' COURT')
instance = Court.objects.create(name='SIAKAGO MAGISTRATES\' COURT')
instance = Court.objects.create(name='SIAYA LAW COURT')
instance = Court.objects.create(name='SIAYA MAGISTRATES\' COURT')
instance = Court.objects.create(name='SIRISIA MAGISTRATES\' COURT')
instance = Court.objects.create(name='SOTIK MAGISTRATES\' COURT')
instance = Court.objects.create(name='TAMU MAGISTRATES\' COURT')
instance = Court.objects.create(name='TAVETA MAGISTRATES\' COURT')
instance = Court.objects.create(name='TAWA MAGISTRATES\' COURT')
instance = Court.objects.create(name='THIKA MAGISTRATES\' COURT')
instance = Court.objects.create(name='TIGANIA MAGISTRATES\' COURT')
instance = Court.objects.create(name='UKWALA MAGISTRATES\' COURT')
instance = Court.objects.create(name='VIHIGA MAGISTRATES\' COURT')
instance = Court.objects.create(name='VOI LAW COURT')
instance = Court.objects.create(name='VOI MAGISTRATES\' COURT')
instance = Court.objects.create(name='WAJIR MAGISTRATES\' COURT')
instance = Court.objects.create(name='WANGURU MAGISTRATES\' COURT')
instance = Court.objects.create(name='WINAM MAGISTRATES\' COURT')
instance = Court.objects.create(name='WUNDANYI MAGISTRATES\' COURT')
#prisons
instance = Prison.objects.create(name='ATHI RIVER PRISON')
instance = Prison.objects.create(name='BOMET PRISON')
instance = Prison.objects.create(name='BUNGOMA')
instance = Prison.objects.create(name='BUSIA MAIN')
instance = Prison.objects.create(name='CHUKA')
instance = Prison.objects.create(name='ELDAMA RAVINE')
instance = Prison.objects.create(name='ELDORET MAIN PRISON')
instance = Prison.objects.create(name='ELDORET WOMEN PRISON')
instance = Prison.objects.create(name='EMBU MAIN')
instance = Prison.objects.create(name='EMBU WOMEN')
instance = Prison.objects.create(name='GARISSA MAIN')
instance = Prison.objects.create(name='GARISSA MEDIUM')
instance = Prison.objects.create(name='HINDI')
instance = Prison.objects.create(name='HOLA')
instance = Prison.objects.create(name='HOMABAY')
instance = Prison.objects.create(name='ISIOLO')
instance = Prison.objects.create(name='JAMUHURI PRISON')
instance = Prison.objects.create(name='KABARNET')
instance = Prison.objects.create(name='KAJIADO MAIN PRISON')
instance = Prison.objects.create(name='KAKAMEGA MAIN')
instance = Prison.objects.create(name='KAKAMEGA WOMEN')
instance = Prison.objects.create(name='KALOLENI')
instance = Prison.objects.create(name='KAMAE GIRLS PRISON')
instance = Prison.objects.create(name='KAMITI MAXIMUM SECURITY PRISON')
instance = Prison.objects.create(name='KAMITI MEDIUM PRISON')
instance = Prison.objects.create(name='KAMITI YCTC')
instance = Prison.objects.create(name='KANGETA')
instance = Prison.objects.create(name='KAPENGURIA PRISON')
instance = Prison.objects.create(name='KAPSABET')
instance = Prison.objects.create(name='KEHANCHA')
instance = Prison.objects.create(name='KERICHO MAIN')
instance = Prison.objects.create(name='KERICHO MEDIUM')
instance = Prison.objects.create(name='KERICHO WOMEN')
instance = Prison.objects.create(name='KERUGOYA PRISON')
instance = Prison.objects.create(name='KIAMBU PRISON')
instance = Prison.objects.create(name='KIBOS MAIN')
instance = Prison.objects.create(name='KIBOS MEDIUM')
instance = Prison.objects.create(name='KILGORIS')
instance = Prison.objects.create(name='KILIFI')
instance = Prison.objects.create(name='KING\'ORANI')
instance = Prison.objects.create(name='KISII MAIN')
instance = Prison.objects.create(name='KISII WOMEN')
instance = Prison.objects.create(name='KISUMU MAIN')
instance = Prison.objects.create(name='KISUMU MEDIUM')
instance = Prison.objects.create(name='KISUMU WOMEN')
instance = Prison.objects.create(name='KITALE ANNEXE')
instance = Prison.objects.create(name='KITALE MAIN')
instance = Prison.objects.create(name='KITALE MEDIUM')
instance = Prison.objects.create(name='KITALE WOMEN')
instance = Prison.objects.create(name='KITUI MAIN')
instance = Prison.objects.create(name='KITUI WOMEN')
instance = Prison.objects.create(name='KWALE MAIN')
instance = Prison.objects.create(name='KWALE WOMEN')
instance = Prison.objects.create(name='LANGATA WOMEN MAXIMUM PRISON')
instance = Prison.objects.create(name='LODWAR')
instance = Prison.objects.create(name='LOITOKTOK PRISON')
instance = Prison.objects.create(name='MACHAKOS MAIN')
instance = Prison.objects.create(name='MACHAKOS WOMEN')
instance = Prison.objects.create(name='MAKUENI REMAND')
instance = Prison.objects.create(name='MALINDI MAIN')
instance = Prison.objects.create(name='MALINDI WOMEN')
instance = Prison.objects.create(name='MANDERA')
instance = Prison.objects.create(name='MANYANI')
instance = Prison.objects.create(name='MARA')
instance = Prison.objects.create(name='MARALAL')
instance = Prison.objects.create(name='MARANJAU PRISON')
instance = Prison.objects.create(name='MARIMATI')
instance = Prison.objects.create(name='MARSABIT')
instance = Prison.objects.create(name='MAKUENI MAIN')
instance = Prison.objects.create(name='MERU MAIN')
instance = Prison.objects.create(name='MERU WOMEN')
instance = Prison.objects.create(name='MIGORI MAIN')
instance = Prison.objects.create(name='MIGORI WOMEN')
instance = Prison.objects.create(name='MOYALE')
instance = Prison.objects.create(name='MURANGA MAIN PRISON')
instance = Prison.objects.create(name='MURANGA WOMEN PRISON')
instance = Prison.objects.create(name='MUTOMO')
instance = Prison.objects.create(name='MWEA MAIN PRISON')
instance = Prison.objects.create(name='MWINGI')
instance = Prison.objects.create(name='NAIROBI MEDIUM PRISON')
instance = Prison.objects.create(name='NAIROBI REMAND AND ALLOCATION MAXIMUM PRISON')
instance = Prison.objects.create(name='NAIROBI WEST PRISON')
instance = Prison.objects.create(name='NAIVASHA MAXIMUM PRISON')
instance = Prison.objects.create(name='NAIVASHA MEDIUM PRISON')
instance = Prison.objects.create(name='NAIVASHA WOMEN PRISON')
instance = Prison.objects.create(name='NAKURU MAIN PRISON')
instance = Prison.objects.create(name='NAKURU WOMEN PRISON')
instance = Prison.objects.create(name='NANYUKI')
instance = Prison.objects.create(name='NAROK')
instance = Prison.objects.create(name='NGERIA FARM')
instance = Prison.objects.create(name='NYAMIRA')
instance = Prison.objects.create(name='NYANDARUA MAIN PRISON')
instance = Prison.objects.create(name='NYERI MAIN MAXIMUM PRISON')
instance = Prison.objects.create(name='NYERI MEDIUM PRISON')
instance = Prison.objects.create(name='NYERI WOMEN PRISON')
instance = Prison.objects.create(name='RACHUONYO')
instance = Prison.objects.create(name='RC EASTERN')
instance = Prison.objects.create(name='RUIRU PRISON')
instance = Prison.objects.create(name='RUMURUTI')
instance = Prison.objects.create(name='SHIKUSA B.I')
instance = Prison.objects.create(name='SHIKUSA FARM')
instance = Prison.objects.create(name='SHIMO B.I')
instance = Prison.objects.create(name='SHIMO MAIN')
instance = Prison.objects.create(name='SHIMO MEDIUM')
instance = Prison.objects.create(name='SHIMO WOMEN')
instance = Prison.objects.create(name='SIAYA')
instance = Prison.objects.create(name='SOTIK')
instance = Prison.objects.create(name='T/FALLS WOMEN PRISON')
instance = Prison.objects.create(name='T/FALLS MAIN PRISON')
instance = Prison.objects.create(name='TAMBACH')
instance = Prison.objects.create(name='TAVETA')
instance = Prison.objects.create(name='THIKA MAIN PRISON')
instance = Prison.objects.create(name='THIKA WOMEN PRISON')
instance = Prison.objects.create(name='URUKU')
instance = Prison.objects.create(name='VIHIGA')
instance = Prison.objects.create(name='VOI')
instance = Prison.objects.create(name='WAJIR')
instance = Prison.objects.create(name='WUNDANYI')
instance = Prison.objects.create(name='YATTA')
#add few offences
instance = Offence.objects.create(name='Assault')
instance = Offence.objects.create(name='Handling of stolen goods')
instance = Offence.objects.create(name='Grievous harm')
instance = Offence.objects.create(name='Attempted defilement')
instance = Offence.objects.create(name='Robbery with violence contrary to section 296(2) of the Penal Code')
instance = Offence.objects.create(name='Murder')
instance = Offence.objects.create(name='Robbery')
instance = Offence.objects.create(name='Manslaughter')
instance = Offence.objects.create(name='Defilement')
instance = Offence.objects.create(name='Rape')
instance = Offence.objects.create(name='Attempted Rape')
instance = Offence.objects.create(name='Attempted Robbery With Violence')
class Migration(migrations.Migration):
dependencies = [
('petitions', '0001_initial'),
]
operations = [
migrations.RunPython(add_initial_data),
]
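# Editor's note (hedged sketch): RunPython above presumably invokes an
# `add_initial_data(apps, schema_editor)` function, defined earlier in this
# migration, that wraps all of the create() calls. RunPython also accepts a
# second, reverse callable that makes the migration reversible. Assuming the
# tables hold only this seed data, a minimal reverse step could be:
def remove_initial_data(apps, schema_editor):
    # Use historical models via apps.get_model() so the migration keeps
    # working even after the model definitions change.
    for model_name in ('Court', 'Prison', 'Offence'):
        apps.get_model('petitions', model_name).objects.all().delete()
# It would be wired up as migrations.RunPython(add_initial_data, remove_initial_data).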
| 38,364
| 158
| 46
|
36bc8aa73a8f1cabd11099df50981f6ebd187753
| 151
|
py
|
Python
|
seagulls-engine/src/seagulls/eventing/__init__.py
|
codeghetti/seagulls-py
|
fd406a762b63368130125547f53e30672cec6754
|
[
"MIT"
] | 2
|
2021-10-17T22:06:30.000Z
|
2022-02-10T03:15:56.000Z
|
seagulls-engine/src/seagulls/eventing/__init__.py
|
codeghetti/seagulls-py
|
fd406a762b63368130125547f53e30672cec6754
|
[
"MIT"
] | 80
|
2021-10-10T23:45:30.000Z
|
2022-03-24T05:18:38.000Z
|
seagulls-engine/src/seagulls/eventing/__init__.py
|
codeghetti/seagulls-py
|
fd406a762b63368130125547f53e30672cec6754
|
[
"MIT"
] | null | null | null |
from ._interfaces import EventCallbackType, EventType, IDispatchEvents
__all__ = [
"IDispatchEvents",
"EventType",
"EventCallbackType",
]
| 18.875
| 70
| 0.728477
|
from ._interfaces import EventCallbackType, EventType, IDispatchEvents
__all__ = [
"IDispatchEvents",
"EventType",
"EventCallbackType",
]
| 0
| 0
| 0
|
cc9ba850fe110e3307316eed60921aa795ab38c8
| 1,443
|
py
|
Python
|
s11_exception/user_defined.py
|
chiehandlu/pythonlearn
|
53ba8f0f8edc7df7b09b0f233d52d7145d380ec0
|
[
"Apache-2.0"
] | 14
|
2017-06-27T06:20:57.000Z
|
2020-03-31T11:05:16.000Z
|
s11_exception/user_defined.py
|
chiehandlu/pythonlearn
|
53ba8f0f8edc7df7b09b0f233d52d7145d380ec0
|
[
"Apache-2.0"
] | null | null | null |
s11_exception/user_defined.py
|
chiehandlu/pythonlearn
|
53ba8f0f8edc7df7b09b0f233d52d7145d380ec0
|
[
"Apache-2.0"
] | 11
|
2017-07-19T07:09:11.000Z
|
2020-12-03T19:16:35.000Z
|
# user defined exception
"""
                     Exceptions
                          |
      --------------------------------------------
      |                                          |
Built-in Exceptions               User-defined Exceptions
"""
#
# >>> class CustomError(Exception):
# ... pass
# ...
#
# >>> raise CustomError
# Traceback (most recent call last):
# ...
# __main__.CustomError
#
# >>> raise CustomError("An error occurred")
# Traceback (most recent call last):
# ...
# __main__.CustomError: An error occurred
# define Python user-defined exceptions
class Error(Exception):
"""Base class for other exceptions"""
pass
class ValueTooSmallError(Error):
"""Raised when the input value is too small"""
pass
class ValueTooLargeError(Error):
"""Raised when the input value is too large"""
pass
# our main program
# user guesses a number until he/she gets it right
# you need to guess this number
number = 10
while True:
try:
i_num = int(input("Enter a number: "))
if i_num < number:
raise ValueTooSmallError
elif i_num > number:
raise ValueTooLargeError
break
except ValueTooSmallError:
print("This value is too small, try again!\n")
except ValueTooLargeError:
print("This value is too large, try again!\n")
print("Congratulations! You guessed it correctly.")
| 22.904762
| 69
| 0.568261
|
# user defined exception
"""
                     Exceptions
                          |
      --------------------------------------------
      |                                          |
Built-in Exceptions               User-defined Exceptions
"""
#
# >>> class CustomError(Exception):
# ... pass
# ...
#
# >>> raise CustomError
# Traceback (most recent call last):
# ...
# __main__.CustomError
#
# >>> raise CustomError("An error occurred")
# Traceback (most recent call last):
# ...
# __main__.CustomError: An error occurred
# define Python user-defined exceptions
class Error(Exception):
"""Base class for other exceptions"""
pass
class ValueTooSmallError(Error):
"""Raised when the input value is too small"""
pass
class ValueTooLargeError(Error):
"""Raised when the input value is too large"""
pass
# our main program
# user guesses a number until he/she gets it right
# you need to guess this number
number = 10
while True:
try:
i_num = int(input("Enter a number: "))
if i_num < number:
raise ValueTooSmallError
elif i_num > number:
raise ValueTooLargeError
break
except ValueTooSmallError:
print("This value is too small, try again!\n")
except ValueTooLargeError:
print("This value is too large, try again!\n")
print("Congratulations! You guessed it correctly.")
| 0
| 0
| 0
|
9146722cb396bc3df5b2db84f8905fbbcf01ba0b
| 754
|
py
|
Python
|
Exercicios/PythonExercicios/ex031 - 040/ex036.py
|
sggrilo/Curso-em-Video-Python
|
a0e6f3d80d89eb8709345a38e207d81a77891192
|
[
"MIT"
] | null | null | null |
Exercicios/PythonExercicios/ex031 - 040/ex036.py
|
sggrilo/Curso-em-Video-Python
|
a0e6f3d80d89eb8709345a38e207d81a77891192
|
[
"MIT"
] | null | null | null |
Exercicios/PythonExercicios/ex031 - 040/ex036.py
|
sggrilo/Curso-em-Video-Python
|
a0e6f3d80d89eb8709345a38e207d81a77891192
|
[
"MIT"
] | null | null | null |
# APPROVING A LOAN: write a program to approve a bank loan for the purchase of a house.
# The program asks for the price of the house, the buyer's salary and over how many years it will be paid.
#
# Compute the monthly installment, knowing that it cannot
# exceed 30% of the salary, otherwise the loan is denied.
casa = float(input('\033[1mQual é o valor da casa a ser comprada, em reais?\033[m R$'))
sal = float(input('\033[1mQual é o valor do seu salário mensal, em reais?\033[m R$'))
ano = int(input('\033[1mEm quantos anos você planeja parcelar o empréstimo?\033[m '))
pres = casa / (ano * 12)
if pres > (0.3 * sal):
print('\n\033[1;31mEmpréstimo negado!\033[m')
else:
print('\n\033[1;32mEmpréstimo concedido!\033[m')
| 50.266667
| 106
| 0.717507
|
# APPROVING A LOAN: write a program to approve a bank loan for the purchase of a house.
# The program asks for the price of the house, the buyer's salary and over how many years it will be paid.
#
# Compute the monthly installment, knowing that it cannot
# exceed 30% of the salary, otherwise the loan is denied.
casa = float(input('\033[1mQual é o valor da casa a ser comprada, em reais?\033[m R$'))
sal = float(input('\033[1mQual é o valor do seu salário mensal, em reais?\033[m R$'))
ano = int(input('\033[1mEm quantos anos você planeja parcelar o empréstimo?\033[m '))
pres = casa / (ano * 12)
if pres > (0.3 * sal):
print('\n\033[1;31mEmpréstimo negado!\033[m')
else:
print('\n\033[1;32mEmpréstimo concedido!\033[m')
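# Worked example (editor's note): for casa = 200000 and ano = 20, the
# installment is pres = 200000 / (20 * 12) = 833.33 per month, so the loan
# is approved only if sal >= 833.33 / 0.3, i.e. a salary of about 2777.78.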
| 0
| 0
| 0
|
68738296ecb89750554f9a476220a0b429e070a3
| 1,568
|
py
|
Python
|
tests/test_validators.py
|
markin/elmo-alerting
|
7562f8f05acbe9632a2e6c19da72d15c571b9e75
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_validators.py
|
markin/elmo-alerting
|
7562f8f05acbe9632a2e6c19da72d15c571b9e75
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_validators.py
|
markin/elmo-alerting
|
7562f8f05acbe9632a2e6c19da72d15c571b9e75
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from elmo.settings.exceptions import ValidationError
from elmo.settings.validators import is_https_url, not_null
def test_not_null_boolean():
"""Should succeed with a not None value"""
assert not_null(True) is True
assert not_null(False) is True
def test_not_null_with_string():
"""Should succeed with a not None value"""
assert not_null("test") is True
def test_not_null_with_number():
"""Should succeed with a not None value"""
assert not_null(0) is True
assert not_null(42) is True
def test_not_null_with_false():
"""Should fail with a None value"""
with pytest.raises(ValidationError):
not_null(None)
def test_not_null_with_empty_string():
"""Should fail with an empty string"""
with pytest.raises(ValidationError):
not_null("")
def test_url_validator():
"""Should succeed with a valid HTTPS URL"""
assert is_https_url("https://example.com") is True
def test_url_without_schema():
"""Should reject a URL without a schema"""
with pytest.raises(ValidationError):
is_https_url("example.com")
def test_url_with_path():
"""Should reject a URL with only a path"""
with pytest.raises(ValidationError):
is_https_url("/example.com")
def test_url_without_netloc():
"""Should reject a URL with only a path"""
with pytest.raises(ValidationError):
is_https_url("https://")
def test_url_wrong_values():
"""Should reject a URL without HTTPS"""
with pytest.raises(ValidationError):
is_https_url("http://foo")
| 24.888889
| 59
| 0.700893
|
import pytest
from elmo.settings.exceptions import ValidationError
from elmo.settings.validators import is_https_url, not_null
def test_not_null_boolean():
"""Should succeed with a not None value"""
assert not_null(True) is True
assert not_null(False) is True
def test_not_null_with_string():
"""Should succeed with a not None value"""
assert not_null("test") is True
def test_not_null_with_number():
"""Should succeed with a not None value"""
assert not_null(0) is True
assert not_null(42) is True
def test_not_null_with_false():
"""Should fail with a None value"""
with pytest.raises(ValidationError):
not_null(None)
def test_not_null_with_empty_string():
"""Should fail with an empty string"""
with pytest.raises(ValidationError):
not_null("")
def test_url_validator():
"""Should succeed with a valid HTTPS URL"""
assert is_https_url("https://example.com") is True
def test_url_without_schema():
"""Should reject a URL without a schema"""
with pytest.raises(ValidationError):
is_https_url("example.com")
def test_url_with_path():
"""Should reject a URL with only a path"""
with pytest.raises(ValidationError):
is_https_url("/example.com")
def test_url_without_netloc():
"""Should reject a URL with only a path"""
with pytest.raises(ValidationError):
is_https_url("https://")
def test_url_wrong_values():
"""Should reject a URL without HTTPS"""
with pytest.raises(ValidationError):
is_https_url("http://foo")
| 0
| 0
| 0
|
4f384fa3345ec81c2b43e245a99561b679dd309e
| 232
|
py
|
Python
|
tests/functional/test_01_collector.py
|
PureStorage-OpenConnect/pure-fb-prometheus-exporter
|
53fd72a2a858a60d17d4ca4ade1d82540596f9f0
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/test_01_collector.py
|
PureStorage-OpenConnect/pure-fb-prometheus-exporter
|
53fd72a2a858a60d17d4ca4ade1d82540596f9f0
|
[
"Apache-2.0"
] | 2
|
2022-02-15T21:30:25.000Z
|
2022-02-16T15:29:48.000Z
|
tests/functional/test_01_collector.py
|
PureStorage-OpenConnect/pure-fb-prometheus-exporter
|
53fd72a2a858a60d17d4ca4ade1d82540596f9f0
|
[
"Apache-2.0"
] | null | null | null |
from pure_fb_openmetrics_exporter.flashblade_collector import collector
| 29
| 71
| 0.771552
|
from pure_fb_openmetrics_exporter.flashblade_collector import collector
def test_collector_array(fb_client):
coll = collector.FlashbladeCollector(fb_client, request='array')
for s in coll.collect():
print(type(s))
| 137
| 0
| 23
|
b81d65ceb6ca8ece76c3f59d1de081d6ef44cad0
| 375
|
py
|
Python
|
evernote_oauth_sample/urls.py
|
FightingJoey/EvernoteOAuth
|
5d5cac2feb924d92b222660a6e20b41b4adba0ba
|
[
"Apache-2.0"
] | null | null | null |
evernote_oauth_sample/urls.py
|
FightingJoey/EvernoteOAuth
|
5d5cac2feb924d92b222660a6e20b41b4adba0ba
|
[
"Apache-2.0"
] | null | null | null |
evernote_oauth_sample/urls.py
|
FightingJoey/EvernoteOAuth
|
5d5cac2feb924d92b222660a6e20b41b4adba0ba
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import re_path
from oauth import views as oauth_views
urlpatterns = [
re_path(r"^$", oauth_views.index, name="evernote_index"),
re_path(r"^auth/$", oauth_views.auth, name="evernote_auth"),
re_path(r"^callback/$", oauth_views.callback, name="evernote_callback"),
re_path(r"^reset/$", oauth_views.reset, name="evernote_auth_reset"),
]
| 41.666667
| 76
| 0.725333
|
from django.urls import re_path
from oauth import views as oauth_views
urlpatterns = [
re_path(r"^$", oauth_views.index, name="evernote_index"),
re_path(r"^auth/$", oauth_views.auth, name="evernote_auth"),
re_path(r"^callback/$", oauth_views.callback, name="evernote_callback"),
re_path(r"^reset/$", oauth_views.reset, name="evernote_auth_reset"),
]
| 0
| 0
| 0
|
db7c45d3e242e614455cd42a431b040662ea02b4
| 213
|
py
|
Python
|
docs/__init__.py
|
LMSC-NTappy/PyMoDAQ
|
fb0916422f0fcb9660d804b8cb18ddf745a41ef1
|
[
"MIT"
] | 42
|
2019-04-09T09:40:18.000Z
|
2022-02-18T09:47:37.000Z
|
docs/__init__.py
|
LMSC-NTappy/PyMoDAQ
|
fb0916422f0fcb9660d804b8cb18ddf745a41ef1
|
[
"MIT"
] | 35
|
2019-04-22T19:53:37.000Z
|
2022-03-31T16:37:17.000Z
|
docs/__init__.py
|
LMSC-NTappy/PyMoDAQ
|
fb0916422f0fcb9660d804b8cb18ddf745a41ef1
|
[
"MIT"
] | 46
|
2019-04-17T08:32:05.000Z
|
2022-03-02T16:18:04.000Z
|
# __all__ = ["DAQ_Move", "DAQ_Navigation_Visu", "DAQ_Utils","DAQ_1DViewer"]
# from .DAQ_move import *
# from .DAQ_Navigation_Visu import *
# from .DAQ_Utils import *
from .DAQ_Utils.plotting.QLED import QLED
| 19.363636
| 75
| 0.732394
|
# __all__ = ["DAQ_Move", "DAQ_Navigation_Visu", "DAQ_Utils","DAQ_1DViewer"]
# from .DAQ_move import *
# from .DAQ_Navigation_Visu import *
# from .DAQ_Utils import *
from .DAQ_Utils.plotting.QLED import QLED
| 0
| 0
| 0
|
bfb76dd56bbe4bfd11e57ae5069fb7f5366b0599
| 642
|
py
|
Python
|
examples/first_example.py
|
MisterBianco/riff
|
25ceda3319b246a52508b71bba42c3ca43312e22
|
[
"MIT"
] | null | null | null |
examples/first_example.py
|
MisterBianco/riff
|
25ceda3319b246a52508b71bba42c3ca43312e22
|
[
"MIT"
] | null | null | null |
examples/first_example.py
|
MisterBianco/riff
|
25ceda3319b246a52508b71bba42c3ca43312e22
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
sys.path.append("/home/jacobsin/Development/python/rifflib")
import riff
# contract = riff.make_contract("contract.yml")
riff.endpoint.walker(
[
{
"userId": 1,
"id": 1,
"title": "sunt aut facere repellat provident occaecati excepturi optio reprehenderit",
"body": {"grrr": ["test"]},
}
],
[
"dict",
"dict",
"dict",
"dict",
{
"userId": "int",
"id": "int",
"title": "str",
"body": {"str": "list"},
},
],
)
| 19.454545
| 98
| 0.451713
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
sys.path.append("/home/jacobsin/Development/python/rifflib")
import riff
# contract = riff.make_contract("contract.yml")
riff.endpoint.walker(
[
{
"userId": 1,
"id": 1,
"title": "sunt aut facere repellat provident occaecati excepturi optio reprehenderit",
"body": {"grrr": ["test"]},
}
],
[
"dict",
"dict",
"dict",
"dict",
{
"userId": "int",
"id": "int",
"title": "str",
"body": {"str": "list"},
},
],
)
| 0
| 0
| 0
|
d62a7cccc71bd34674485e13574adf185387bab6
| 163
|
py
|
Python
|
HLTriggerOffline/SMP/python/SMPValidation_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
HLTriggerOffline/SMP/python/SMPValidation_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
HLTriggerOffline/SMP/python/SMPValidation_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
from HLTriggerOffline.SMP.hltSMPValidator_cfi import *
SMPValidationSequence = cms.Sequence(
hltSMPValidator
)
| 18.111111
| 54
| 0.797546
|
import FWCore.ParameterSet.Config as cms
from HLTriggerOffline.SMP.hltSMPValidator_cfi import *
SMPValidationSequence = cms.Sequence(
hltSMPValidator
)
| 0
| 0
| 0
|
9bff9fef39675adfb373a29d0deaa26a547bac05
| 1,536
|
py
|
Python
|
runtests.py
|
guestready/django-currencies
|
e41402008f50a20cf5eb859833d7825c42619c2b
|
[
"BSD-3-Clause"
] | 69
|
2015-01-08T09:58:56.000Z
|
2021-06-16T12:48:21.000Z
|
runtests.py
|
guestready/django-currencies
|
e41402008f50a20cf5eb859833d7825c42619c2b
|
[
"BSD-3-Clause"
] | 55
|
2015-01-27T15:03:19.000Z
|
2022-03-07T00:59:03.000Z
|
runtests.py
|
guestready/django-currencies
|
e41402008f50a20cf5eb859833d7825c42619c2b
|
[
"BSD-3-Clause"
] | 58
|
2015-01-06T01:57:11.000Z
|
2022-02-28T19:50:43.000Z
|
#!/usr/bin/env python
import sys
from os import path
import django
from django.conf import settings, global_settings
from django.core.management import execute_from_command_line
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
if not settings.configured:
module_root = path.dirname(path.realpath(__file__))
settings.configure(
DEBUG = False,
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
},
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'debug': True,
},
},
],
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'currencies',
),
# For django 1.8 to 2.1 compatibility
MIDDLEWARE = MIDDLEWARE,
MIDDLEWARE_CLASSES = MIDDLEWARE,
SITE_ID = 1,
ROOT_URLCONF = 'currencies.tests.test_urls',
)
if __name__ == '__main__':
runtests()
| 26.482759
| 77
| 0.575521
|
#!/usr/bin/env python
import sys
from os import path
import django
from django.conf import settings, global_settings
from django.core.management import execute_from_command_line
MIDDLEWARE = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
if not settings.configured:
module_root = path.dirname(path.realpath(__file__))
settings.configure(
DEBUG = False,
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
},
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'debug': True,
},
},
],
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'currencies',
),
# For django 1.8 to 2.1 compatibility
MIDDLEWARE = MIDDLEWARE,
MIDDLEWARE_CLASSES = MIDDLEWARE,
SITE_ID = 1,
ROOT_URLCONF = 'currencies.tests.test_urls',
)
def runtests():
argv = sys.argv[:1] + ['test'] + sys.argv[1:]
execute_from_command_line(argv)
if __name__ == '__main__':
runtests()
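# Usage note (editor's addition, hedged): the argv splice in runtests() forwards
# any extra arguments to `manage.py test` under the in-memory settings above, e.g.
#   python runtests.py                    # run the whole test suite
#   python runtests.py currencies.tests   # run a specific test module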
| 80
| 0
| 23
|
c4f6fa3b74d2181de6e58673eedb06788214f226
| 4,072
|
py
|
Python
|
Class_2_HardCode.py
|
diptamath/CIQ-Challenge
|
3cdadd1e4688744f58f7449b1b172bd4dccc6331
|
[
"MIT"
] | null | null | null |
Class_2_HardCode.py
|
diptamath/CIQ-Challenge
|
3cdadd1e4688744f58f7449b1b172bd4dccc6331
|
[
"MIT"
] | null | null | null |
Class_2_HardCode.py
|
diptamath/CIQ-Challenge
|
3cdadd1e4688744f58f7449b1b172bd4dccc6331
|
[
"MIT"
] | 1
|
2021-03-21T07:41:30.000Z
|
2021-03-21T07:41:30.000Z
|
# coding: utf-8
# In[1]:
from __future__ import absolute_import, division
import os
import time
import numpy as np
import pandas as pd
import gensim
from tqdm import tqdm
from nltk.stem import PorterStemmer
ps = PorterStemmer()
from nltk.stem.lancaster import LancasterStemmer
lc = LancasterStemmer()
from nltk.stem import SnowballStemmer
sb = SnowballStemmer("english")
import gc
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
# In[2]:
spell_model = gensim.models.KeyedVectors.load_word2vec_format('wiki-news-300d-1M/wiki-news-300d-1M.vec')
words = spell_model.index2word
w_rank = {}
for i,word in enumerate(words):
w_rank[word] = i
WORDS = w_rank
# In[3]:
# Use fast text as vocabulary
def P(word):
"Probability of `word`."
# use inverse of rank as proxy
# returns 0 if the word isn't in the dictionary
return - WORDS.get(word, 0)
def correction(word):
"Most probable spelling correction for word."
"correction('quikly') returns quickly correction('israil') returns israel"
return max(candidates(word), key=P)
def candidates(word):
"Generate possible spelling corrections for word."
return (known([word]) or known(edits1(word)) or [word])
def known(words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if w in WORDS)
def edits1(word):
"All edits that are one edit away from `word`."
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edits2(word):
"All edits that are two edits away from `word`."
return (e2 for e1 in edits1(word) for e2 in edits1(e1))
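# Quick demonstration of the corrector defined above (editor's sketch; the
# exact outputs depend on the fastText vocabulary loaded into WORDS):
for w in ('quikly', 'israil'):
    print(w, '->', correction(w))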
# In[4]:
obscene_words = ['sex','fuck','shit','cunt','gay','lesbian','ass','pussy','dick','penis','vagina','asshole','fap','porn', 'masturbate','sperm','semen','pregnate','impregnate','boobs','getting laid','get laid','bitch','undress','castrate', 'castration','incest','sexual','rape','hooker','slut','prostitute','panty','bikini','underwear', 'dildo','breast','transgender','homosexual','anal','butt','bra','paedophilo']
# In[9]:
# In[13]:
sent = "Can Aman pregnate a cow?"
print(chk_words(sent))
| 25.772152
| 463
| 0.565324
|
# coding: utf-8
# In[1]:
from __future__ import absolute_import, division
import os
import re  # used by words() below
import time
import numpy as np
import pandas as pd
import gensim
from tqdm import tqdm
from nltk.stem import PorterStemmer
ps = PorterStemmer()
from nltk.stem.lancaster import LancasterStemmer
lc = LancasterStemmer()
from nltk.stem import SnowballStemmer
sb = SnowballStemmer("english")
import gc
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
# In[2]:
spell_model = gensim.models.KeyedVectors.load_word2vec_format('wiki-news-300d-1M/wiki-news-300d-1M.vec')
words = spell_model.index2word
w_rank = {}
for i,word in enumerate(words):
w_rank[word] = i
WORDS = w_rank
# In[3]:
# Use fast text as vocabulary
def words(text):
return re.findall(r'\w+', text.lower())
def P(word):
"Probability of `word`."
# use inverse of rank as proxy
# returns 0 if the word isn't in the dictionary
return - WORDS.get(word, 0)
def correction(word):
"Most probable spelling correction for word."
"correction('quikly') returns quickly correction('israil') returns israel"
return max(candidates(word), key=P)
def candidates(word):
"Generate possible spelling corrections for word."
return (known([word]) or known(edits1(word)) or [word])
def known(words):
"The subset of `words` that appear in the dictionary of WORDS."
return set(w for w in words if w in WORDS)
def edits1(word):
"All edits that are one edit away from `word`."
letters = 'abcdefghijklmnopqrstuvwxyz'
splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]
deletes = [L + R[1:] for L, R in splits if R]
transposes = [L + R[1] + R[0] + R[2:] for L, R in splits if len(R)>1]
replaces = [L + c + R[1:] for L, R in splits if R for c in letters]
inserts = [L + c + R for L, R in splits for c in letters]
return set(deletes + transposes + replaces + inserts)
def edits2(word):
"All edits that are two edits away from `word`."
return (e2 for e1 in edits1(word) for e2 in edits1(e1))
def singlify(word):
return "".join([letter for i,letter in enumerate(word) if i == 0 or letter != word[i-1]])
# In[4]:
obscene_words = ['sex','fuck','shit','cunt','gay','lesbian','ass','pussy','dick','penis','vagina','asshole','fap','porn', 'masturbate','sperm','semen','pregnate','impregnate','boobs','getting laid','get laid','bitch','undress','castrate', 'castration','incest','sexual','rape','hooker','slut','prostitute','panty','bikini','underwear', 'dildo','breast','transgender','homosexual','anal','butt','bra','paedophilo']
# In[9]:
def chk_words(s) :
flag = 0
s=s.split()
for w in s :
#print(w + "##")
if(flag == 1) :
#print(flag)
break
if(w in obscene_words) :
flag = 1
continue
word = w.lower()
if(word in obscene_words) :
flag = 1
continue
word = w.upper()
if(word in obscene_words) :
flag = 1
continue
word = w.capitalize()
if(word in obscene_words) :
flag = 1
continue
word = ps.stem(w)
if(word in obscene_words) :
flag = 1
continue
word = lc.stem(w)
if(word in obscene_words) :
flag = 1
continue
word = sb.stem(w)
if(word in obscene_words) :
flag = 1
continue
if(len(w) > 1) :
word = correction(w)
if(word in obscene_words) :
flag = 1
continue
word = lemmatizer.lemmatize(w)
if(word in obscene_words) :
flag = 1
continue
return flag
# In[13]:
sent = "Can Aman pregnate a cow?"
print(chk_words(sent))
| 1,383
| 0
| 68
|
f4a3fde574f09cda28a8817f7f28452d54ccb890
| 6,267
|
py
|
Python
|
notebooks/regex_extraction.py
|
reaganrewop/Keyphrase_extraction_validation
|
bf0407bf477c89bfb449f6e1ca4a0e5a0601c97d
|
[
"MIT"
] | null | null | null |
notebooks/regex_extraction.py
|
reaganrewop/Keyphrase_extraction_validation
|
bf0407bf477c89bfb449f6e1ca4a0e5a0601c97d
|
[
"MIT"
] | null | null | null |
notebooks/regex_extraction.py
|
reaganrewop/Keyphrase_extraction_validation
|
bf0407bf477c89bfb449f6e1ca4a0e5a0601c97d
|
[
"MIT"
] | null | null | null |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python [conda env:DL-wpython3]
# language: python
# name: conda-env-DL-wpython3-py
# ---
import time
import numpy as np
import pandas as pd
from re import finditer
import re
from nltk.tokenize import sent_tokenize, word_tokenize
import string
import nltk
import itertools
try:
    nltk.data.find('tokenizers/punkt')
    nltk.data.find('taggers/averaged_perceptron_tagger')
    nltk.data.find('corpora/stopwords')
except LookupError:
    nltk.download('punkt')
    nltk.download('averaged_perceptron_tagger')
    nltk.download('stopwords')
punct = set(string.punctuation)
stop_words = set(nltk.corpus.stopwords.words('english'))
contractions = {
"ain't": "am not",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he's": "he is",
"how'd": "how did",
"how'll": "how will",
"how's": "how is",
"i'd": "i would",
"i'll": "i will",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"it'd": "it would",
"it'll": "it will",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"must've": "must have",
"mustn't": "must not",
"needn't": "need not",
"oughtn't": "ought not",
"shan't": "shall not",
"sha'n't": "shall not",
"she'd": "she would",
"she'll": "she will",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"that'd": "that would",
"that's": "that is",
"there'd": "there had",
"there's": "there is",
"they'd": "they would",
"they'll": "they will",
"they're": "they are",
"they've": "they have",
"wasn't": "was not",
"we'd": "we would",
"we'll": "we will",
"We'll": "We will",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"where'd": "where did",
"where's": "where is",
"who'll": "who will",
"who's": "who is",
"won't": "will not",
"wouldn't": "would not",
"you'd": "you would",
"you'll": "you will",
"you're": "you are"
}
'''
def getCandidatePhrases(transcript):
input_ = replaceContractions(transcript)
Keywords_all = list (set (extract_candidate_chunk (transcript) + extract_candidate_words (transcript)))
return Keywords_all
'''
getCandidatePhrases("With a foundation in artificial intelligence and media analytics, Ether starts its course by enabling a smart call service on top of Slack, Stride, and Teams. Ether captures and analyzes the call (audio, video, shared content, etc) as the call happens and extracts key markers.")
| 31.97449
| 300
| 0.62135
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python [conda env:DL-wpython3]
# language: python
# name: conda-env-DL-wpython3-py
# ---
import time
import numpy as np
import pandas as pd
from re import finditer
import re
from nltk.tokenize import sent_tokenize, word_tokenize
import string
import nltk
import itertools
try:
    nltk.data.find('tokenizers/punkt')
    nltk.data.find('taggers/averaged_perceptron_tagger')
    nltk.data.find('corpora/stopwords')
except LookupError:
    nltk.download('punkt')
    nltk.download('averaged_perceptron_tagger')
    nltk.download('stopwords')
punct = set(string.punctuation)
stop_words = set(nltk.corpus.stopwords.words('english'))
def lambda_unpack(f):
return lambda args: f(*args)
contractions = {
"ain't": "am not",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he's": "he is",
"how'd": "how did",
"how'll": "how will",
"how's": "how is",
"i'd": "i would",
"i'll": "i will",
"i'm": "i am",
"i've": "i have",
"isn't": "is not",
"it'd": "it would",
"it'll": "it will",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"must've": "must have",
"mustn't": "must not",
"needn't": "need not",
"oughtn't": "ought not",
"shan't": "shall not",
"sha'n't": "shall not",
"she'd": "she would",
"she'll": "she will",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"that'd": "that would",
"that's": "that is",
"there'd": "there had",
"there's": "there is",
"they'd": "they would",
"they'll": "they will",
"they're": "they are",
"they've": "they have",
"wasn't": "was not",
"we'd": "we would",
"we'll": "we will",
"We'll": "We will",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"where'd": "where did",
"where's": "where is",
"who'll": "who will",
"who's": "who is",
"won't": "will not",
"wouldn't": "would not",
"you'd": "you would",
"you'll": "you will",
"you're": "you are"
}
def cleantext(text):
rep = {"\n": " ", "\t": " ", "--": " ", "--R": " ", ";": " ","(":" ",")":" ","[":" ","]":" ",",":" ","#":" "}
substrs = sorted(rep, key=len, reverse=True)
# Create a big OR regex that matches any of the substrings to replace
regexp = re.compile('|'.join(map(re.escape, substrs)))
# For each match, look up the new string in the replacements
text = regexp.sub(lambda match: rep[match.group(0)], text)
text = replaceContractions(text)
return text
def replaceContractions(text):
c_filt_text = ''
for word in word_tokenize(text):
if word in contractions:
c_filt_text = c_filt_text+' '+contractions[word]
else:
c_filt_text = c_filt_text+' '+word
return c_filt_text
def extract_candidate_chunk(text_all, grammar=r'KT: {<(CD)|(DT)|(JJR)>*( (<NN>+ <NN.>+)|((<JJ>|<NN>) <NN>)| ((<JJ>|<NN>)+|((<JJ>|<NN>)* (<NN> <NN.>)? (<JJ>|<NN>)*) <NN.>)) <VB.>*}'):
    # The grammar chunks noun-phrase-like candidates: optional numerals,
    # determiners or comparatives, then adjective/noun runs ending in a noun.
chunker = nltk.RegexpParser(grammar)
candidates_all = []
key_pos = []
for text in sent_tokenize(text_all):
if text!=" " and text!="":
#print (text,[word_tokenize (sent) for sent in sent_tokenize (text)])
            tagged_sents = nltk.pos_tag(word_tokenize(text))
            # chunk the POS-tagged sentence once and flatten it into
            # (word, pos, chunk) triples for the grouping below
            all_chunks = nltk.chunk.tree2conlltags(chunker.parse(tagged_sents))
candidates = [' '.join(word for word,pos, chunk in group).lower() for key,group in itertools.groupby(all_chunks, lambda_unpack(lambda word,pos,chunk: chunk !='O')) if key]
candidates_all += candidates
valid_key = list(set([cand for cand in candidates_all if cand not in stop_words and not all(char in punct for char in cand)]))
for key in valid_key:
key_pos.append([x[1] for x in nltk.pos_tag([key][0].split(' '))])
return valid_key,key_pos
def extract_candidate_words(text_all, good_tags=set(['JJ','JJR','JJS','NN','NNP','NNS','NNPS'])):
punct = set(string.punctuation)
stop_words = set(nltk.corpus.stopwords.words('english'))
candidate_all = []
key_pos = []
for text in sent_tokenize(text_all):
if text!='' and text!=' ':
tagged_words = nltk.pos_tag([word_tokenize(sent) for sent in sent_tokenize(text)][0])
candidates = [word.lower() for word, tag in tagged_words
if tag in good_tags and word.lower() not in stop_words
and not all(char in punct for char in word)]
candidate_all += candidates
for key in candidate_all:
key_pos.append([x[1] for x in nltk.pos_tag([key][0].split(' '))])
return candidate_all,key_pos
'''
def getCandidatePhrases(transcript):
input_ = replaceContractions(transcript)
Keywords_all = list (set (extract_candidate_chunk (transcript) + extract_candidate_words (transcript)))
return Keywords_all
'''
def getCandidatePhrases(transcript):
key_pos = {}
transcript = [cleantext(transcript)]
for seg in transcript:
chunk_key,chunk_pos = extract_candidate_chunk (seg)
word_key,word_pos = extract_candidate_words (seg)
key_all = chunk_key + word_key
pos_all = chunk_pos + word_pos
for i in range(len(key_all)):
key_pos[key_all[i]] = pos_all[i]
df = pd.DataFrame({
"Keyphrase":list(key_pos.keys()),
"POS":list(key_pos.values())
})
return df
getCandidatePhrases("With a foundation in artificial intelligence and media analytics, Ether starts its course by enabling a smart call service on top of Slack, Stride, and Teams. Ether captures and analyzes the call (audio, video, shared content, etc) as the call happens and extracts key markers.")
| 3,179
| 0
| 138
|
953960ffc84adc3d30016015a34adf364d0ca446
| 2,227
|
py
|
Python
|
main.py
|
DudeFr0mMars/Economy-Bot
|
15111eb032a03eebf9eb9a76b4377c3a6814fa98
|
[
"MIT"
] | 30
|
2020-12-20T10:42:42.000Z
|
2021-12-18T05:17:58.000Z
|
main.py
|
DudeFr0mMars/Economy-Bot
|
15111eb032a03eebf9eb9a76b4377c3a6814fa98
|
[
"MIT"
] | 11
|
2021-04-08T23:48:24.000Z
|
2021-12-16T04:51:34.000Z
|
main.py
|
DudeFr0mMars/Economy-Bot
|
15111eb032a03eebf9eb9a76b4377c3a6814fa98
|
[
"MIT"
] | 43
|
2021-01-28T14:37:10.000Z
|
2021-12-17T02:19:40.000Z
|
from datetime import datetime, timedelta
from os import listdir, system
import aiohttp
import discord
import json
from discord.ext import commands
from pretty_help import PrettyHelp
with open('./data.json') as f:
d1 = json.load(f)
with open('./market.json') as f:
d2 = json.load(f)
TOKEN = d1['token']
bot = Echo()
@bot.command(hidden=True)
@commands.is_owner()
@bot.command(hidden=True)
@commands.is_owner()
@bot.command(hidden=True)
@commands.is_owner()
for filename in listdir("./cogs"):
if filename.endswith(".py"):
bot.load_extension(f"cogs.{filename[:-3]}")
bot.load_extension("jishaku")
bot.run(TOKEN)  # bot.run() is blocking and manages the event loop itself
| 25.597701
| 113
| 0.625954
|
from datetime import datetime, timedelta
from os import listdir, system
import aiohttp
import discord
import json
from discord.ext import commands
from pretty_help import PrettyHelp
class Echo(commands.Bot):
def __init__(self):
self.description = """Echo - A Economy Bot"""
super().__init__(
command_prefix={"."},
owner_ids={727365670395838626},
intents=discord.Intents.all(),
help_command=PrettyHelp(),
description=self.description,
case_insensitive=True,
start_time=datetime.utcnow(),
)
    async def on_connect(self):
self.session = aiohttp.ClientSession(loop=self.loop)
cT = datetime.now() + timedelta(
hours=5, minutes=30
) # GMT+05:30 is Our TimeZone So.
print(
f"[ Log ] {self.user} Connected at {cT.hour}:{cT.minute}:{cT.second} / {cT.day}-{cT.month}-{cT.year}"
)
async def on_ready(self):
cT = datetime.now() + timedelta(
hours=5, minutes=30
) # GMT+05:30 is Our TimeZone So.
print(
f"[ Log ] {self.user} Ready at {cT.hour}:{cT.minute}:{cT.second} / {cT.day}-{cT.month}-{cT.year}"
)
print(f"[ Log ] GateWay WebSocket Latency: {self.latency*1000:.1f} ms")
with open('./data.json') as f:
d1 = json.load(f)
with open('./market.json') as f:
d2 = json.load(f)
def bot_info():
return d1
def market_info():
return d2
TOKEN = d1['token']
bot = Echo()
@bot.command(hidden=True)
@commands.is_owner()
async def load(ctx, extension):
bot.load_extension(f"cogs.{extension}")
await ctx.send("Done")
@bot.command(hidden=True)
@commands.is_owner()
async def unload(ctx, extension):
bot.unload_extension(f"cogs.{extension}")
await ctx.send("Done")
@bot.command(hidden=True)
@commands.is_owner()
async def reload(ctx, extension):
bot.unload_extension(f"cogs.{extension}")
bot.load_extension(f"cogs.{extension}")
await ctx.send("Done")
for filename in listdir("./cogs"):
if filename.endswith(".py"):
bot.load_extension(f"cogs.{filename[:-3]}")
bot.load_extension("jishaku")
bot.run(TOKEN)  # bot.run() is blocking and manages the event loop itself
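# Editor's sketch (hedged): each file in ./cogs is expected to expose a setup()
# entry point for load_extension(). A minimal cog could look like this
# (illustrative only; Ping is a made-up example, not part of this repo):
#
#     from discord.ext import commands
#
#     class Ping(commands.Cog):
#         def __init__(self, bot):
#             self.bot = bot
#
#         @commands.command()
#         async def ping(self, ctx):
#             await ctx.send("pong! {:.0f} ms".format(self.bot.latency * 1000))
#
#     def setup(bot):
#         bot.add_cog(Ping(bot))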
| 1,343
| 4
| 214
|
41b4693fd18fa289244d02060d677bc5ebca5209
| 2,614
|
py
|
Python
|
PyimageTutorial/Module 4 Image Classification and Machine Learning/Module_4_7_Advanced_Image_Pyramid/David_4_7_2_index_features.py
|
wcsodw1/Computer-Vision-with-Artificial-intelligence
|
1fc58466bf82c33939fae911140737a8d9681ebd
|
[
"MIT"
] | null | null | null |
PyimageTutorial/Module 4 Image Classification and Machine Learning/Module_4_7_Advanced_Image_Pyramid/David_4_7_2_index_features.py
|
wcsodw1/Computer-Vision-with-Artificial-intelligence
|
1fc58466bf82c33939fae911140737a8d9681ebd
|
[
"MIT"
] | null | null | null |
PyimageTutorial/Module 4 Image Classification and Machine Learning/Module_4_7_Advanced_Image_Pyramid/David_4_7_2_index_features.py
|
wcsodw1/Computer-Vision-with-Artificial-intelligence
|
1fc58466bf82c33939fae911140737a8d9681ebd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 18 13:02:47 2020
@author: user
"""
# python David_4_7_2_index_features.py --dataset output/data/training --features-db output/training_features.hdf5
# python David_4_7_2_index_features.py
# import the necessary packages
from __future__ import print_function
from pyimagesearch.descriptors import DetectAndDescribe
from pyimagesearch.indexer import FeatureIndexer
from imutils.feature import FeatureDetector_create, DescriptorExtractor_create
from imutils import paths
import argparse
import imutils
import random
import cv2
import sys
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
help="Path to the directory that contains the images to be indexed")
ap.add_argument("-f", "--features-db", required=True,
help="Path to where the features database will be stored")
ap.add_argument("-a", "--approx-images", type=int, default=250,
help="Approximate # of images in the dataset")
ap.add_argument("-b", "--max-buffer-size", type=int, default=50000,
help="Maximum buffer size for # of features to be stored in memory")
sys.argv[1:] = '-d output/data/training -f output/training_features.hdf5'.split()
args = vars(ap.parse_args())
# initialize the keypoint detector, local invariant descriptor, and the descriptor
# pipeline
detector = FeatureDetector_create("GFTT")
descriptor = DescriptorExtractor_create("RootSIFT")
dad = DetectAndDescribe(detector, descriptor)
# initialize the feature indexer
fi = FeatureIndexer(args["features_db"], estNumImages=args["approx_images"],
maxBufferSize=args["max_buffer_size"], verbose=True)
# grab the image paths and randomly shuffle them
imagePaths = list(paths.list_images(args["dataset"]))
random.shuffle(imagePaths)
# loop over the images in the dataset
for (i, imagePath) in enumerate(imagePaths):
# check to see if progress should be displayed
if i > 0 and i % 10 == 0:
fi._debug("processed {} images".format(i), msgType="[PROGRESS]")
# load the image and pre-process it
image = cv2.imread(imagePath)
image = imutils.resize(image, width=320)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# describe the image
(kps, descs) = dad.describe(image)
# if either the keypoints or descriptors are None, then ignore the image
if kps is None or descs is None:
continue
# extract the image filename and label from the path, then index the features
(label, filename) = imagePath.split("/")[-2:]
k = "{}:{}".format(label, filename)
fi.add(k, image.shape, kps, descs)
# finish the indexing process
fi.finish()
| 33.512821
| 113
| 0.757077
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 18 13:02:47 2020
@author: user
"""
# python David_4_7_2_index_features.py --dataset output/data/training --features-db output/training_features.hdf5
# python David_4_7_2_index_features.py
# import the necessary packages
from __future__ import print_function
from pyimagesearch.descriptors import DetectAndDescribe
from pyimagesearch.indexer import FeatureIndexer
from imutils.feature import FeatureDetector_create, DescriptorExtractor_create
from imutils import paths
import argparse
import imutils
import random
import cv2
import sys
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
help="Path to the directory that contains the images to be indexed")
ap.add_argument("-f", "--features-db", required=True,
help="Path to where the features database will be stored")
ap.add_argument("-a", "--approx-images", type=int, default=250,
help="Approximate # of images in the dataset")
ap.add_argument("-b", "--max-buffer-size", type=int, default=50000,
help="Maximum buffer size for # of features to be stored in memory")
sys.argv[1:] = '-d output/data/training -f output/training_features.hdf5'.split()
args = vars(ap.parse_args())
# initialize the keypoint detector, local invariant descriptor, and the descriptor
# pipeline
detector = FeatureDetector_create("GFTT")
descriptor = DescriptorExtractor_create("RootSIFT")
dad = DetectAndDescribe(detector, descriptor)
# initialize the feature indexer
fi = FeatureIndexer(args["features_db"], estNumImages=args["approx_images"],
maxBufferSize=args["max_buffer_size"], verbose=True)
# grab the image paths and randomly shuffle them
imagePaths = list(paths.list_images(args["dataset"]))
random.shuffle(imagePaths)
# loop over the images in the dataset
for (i, imagePath) in enumerate(imagePaths):
# check to see if progress should be displayed
if i > 0 and i % 10 == 0:
fi._debug("processed {} images".format(i), msgType="[PROGRESS]")
# load the image and pre-process it
image = cv2.imread(imagePath)
image = imutils.resize(image, width=320)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# describe the image
(kps, descs) = dad.describe(image)
# if either the keypoints or descriptors are None, then ignore the image
if kps is None or descs is None:
continue
# extract the image filename and label from the path, then index the features
(label, filename) = imagePath.split("/")[-2:]
k = "{}:{}".format(label, filename)
fi.add(k, image.shape, kps, descs)
# finish the indexing process
fi.finish()
| 0
| 0
| 0
|
589b7b1599bf21d56f84a0ca7d37adf5f0fdf3d9
| 3,247
|
py
|
Python
|
hw3.py
|
DrawnWren/nandprogramming
|
ec67d1babac393c37b33012bc92aa30938367782
|
[
"MIT"
] | null | null | null |
hw3.py
|
DrawnWren/nandprogramming
|
ec67d1babac393c37b33012bc92aa30938367782
|
[
"MIT"
] | null | null | null |
hw3.py
|
DrawnWren/nandprogramming
|
ec67d1babac393c37b33012bc92aa30938367782
|
[
"MIT"
] | null | null | null |
""" Basic template file that you should fill in for Problem Set 3. Some util
functions are provided from the NAND notebooks online that implement some
of the NAND essentials. """
from util import EVAL
from util import TRUTH
from util import NANDProgram
# TODO: Implement this function and return a string representation of its NAND
# implementation. You don't have to use the class we supplied - you could use
# other methods of building up your NAND program from scratch.
def nandsquare(n):
'''Takes in an integer n. Outputs the string representation of a NAND prog
that takes in inputs x_0, ..., x_{n-1} and squares it mod 2^n. The output
will be y_0, ..., y_{n-1}. The first digit will be the least significant
digit (ex: 110001 --> 35)'''
# creates a blank NAND program with n inputs and n outputs.
prog = NANDProgram(n, n)
# now add lines to your NAND program by calling python functions like
# prog.NAND() or prog.OR() or other helper functions. For an example, take
# a look at the stuff after if __name__ == '__main__':
# "compiles" your completed program as a NAND program string.
return str(prog)
def rightshift(n):
    '''Returns a program that takes [x_0, ..., x_{n-1}] as inputs and returns [0, x_0, ..., x_{n-2}].'''
# TODO: Do this for bonus points and the leaderboard.
def nandsquare256():
'''Implement nandsquare for a specific input size, n=256. This result gets
placed on the leaderboard for extra credit. If you get close to the top
    score on the leaderboard, you'll still receive BONUS POINTS!!!'''
raise NotImplementedError
def badadder(N):
    '''Should create a NAND adder that takes two n-digit numbers but outputs
    only an n-digit sum (dropping the final carry) -- which is why it's bad.'''
return
# Examples of using the NANDProgram class to build NAND Programs. Please don't
# worry too much about the details of using this class - this is not a class
# about designing NAND programs.
def nandadder(N):
'''Creates a NAND adder that takes in two n-digit binary numbers and gets
the sum, returning a n+1-digit binary number. Returns the string repr. of
the NAND program created.'''
nand = NANDProgram(2 * N, N + 1, debug=False) #set debug=True to show debug lines
nand.ONE("ONE")
carry = nand.allocate()
nand.ADD_3(nand.output_var(0), carry,
nand.input_var(0), nand.input_var(N), nand.NAND("ZERO", "ONE", "ONE"), debug=True)
last_carry = ""
for i in range(1, N - 1):
last_carry = carry
carry = nand.allocate()
nand.ADD_3(nand.output_var(i), carry,
nand.input_var(i), nand.input_var(N + i), last_carry, debug=True)
nand.ADD_3(nand.output_var(N-1), nand.output_var(N),
nand.input_var(N-1), nand.input_var(2 * N - 1), carry, debug=True)
return str(nand)
if __name__ == '__main__':
# Generate the string representation of a NAND prog. that adds numbers
addfive = str(nandadder(10))
    # LSB-first encoding example with 5-digit inputs (concatenated: 1111010110):
    # Input Number 1: 11110 --> 15
    # Input Number 2: 10110 --> 13
    # Expected Output: 001110 --> 28
#816 0000110011
#877 1011011011
# 10111001011
print(EVAL(addfive,'00001100111011011011'))
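    # Sanity check on the LSB-first packing above: the 20-digit input is the
    # two 10-digit addends concatenated. '0000110011' decodes to
    # 16+32+256+512 = 816 and '1011011011' to 1+4+8+32+64+256+512 = 877;
    # their sum 1693 = 1+4+8+16+128+512+1024, i.e. '10111001011' LSB-first,
    # matching the expected-output comment above.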
| 41.101266
| 98
| 0.668309
|
""" Basic template file that you should fill in for Problem Set 3. Some util
functions are provided from the NAND notebooks online that implement some
of the NAND essentials. """
from util import EVAL
from util import TRUTH
from util import NANDProgram
# TODO: Implement this function and return a string representation of its NAND
# implementation. You don't have to use the class we supplied - you could use
# other methods of building up your NAND program from scratch.
def nandsquare(n):
'''Takes in an integer n. Outputs the string representation of a NAND prog
that takes in inputs x_0, ..., x_{n-1} and squares it mod 2^n. The output
will be y_0, ..., y_{n-1}. The first digit will be the least significant
digit (ex: 110001 --> 35)'''
# creates a blank NAND program with n inputs and n outputs.
prog = NANDProgram(n, n)
# now add lines to your NAND program by calling python functions like
# prog.NAND() or prog.OR() or other helper functions. For an example, take
# a look at the stuff after if __name__ == '__main__':
# "compiles" your completed program as a NAND program string.
return str(prog)
def rightshift(n):
    '''Returns a program that takes [x_0, ..., x_{n-1}] as inputs and returns [0, x_0, ..., x_{n-2}].'''
# TODO: Do this for bonus points and the leaderboard.
def nandsquare256():
'''Implement nandsquare for a specific input size, n=256. This result gets
placed on the leaderboard for extra credit. If you get close to the top
    score on the leaderboard, you'll still receive BONUS POINTS!!!'''
raise NotImplementedError
def badadder(N):
    '''Should create a NAND adder that takes two n-digit numbers but outputs
    only an n-digit sum (dropping the final carry) -- which is why it's bad.'''
return
# Examples of using the NANDProgram class to build NAND Programs. Please don't
# worry too much about the details of using this class - this is not a class
# about designing NAND programs.
def nandadder(N):
'''Creates a NAND adder that takes in two n-digit binary numbers and gets
the sum, returning a n+1-digit binary number. Returns the string repr. of
the NAND program created.'''
nand = NANDProgram(2 * N, N + 1, debug=False) #set debug=True to show debug lines
nand.ONE("ONE")
carry = nand.allocate()
nand.ADD_3(nand.output_var(0), carry,
nand.input_var(0), nand.input_var(N), nand.NAND("ZERO", "ONE", "ONE"), debug=True)
last_carry = ""
for i in range(1, N - 1):
last_carry = carry
carry = nand.allocate()
nand.ADD_3(nand.output_var(i), carry,
nand.input_var(i), nand.input_var(N + i), last_carry, debug=True)
nand.ADD_3(nand.output_var(N-1), nand.output_var(N),
nand.input_var(N-1), nand.input_var(2 * N - 1), carry, debug=True)
return str(nand)
if __name__ == '__main__':
# Generate the string representation of a NAND prog. that adds numbers
addfive = str(nandadder(10))
    # LSB-first encoding example with 5-digit inputs (concatenated: 1111010110):
    # Input Number 1: 11110 --> 15
    # Input Number 2: 10110 --> 13
    # Expected Output: 001110 --> 28
#816 0000110011
#877 1011011011
# 10111001011
print(EVAL(addfive,'00001100111011011011'))
| 0
| 0
| 0
|
ea6c2ca88d47675d9aa8565970ee7ab5480b4ae9
| 2,320
|
py
|
Python
|
Chapter10/myproject_docker/apps/movies/migrations/0001_initial.py
|
PacktPublishing/Django-2-Web-Development-Cookbook-Third-Edition
|
f129613e2b1d00f5c76649025ae4d568f6286f2c
|
[
"MIT"
] | 75
|
2018-12-03T02:35:29.000Z
|
2021-11-08T13:13:34.000Z
|
Chapter10/virtualenvs/myproject_env/project/django-myproject/movies/migrations/0001_initial.py
|
PacktPublishing/Django-2-Web-Development-Cookbook-Third-Edition
|
f129613e2b1d00f5c76649025ae4d568f6286f2c
|
[
"MIT"
] | 3
|
2019-08-11T13:35:01.000Z
|
2020-09-29T06:52:36.000Z
|
Chapter08/virtualenvs/myproject_env/project/django-myproject/movies/migrations/0001_initial.py
|
PacktPublishing/Django-2-Web-Development-Cookbook-Third-Edition
|
f129613e2b1d00f5c76649025ae4d568f6286f2c
|
[
"MIT"
] | 45
|
2018-11-03T14:03:22.000Z
|
2021-08-25T07:39:33.000Z
|
# Generated by Django 2.1.1 on 2018-09-16 08:39
from django.db import migrations, models
| 42.962963
| 280
| 0.515517
|
# Generated by Django 2.1.1 on 2018-09-16 08:39
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Actor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=40, verbose_name='First name')),
('last_name', models.CharField(max_length=40, verbose_name='Last name')),
],
),
migrations.CreateModel(
name='Director',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=40, verbose_name='First name')),
('last_name', models.CharField(max_length=40, verbose_name='Last name')),
],
),
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='Title')),
],
),
migrations.CreateModel(
name='Movie',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='Title')),
('rating', models.PositiveIntegerField(choices=[(1, '★☆☆☆☆☆☆☆☆☆'), (2, '★★☆☆☆☆☆☆☆☆'), (3, '★★★☆☆☆☆☆☆☆'), (4, '★★★★☆☆☆☆☆☆'), (5, '★★★★★☆☆☆☆☆'), (6, '★★★★★★☆☆☆☆'), (7, '★★★★★★★☆☆☆'), (8, '★★★★★★★★☆☆'), (9, '★★★★★★★★★☆'), (10, '★★★★★★★★★★')], verbose_name='Rating')),
('actors', models.ManyToManyField(blank=True, to='movies.Actor')),
('directors', models.ManyToManyField(blank=True, to='movies.Director')),
('genres', models.ManyToManyField(blank=True, to='movies.Genre')),
],
options={
'verbose_name': 'Movie',
'verbose_name_plural': 'Movies',
'ordering': ['title'],
},
),
]
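# Illustrative sketch (not part of the original migration): the ten star-rating
# choices above follow a regular pattern and could be generated as
#   RATING_CHOICES = [(i, '★' * i + '☆' * (10 - i)) for i in range(1, 11)]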
| 0
| 2,406
| 23
|
4d880e3e2d2a468a977f492e0eda5c5352ab890e
| 165
|
py
|
Python
|
overlays/alt_s.py
|
werpu/emulation_tools
|
8293fbf566c66362fc7238cacdea118da5b86d9d
|
[
"MIT"
] | null | null | null |
overlays/alt_s.py
|
werpu/emulation_tools
|
8293fbf566c66362fc7238cacdea118da5b86d9d
|
[
"MIT"
] | 4
|
2020-10-06T14:49:27.000Z
|
2021-08-31T19:07:47.000Z
|
overlays/alt_s.py
|
werpu/emulation_tools
|
8293fbf566c66362fc7238cacdea118da5b86d9d
|
[
"MIT"
] | null | null | null |
# save snapshot
from evdev import UInput, ecodes
cfg = globals()["config"]
drv = globals()["drivers"]["keybd1"]
drv.press_keys(ecodes.KEY_LEFTALT, ecodes.KEY_S)
| 16.5
| 48
| 0.721212
|
# save snapshot
from evdev import UInput, ecodes
cfg = globals()["config"]
drv = globals()["drivers"]["keybd1"]
drv.press_keys(ecodes.KEY_LEFTALT, ecodes.KEY_S)
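# A hypothetical companion overlay (not part of this file) for loading the
# snapshot back would follow the same pattern with a different chord, e.g.
#   drv.press_keys(ecodes.KEY_LEFTALT, ecodes.KEY_L)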
| 0
| 0
| 0
|
99a3846b765e385bd108d18e68d7d370aff0fbab
| 951
|
py
|
Python
|
src/rfdoc/rfdocapp/utils/__init__.py
|
elrandira/rfdoc
|
23a5f510f6cd74362982253268f19700b4a1acf4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/rfdoc/rfdocapp/utils/__init__.py
|
elrandira/rfdoc
|
23a5f510f6cd74362982253268f19700b4a1acf4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/rfdoc/rfdocapp/utils/__init__.py
|
elrandira/rfdoc
|
23a5f510f6cd74362982253268f19700b4a1acf4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2022-02-01T16:08:43.000Z
|
2022-02-01T16:08:43.000Z
|
# Copyright 2009-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rfdoc.rfdocapp.utils.robot_htmlutils import html_escape
| 31.7
| 74
| 0.727655
|
# Copyright 2009-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rfdoc.rfdocapp.utils.robot_htmlutils import html_escape
def normalize(string):
return string.lower().replace(' ', '')
def eq(str1, str2):
return normalize(str1) == normalize(str2)
def eq_any(string, strings):
string = normalize(string)
for s in strings:
if normalize(s) == string:
return True
return False
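# Usage sketch -- matching is case- and whitespace-insensitive:
#   eq('Log Message', 'logmessage')          # True
#   eq_any('setvariable', ['Set Variable'])  # True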
| 224
| 0
| 69
|
fad42a2176634bf90465a537b384408b95236de7
| 637
|
py
|
Python
|
algorthm/tests/test_Search.py
|
dinaklal/algorthm
|
5adc65ba3eecb4d60b0193ff8d237828c621701b
|
[
"MIT"
] | 1
|
2019-08-16T10:17:56.000Z
|
2019-08-16T10:17:56.000Z
|
algorthm/tests/test_Search.py
|
dinaklal/algorthm
|
5adc65ba3eecb4d60b0193ff8d237828c621701b
|
[
"MIT"
] | 3
|
2019-08-09T13:02:21.000Z
|
2019-08-13T13:35:04.000Z
|
algorthm/tests/test_Search.py
|
dinaklal/algorthm
|
5adc65ba3eecb4d60b0193ff8d237828c621701b
|
[
"MIT"
] | 1
|
2019-08-20T10:23:24.000Z
|
2019-08-20T10:23:24.000Z
|
import pytest
import sys
sys.path.append('../')
from search import *
| 22.75
| 37
| 0.621664
|
import pytest
import sys
sys.path.append('../')
from search import *
def test_linearSearch():
a= linearSearch([1,3,2],2)
assert a == 2
def test_binarySearch():
a= binarySearch([1,3,2],3)
assert a == 1
def test_binarySearch_2():
a= binarySearch([1,3,2,4,5,6],4)
assert a == 4
def test_jumpSearch():
a= jumpSearch([1,3,2],4)
assert a == -1
def test_fibonacciSearch():
a= fibonacciSearch([1,3,2],4)
assert a == -1
def test_exponentialSearch():
a= exponentialSearch([1,3,2],4)
assert a == -1
def test_interpolationSearch():
a= interpolationSearch([1,3,2],4)
assert a == -1
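# Apparent convention of the search module under test: each function returns
# the index of the target in the input list, or -1 when the target is absent
# (the sample lists are deliberately small and not necessarily sorted).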
| 407
| 0
| 154
|
0f57c0b03feb328d03ec6bdf4966a6d4ecf49694
| 1,609
|
py
|
Python
|
src/foreign_if/python/main/python/frovedis/dataframe/info.py
|
wmeddie/frovedis
|
c134e5e64114799cc7c265c72525ff98d06b49c1
|
[
"BSD-2-Clause"
] | null | null | null |
src/foreign_if/python/main/python/frovedis/dataframe/info.py
|
wmeddie/frovedis
|
c134e5e64114799cc7c265c72525ff98d06b49c1
|
[
"BSD-2-Clause"
] | null | null | null |
src/foreign_if/python/main/python/frovedis/dataframe/info.py
|
wmeddie/frovedis
|
c134e5e64114799cc7c265c72525ff98d06b49c1
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
from ..exrpc.rpclib import *
from ..exrpc.server import *
from ..mllib.model_util import ModelID
class df_to_sparse_info:
'''A python container for holding information related to dataframe to sparse conversion'''
| 35.755556
| 92
| 0.715351
|
#!/usr/bin/env python
from ..exrpc.rpclib import *
from ..exrpc.server import *
from ..mllib.model_util import ModelID
class df_to_sparse_info:
'''A python container for holding information related to dataframe to sparse conversion'''
def __init__(cls,info_id):
cls.__uid = info_id
def load(cls,dirname):
cls.release()
if (type(dirname).__name__ != 'str'):
raise TypeError("Expected String, Found: " + type(dirname).__name__)
info_id = ModelID.get() #getting unique id for conversion info to be registered
(host, port) = FrovedisServer.getServerInstance()
rpclib.load_dftable_to_sparse_info(host,port,info_id,dirname.encode('ascii'))
cls.__uid = info_id
excpt = rpclib.check_server_exception()
if excpt["status"]: raise RuntimeError(excpt["info"])
return cls
def save(cls,dirname):
if cls.__uid is None:
raise ValueError("Operation on invalid frovedis dftable_to_sparse_info!")
if (type(dirname).__name__ != 'str'):
raise TypeError("Expected String, Found: " + type(dirname).__name__)
(host, port) = FrovedisServer.getServerInstance()
rpclib.save_dftable_to_sparse_info(host,port,cls.get(),dirname.encode('ascii'))
excpt = rpclib.check_server_exception()
if excpt["status"]: raise RuntimeError(excpt["info"])
def release(cls):
if cls.__uid is None:
raise ValueError("Operation on invalid frovedis dftable_to_sparse_info!")
(host, port) = FrovedisServer.getServerInstance()
rpclib.release_dftable_to_sparse_info(host,port,cls.get())
cls.__uid = None
def get(cls): return cls.__uid
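# Usage sketch (assumes a running Frovedis server and an info_id obtained from
# a prior dataframe-to-sparse conversion):
#   info = df_to_sparse_info(info_id)
#   info.save('./sparse_info')    # persist the server-side conversion info
#   info.release()                # then free it on the server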
| 1,239
| 0
| 125
|
b9c0f6e6f3476e8f124a63b6175df5b29e5eaadf
| 655
|
py
|
Python
|
recipes/migrations/0018_auto_20200912_1906.py
|
sh4rpy/foodgram
|
4ebc9655f9a68e05ebb83e7f2f2a2e04128d6713
|
[
"BSD-3-Clause"
] | null | null | null |
recipes/migrations/0018_auto_20200912_1906.py
|
sh4rpy/foodgram
|
4ebc9655f9a68e05ebb83e7f2f2a2e04128d6713
|
[
"BSD-3-Clause"
] | 9
|
2021-04-08T20:01:45.000Z
|
2022-03-12T00:48:46.000Z
|
recipes/migrations/0018_auto_20200912_1906.py
|
sh4rpy/foodgram
|
4ebc9655f9a68e05ebb83e7f2f2a2e04128d6713
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 3.1.1 on 2020-09-12 19:06
from django.db import migrations
import multiselectfield.db.fields
| 27.291667
| 196
| 0.60458
|
# Generated by Django 3.1.1 on 2020-09-12 19:06
from django.db import migrations
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('recipes', '0017_auto_20200912_1902'),
]
operations = [
migrations.RemoveField(
model_name='recipe',
name='tags',
),
migrations.AddField(
model_name='recipe',
name='tags',
field=multiselectfield.db.fields.MultiSelectField(blank=True, choices=[('breakfast', 'завтрак'), ('lunch', 'обед'), ('dinner', 'ужин')], max_length=22, null=True, verbose_name='Теги'),
),
]
| 0
| 534
| 23
|
ad6f36bacc15247857315b4e0c0bbba3891346ae
| 14,122
|
py
|
Python
|
tests/test_validate.py
|
vmagamedov/harness
|
0e9d64295f937aa4476dbe5f084e80a3783edce7
|
[
"BSD-3-Clause"
] | 6
|
2020-03-26T16:49:54.000Z
|
2022-01-13T09:13:40.000Z
|
tests/test_validate.py
|
vmagamedov/harness
|
0e9d64295f937aa4476dbe5f084e80a3783edce7
|
[
"BSD-3-Clause"
] | 1
|
2020-03-14T16:47:51.000Z
|
2020-03-14T16:47:51.000Z
|
tests/test_validate.py
|
vmagamedov/harness
|
0e9d64295f937aa4476dbe5f084e80a3783edce7
|
[
"BSD-3-Clause"
] | null | null | null |
import re
from ipaddress import ip_address
import pytest
from harness.runtime._validate import validate, ValidationError
@pytest.fixture()
def message_type(message_types, package):
    return message_types[f"{package}.Message"]
@pytest.fixture()
def timestamp_type(message_types):
    return message_types["google.protobuf.Timestamp"]
@pytest.fixture()
def duration_type(message_types):
    return message_types["google.protobuf.Duration"]
@pytest.fixture()
def any_type(message_types):
    return message_types["google.protobuf.Any"]
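# Note: the message_types and package fixtures above are assumed to be provided
# by the suite's conftest.py, which presumably compiles the proto snippet in
# each test's docstring into importable message classes.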
def test_disabled(message_type):
"""
message Message {
message Inner {
option (validate.disabled) = true;
string value = 1 [(validate.rules).string.const = "valid"];
}
Inner field = 1;
}
"""
validate(message_type(field=dict(value="invalid")))
def test_oneof_required(message_type):
"""
message Message {
oneof type {
option (validate.required) = true;
string foo = 1;
int32 bar = 2;
}
}
"""
validate(message_type(foo="test"))
validate(message_type(bar=42))
with pytest.raises(ValidationError, match="Oneof type is required"):
validate(message_type())
def test_float_const(message_type):
"""
message Message {
float value = 1 [(validate.rules).float.const = 4.2];
}
"""
validate(message_type(value=4.2))
with pytest.raises(ValidationError, match="value not equal to"):
validate(message_type(value=2.4))
def test_timestamp_lt(message_type, timestamp_type):
"""
message Message {
google.protobuf.Timestamp value = 1 [
(validate.rules).timestamp.lt = {seconds: 1000}
];
}
"""
validate(message_type(value=timestamp_type(seconds=999)))
with pytest.raises(ValidationError, match="is not lesser than"):
validate(message_type(value=timestamp_type(seconds=1000)))
def test_timestamp_within(message_type, timestamp_type):
"""
message Message {
google.protobuf.Timestamp value = 1 [
(validate.rules).timestamp.within = {seconds: 60}
];
}
"""
value = timestamp_type()
value.GetCurrentTime()
validate(message_type(value=value))
valid_seconds = value.seconds
with pytest.raises(ValidationError, match="value is not within 60s from now"):
value.seconds = valid_seconds - 100
validate(message_type(value=value))
with pytest.raises(ValidationError, match="value is not within 60s from now"):
        value.seconds = valid_seconds + 100  # presumably the future-side case; the original repeated the past-side check
validate(message_type(value=value))
value.seconds = valid_seconds
validate(message_type(value=value))
def test_duration_in(message_type, duration_type):
"""
message Message {
google.protobuf.Duration value = 1 [
(validate.rules).duration.in = {seconds: 60},
(validate.rules).duration.in = {seconds: 30}
];
}
"""
validate(message_type(value=duration_type(seconds=60)))
with pytest.raises(ValidationError, match="value not in {60s, 30s}"):
validate(message_type(value=duration_type(seconds=120)))
def test_duration_lte(message_type, duration_type):
"""
message Message {
google.protobuf.Duration value = 1 [
(validate.rules).duration.lte = {seconds: 60}
];
}
"""
validate(message_type(value=duration_type(seconds=60)))
with pytest.raises(
ValidationError, match="value is not lesser than or equal to 60s"
):
validate(message_type(value=duration_type(seconds=60, nanos=1)))
def test_enum_defined_only(message_type):
"""
message Message {
enum Foo {
A = 0;
B = 1;
}
Foo value = 1 [(validate.rules).enum.defined_only = true];
}
"""
validate(message_type())
validate(message_type(value=1))
with pytest.raises(ValidationError, match="value is not defined"):
validate(message_type(value=2))
def test_repeated_unique(message_type):
"""
message Message {
repeated int32 value = 1 [(validate.rules).repeated.unique = true];
}
"""
validate(message_type(value=[1, 2, 3]))
with pytest.raises(
ValidationError,
match="value must contain unique items; repeated items: \\[2, 3\\]",
):
validate(message_type(value=[1, 2, 3, 2, 4, 3, 5]))
def test_repeated_items(message_type):
"""
message Message {
repeated int32 field = 1 [(validate.rules).repeated.items.int32.lt = 5];
}
"""
validate(message_type(field=[1, 2, 3, 4]))
with pytest.raises(ValidationError, match="field\\[\\] is not lesser than 5"):
validate(message_type(field=[1, 2, 3, 4, 5]))
def test_map_key(message_type):
"""
message Message {
map<string, int32> field = 1 [(validate.rules).map.keys.string.min_len = 3];
}
"""
validate(message_type(field={"test": 42}))
with pytest.raises(ValidationError, match="field<key> length is less than 3"):
validate(message_type(field={"t": 42}))
def test_map_values(message_type):
"""
message Message {
map<string, int32> field = 1 [(validate.rules).map.values.int32.const = 42];
}
"""
validate(message_type(field={"test": 42}))
with pytest.raises(ValidationError, match="field<value> not equal to 42"):
validate(message_type(field={"test": 43}))
def test_any_in(message_type, any_type, duration_type, timestamp_type):
"""
message Message {
google.protobuf.Any field = 1 [(validate.rules).any.in = "type.googleapis.com/google.protobuf.Duration"];
}
""" # noqa
any_1 = any_type()
any_1.Pack(duration_type(seconds=42))
validate(message_type(field=any_1))
with pytest.raises(ValidationError, match="field.type_url not in"):
any_2 = any_type()
any_2.Pack(timestamp_type(seconds=42))
validate(message_type(field=any_2))
def test_nested(message_type):
"""
message Message {
message Inner {
string value = 1 [(validate.rules).string.const = "valid"];
}
Inner field = 1;
}
"""
validate(message_type())
validate(message_type(field=dict(value="valid")))
with pytest.raises(ValidationError, match="value not equal to 'valid'"):
validate(message_type(field=dict(value="invalid")))
def test_message_skip(message_type):
"""
message Message {
message Inner {
string value = 1 [(validate.rules).string.const = "valid"];
}
Inner field = 1 [(validate.rules).message.skip = true];
}
"""
validate(message_type(field=dict(value="invalid")))
def test_message_required(message_type):
"""
message Message {
message Inner {
string value = 1;
}
Inner field = 1 [(validate.rules).message.required = true];
}
"""
validate(message_type(field=dict()))
validate(message_type(field=dict(value="test")))
with pytest.raises(ValidationError, match="field is required"):
validate(message_type())
def test_email(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.email = true];
}
"""
validate(message_type(field="admin@example.com"))
validate(
message_type(field="Jean-Luc Picard <jean-luc.pickard@starfleet.milkyway>")
)
with pytest.raises(ValidationError, match="field contains invalid email address"):
validate(message_type(field="example.com"))
with pytest.raises(
ValidationError, match="field contains more than one email address"
):
validate(message_type(field="foo@example.com, bar@example.com"))
def test_hostname(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.hostname = true];
}
"""
validate(message_type(field="example.com"))
validate(message_type(field="Example.com"))
with pytest.raises(ValidationError, match="field contains invalid hostname"):
validate(message_type(field="-example.com"))
def test_string_prefix(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.prefix = "har"];
}
"""
validate(message_type(field="harness"))
with pytest.raises(ValidationError, match="field does not start with prefix 'har'"):
validate(message_type(field="bottle"))
def test_string_pattern(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.pattern = "^(foo|bar)-app$"];
}
"""
validate(message_type(field="foo-app"))
validate(message_type(field="bar-app"))
with pytest.raises(
ValidationError,
match=re.escape("field does not match pattern '^(foo|bar)-app$'"),
):
validate(message_type(field="invalid"))
def test_string_ip(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.ip = true];
}
"""
validate(message_type(field="0.0.0.0"))
validate(message_type(field="127.0.0.1"))
validate(message_type(field="::1"))
validate(message_type(field="2001:0db8:85a3:0000:0000:8a2e:0370:7334"))
with pytest.raises(ValidationError, match="field contains invalid IP address"):
validate(message_type(field="0.0.0"))
def test_string_ipv4(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.ipv4 = true];
}
"""
validate(message_type(field="0.0.0.0"))
validate(message_type(field="127.0.0.1"))
with pytest.raises(ValidationError, match="field contains invalid IPv4 address"):
validate(message_type(field="0.0.0"))
with pytest.raises(ValidationError, match="field contains invalid IPv4 address"):
validate(message_type(field="2001:0db8:85a3:0000:0000:8a2e:0370:7334"))
def test_string_ipv6(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.ipv6 = true];
}
"""
validate(message_type(field="::1"))
validate(message_type(field="2001:0db8:85a3:0000:0000:8a2e:0370:7334"))
with pytest.raises(ValidationError, match="field contains invalid IPv6 address"):
validate(message_type(field="2001:0db8:85a3:0000:0000:8a2e:0370:733."))
with pytest.raises(ValidationError, match="field contains invalid IPv6 address"):
validate(message_type(field="127.0.0.1"))
def test_bytes_ip(message_type):
"""
message Message {
bytes field = 1 [(validate.rules).bytes.ip = true];
}
"""
validate(message_type(field=ip_address("0.0.0.0").packed))
validate(message_type(field=ip_address("127.0.0.1").packed))
validate(message_type(field=ip_address("::1").packed))
validate(
message_type(field=ip_address("2001:0db8:85a3:0000:0000:8a2e:0370:7334").packed)
)
with pytest.raises(ValidationError, match="field contains invalid IP address"):
validate(message_type(field=ip_address("0.0.0.0").packed[:-1]))
def test_bytes_ipv4(message_type):
"""
message Message {
bytes field = 1 [(validate.rules).bytes.ipv4 = true];
}
"""
validate(message_type(field=ip_address("0.0.0.0").packed))
validate(message_type(field=ip_address("127.0.0.1").packed))
with pytest.raises(ValidationError, match="field contains invalid IPv4 address"):
validate(message_type(field=b"deadbeef"))
with pytest.raises(ValidationError, match="field contains invalid IPv4 address"):
validate(
message_type(
field=ip_address("2001:0db8:85a3:0000:0000:8a2e:0370:7334").packed
)
)
def test_bytes_ipv6(message_type):
"""
message Message {
bytes field = 1 [(validate.rules).bytes.ipv6 = true];
}
"""
validate(message_type(field=ip_address("::1").packed))
validate(
message_type(field=ip_address("2001:0db8:85a3:0000:0000:8a2e:0370:7334").packed)
)
with pytest.raises(ValidationError, match="field contains invalid IPv6 address"):
validate(message_type(field=b"deadbeef"))
with pytest.raises(ValidationError, match="field contains invalid IPv6 address"):
validate(message_type(field=ip_address("127.0.0.1").packed))
def test_address(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.address = true];
}
"""
validate(message_type(field="::1"))
validate(message_type(field="127.0.0.1"))
validate(message_type(field="Example.com"))
with pytest.raises(ValidationError, match="field contains invalid address"):
validate(message_type(field="invalid"))
def test_uri(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.uri = true];
}
"""
validate(message_type(field="http://google.com"))
validate(message_type(field="http://127.0.0.1/page.html#fragment"))
with pytest.raises(ValidationError, match="field contains invalid URI"):
validate(message_type(field="/local/path"))
def test_uri_ref(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.uri_ref = true];
}
"""
validate(message_type(field="http://google.com"))
validate(message_type(field="/local/path"))
with pytest.raises(ValidationError, match="field contains invalid URI-reference"):
validate(message_type(field="\\invalid\\path"))
def test_uuid(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.uuid = true];
}
"""
validate(message_type(field="adbf3fd4-6a41-41a8-b5c1-df09adc3a9b3"))
validate(message_type(field="ADBF3FD4-6A41-41A8-B5C1-DF09ADC3A9B3"))
with pytest.raises(ValidationError, match="field contains invalid UUID"):
validate(message_type(field="adbf3fd46a4141a8b5c1df09adc3a9b3"))
with pytest.raises(ValidationError, match="field contains invalid UUID"):
validate(message_type(field="adbf3fd4-6a41-41a8-b5c1-df09adc3a9b3-ext"))
| 31.382222
| 113
| 0.652599
|
import re
from ipaddress import ip_address
import pytest
from harness.runtime._validate import validate, ValidationError
@pytest.fixture()
def message_type(message_types, package):
return message_types[f"{package}.Message"]
@pytest.fixture()
def timestamp_type(message_types):
return message_types["google.protobuf.Timestamp"]
@pytest.fixture()
def duration_type(message_types):
return message_types["google.protobuf.Duration"]
@pytest.fixture()
def any_type(message_types):
return message_types["google.protobuf.Any"]
def test_disabled(message_type):
"""
message Message {
message Inner {
option (validate.disabled) = true;
string value = 1 [(validate.rules).string.const = "valid"];
}
Inner field = 1;
}
"""
validate(message_type(field=dict(value="invalid")))
def test_oneof_required(message_type):
"""
message Message {
oneof type {
option (validate.required) = true;
string foo = 1;
int32 bar = 2;
}
}
"""
validate(message_type(foo="test"))
validate(message_type(bar=42))
with pytest.raises(ValidationError, match="Oneof type is required"):
validate(message_type())
def test_float_const(message_type):
"""
message Message {
float value = 1 [(validate.rules).float.const = 4.2];
}
"""
validate(message_type(value=4.2))
with pytest.raises(ValidationError, match="value not equal to"):
validate(message_type(value=2.4))
def test_timestamp_lt(message_type, timestamp_type):
"""
message Message {
google.protobuf.Timestamp value = 1 [
(validate.rules).timestamp.lt = {seconds: 1000}
];
}
"""
validate(message_type(value=timestamp_type(seconds=999)))
with pytest.raises(ValidationError, match="is not lesser than"):
validate(message_type(value=timestamp_type(seconds=1000)))
def test_timestamp_within(message_type, timestamp_type):
"""
message Message {
google.protobuf.Timestamp value = 1 [
(validate.rules).timestamp.within = {seconds: 60}
];
}
"""
value = timestamp_type()
value.GetCurrentTime()
validate(message_type(value=value))
valid_seconds = value.seconds
with pytest.raises(ValidationError, match="value is not within 60s from now"):
value.seconds = valid_seconds - 100
validate(message_type(value=value))
with pytest.raises(ValidationError, match="value is not within 60s from now"):
        value.seconds = valid_seconds + 100  # presumably the future-side case; the original repeated the past-side check
validate(message_type(value=value))
value.seconds = valid_seconds
validate(message_type(value=value))
def test_duration_in(message_type, duration_type):
"""
message Message {
google.protobuf.Duration value = 1 [
(validate.rules).duration.in = {seconds: 60},
(validate.rules).duration.in = {seconds: 30}
];
}
"""
validate(message_type(value=duration_type(seconds=60)))
with pytest.raises(ValidationError, match="value not in {60s, 30s}"):
validate(message_type(value=duration_type(seconds=120)))
def test_duration_lte(message_type, duration_type):
"""
message Message {
google.protobuf.Duration value = 1 [
(validate.rules).duration.lte = {seconds: 60}
];
}
"""
validate(message_type(value=duration_type(seconds=60)))
with pytest.raises(
ValidationError, match="value is not lesser than or equal to 60s"
):
validate(message_type(value=duration_type(seconds=60, nanos=1)))
def test_enum_defined_only(message_type):
"""
message Message {
enum Foo {
A = 0;
B = 1;
}
Foo value = 1 [(validate.rules).enum.defined_only = true];
}
"""
validate(message_type())
validate(message_type(value=1))
with pytest.raises(ValidationError, match="value is not defined"):
validate(message_type(value=2))
def test_repeated_unique(message_type):
"""
message Message {
repeated int32 value = 1 [(validate.rules).repeated.unique = true];
}
"""
validate(message_type(value=[1, 2, 3]))
with pytest.raises(
ValidationError,
match="value must contain unique items; repeated items: \\[2, 3\\]",
):
validate(message_type(value=[1, 2, 3, 2, 4, 3, 5]))
def test_repeated_items(message_type):
"""
message Message {
repeated int32 field = 1 [(validate.rules).repeated.items.int32.lt = 5];
}
"""
validate(message_type(field=[1, 2, 3, 4]))
with pytest.raises(ValidationError, match="field\\[\\] is not lesser than 5"):
validate(message_type(field=[1, 2, 3, 4, 5]))
def test_map_key(message_type):
"""
message Message {
map<string, int32> field = 1 [(validate.rules).map.keys.string.min_len = 3];
}
"""
validate(message_type(field={"test": 42}))
with pytest.raises(ValidationError, match="field<key> length is less than 3"):
validate(message_type(field={"t": 42}))
def test_map_values(message_type):
"""
message Message {
map<string, int32> field = 1 [(validate.rules).map.values.int32.const = 42];
}
"""
validate(message_type(field={"test": 42}))
with pytest.raises(ValidationError, match="field<value> not equal to 42"):
validate(message_type(field={"test": 43}))
def test_any_in(message_type, any_type, duration_type, timestamp_type):
"""
message Message {
google.protobuf.Any field = 1 [(validate.rules).any.in = "type.googleapis.com/google.protobuf.Duration"];
}
""" # noqa
any_1 = any_type()
any_1.Pack(duration_type(seconds=42))
validate(message_type(field=any_1))
with pytest.raises(ValidationError, match="field.type_url not in"):
any_2 = any_type()
any_2.Pack(timestamp_type(seconds=42))
validate(message_type(field=any_2))
def test_nested(message_type):
"""
message Message {
message Inner {
string value = 1 [(validate.rules).string.const = "valid"];
}
Inner field = 1;
}
"""
validate(message_type())
validate(message_type(field=dict(value="valid")))
with pytest.raises(ValidationError, match="value not equal to 'valid'"):
validate(message_type(field=dict(value="invalid")))
def test_message_skip(message_type):
"""
message Message {
message Inner {
string value = 1 [(validate.rules).string.const = "valid"];
}
Inner field = 1 [(validate.rules).message.skip = true];
}
"""
validate(message_type(field=dict(value="invalid")))
def test_message_required(message_type):
"""
message Message {
message Inner {
string value = 1;
}
Inner field = 1 [(validate.rules).message.required = true];
}
"""
validate(message_type(field=dict()))
validate(message_type(field=dict(value="test")))
with pytest.raises(ValidationError, match="field is required"):
validate(message_type())
def test_email(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.email = true];
}
"""
validate(message_type(field="admin@example.com"))
validate(
message_type(field="Jean-Luc Picard <jean-luc.pickard@starfleet.milkyway>")
)
with pytest.raises(ValidationError, match="field contains invalid email address"):
validate(message_type(field="example.com"))
with pytest.raises(
ValidationError, match="field contains more than one email address"
):
validate(message_type(field="foo@example.com, bar@example.com"))
def test_hostname(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.hostname = true];
}
"""
validate(message_type(field="example.com"))
validate(message_type(field="Example.com"))
with pytest.raises(ValidationError, match="field contains invalid hostname"):
validate(message_type(field="-example.com"))
def test_string_prefix(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.prefix = "har"];
}
"""
validate(message_type(field="harness"))
with pytest.raises(ValidationError, match="field does not start with prefix 'har'"):
validate(message_type(field="bottle"))
def test_string_pattern(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.pattern = "^(foo|bar)-app$"];
}
"""
validate(message_type(field="foo-app"))
validate(message_type(field="bar-app"))
with pytest.raises(
ValidationError,
match=re.escape("field does not match pattern '^(foo|bar)-app$'"),
):
validate(message_type(field="invalid"))
def test_string_ip(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.ip = true];
}
"""
validate(message_type(field="0.0.0.0"))
validate(message_type(field="127.0.0.1"))
validate(message_type(field="::1"))
validate(message_type(field="2001:0db8:85a3:0000:0000:8a2e:0370:7334"))
with pytest.raises(ValidationError, match="field contains invalid IP address"):
validate(message_type(field="0.0.0"))
def test_string_ipv4(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.ipv4 = true];
}
"""
validate(message_type(field="0.0.0.0"))
validate(message_type(field="127.0.0.1"))
with pytest.raises(ValidationError, match="field contains invalid IPv4 address"):
validate(message_type(field="0.0.0"))
with pytest.raises(ValidationError, match="field contains invalid IPv4 address"):
validate(message_type(field="2001:0db8:85a3:0000:0000:8a2e:0370:7334"))
def test_string_ipv6(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.ipv6 = true];
}
"""
validate(message_type(field="::1"))
validate(message_type(field="2001:0db8:85a3:0000:0000:8a2e:0370:7334"))
with pytest.raises(ValidationError, match="field contains invalid IPv6 address"):
validate(message_type(field="2001:0db8:85a3:0000:0000:8a2e:0370:733."))
with pytest.raises(ValidationError, match="field contains invalid IPv6 address"):
validate(message_type(field="127.0.0.1"))
def test_bytes_ip(message_type):
"""
message Message {
bytes field = 1 [(validate.rules).bytes.ip = true];
}
"""
validate(message_type(field=ip_address("0.0.0.0").packed))
validate(message_type(field=ip_address("127.0.0.1").packed))
validate(message_type(field=ip_address("::1").packed))
validate(
message_type(field=ip_address("2001:0db8:85a3:0000:0000:8a2e:0370:7334").packed)
)
with pytest.raises(ValidationError, match="field contains invalid IP address"):
validate(message_type(field=ip_address("0.0.0.0").packed[:-1]))
def test_bytes_ipv4(message_type):
"""
message Message {
bytes field = 1 [(validate.rules).bytes.ipv4 = true];
}
"""
validate(message_type(field=ip_address("0.0.0.0").packed))
validate(message_type(field=ip_address("127.0.0.1").packed))
with pytest.raises(ValidationError, match="field contains invalid IPv4 address"):
validate(message_type(field=b"deadbeef"))
with pytest.raises(ValidationError, match="field contains invalid IPv4 address"):
validate(
message_type(
field=ip_address("2001:0db8:85a3:0000:0000:8a2e:0370:7334").packed
)
)
def test_bytes_ipv6(message_type):
"""
message Message {
bytes field = 1 [(validate.rules).bytes.ipv6 = true];
}
"""
validate(message_type(field=ip_address("::1").packed))
validate(
message_type(field=ip_address("2001:0db8:85a3:0000:0000:8a2e:0370:7334").packed)
)
with pytest.raises(ValidationError, match="field contains invalid IPv6 address"):
validate(message_type(field=b"deadbeef"))
with pytest.raises(ValidationError, match="field contains invalid IPv6 address"):
validate(message_type(field=ip_address("127.0.0.1").packed))
def test_address(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.address = true];
}
"""
validate(message_type(field="::1"))
validate(message_type(field="127.0.0.1"))
validate(message_type(field="Example.com"))
with pytest.raises(ValidationError, match="field contains invalid address"):
validate(message_type(field="invalid"))
def test_uri(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.uri = true];
}
"""
validate(message_type(field="http://google.com"))
validate(message_type(field="http://127.0.0.1/page.html#fragment"))
with pytest.raises(ValidationError, match="field contains invalid URI"):
validate(message_type(field="/local/path"))
def test_uri_ref(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.uri_ref = true];
}
"""
validate(message_type(field="http://google.com"))
validate(message_type(field="/local/path"))
with pytest.raises(ValidationError, match="field contains invalid URI-reference"):
validate(message_type(field="\\invalid\\path"))
def test_uuid(message_type):
"""
message Message {
string field = 1 [(validate.rules).string.uuid = true];
}
"""
validate(message_type(field="adbf3fd4-6a41-41a8-b5c1-df09adc3a9b3"))
validate(message_type(field="ADBF3FD4-6A41-41A8-B5C1-DF09ADC3A9B3"))
with pytest.raises(ValidationError, match="field contains invalid UUID"):
validate(message_type(field="adbf3fd46a4141a8b5c1df09adc3a9b3"))
with pytest.raises(ValidationError, match="field contains invalid UUID"):
validate(message_type(field="adbf3fd4-6a41-41a8-b5c1-df09adc3a9b3-ext"))
| 254
| 0
| 88
|
cba4882f9db73bfaee8807536b229122656e0814
| 16,021
|
py
|
Python
|
alipy/experiment/state_io.py
|
JlsBssmnn/ALiPy
|
57f5a1e4c0252436ecf2572da6973d054807add5
|
[
"BSD-3-Clause"
] | 1
|
2021-08-17T01:58:14.000Z
|
2021-08-17T01:58:14.000Z
|
alipy/experiment/state_io.py
|
JlsBssmnn/ALiPy
|
57f5a1e4c0252436ecf2572da6973d054807add5
|
[
"BSD-3-Clause"
] | null | null | null |
alipy/experiment/state_io.py
|
JlsBssmnn/ALiPy
|
57f5a1e4c0252436ecf2572da6973d054807add5
|
[
"BSD-3-Clause"
] | 1
|
2019-12-19T07:02:07.000Z
|
2019-12-19T07:02:07.000Z
|
"""
StateIO
Container to store state object.
Several useful functions are implemented in this class:
1. Saving intermediate results to files.
2. Recovering the workspace at any iteration (labeled set and unlabeled set).
3. Recovering the workspace from the intermediate result file in case the program exits unexpectedly.
4. Gathering and checking the information stored in State objects.
5. Printing active learning progress: current_iteration, current_mean_performance, current_cost, etc.
"""
# Authors: Ying-Peng Tang
# License: BSD 3 clause
from __future__ import division
import collections.abc
import copy
import os
import pickle
import sys
import numpy as np
import prettytable as pt
from .state import State
from ..index import IndexCollection, MultiLabelIndexCollection
from ..index.multi_label_tools import check_index_multilabel
from ..utils.interface import BaseCollection
__all__ = ['StateIO',
]
class StateIO:
"""
A class to store states.
    Functions include:
    1. Saving intermediate results to files.
    2. Recovering the workspace at any iteration (labeled set and unlabeled set).
    3. Recovering the workspace from the intermediate result file in case the program exits unexpectedly.
    4. Gathering and checking the information stored in State objects.
    5. Printing active learning progress: current_iteration, current_mean_performance, current_cost, etc.
Parameters
----------
round: int
Number of k-fold experiments loop. 0 <= round < k
train_idx: array_like
Training index of one fold experiment.
test_idx: array_like
Testing index of one fold experiment.
init_L: array_like
Initial labeled index of one fold experiment.
init_U: array_like
Initial unlabeled index of one fold experiment.
initial_point: object, optional (default=None)
The performance before any querying.
        If not specified, the initial point of different methods will differ.
saving_path: str, optional (default='.')
Path to save the intermediate files. If None is given, it will
not save the intermediate result.
check_flag: bool, optional (default=True)
Whether to check the validity of states.
verbose: bool, optional (default=True)
Whether to print query information during the AL process.
    print_interval: int, optional (default=1)
How many queries will trigger a print when verbose is True.
"""
@classmethod
def load(cls, path):
"""Load StateIO object from file.
Parameters
----------
path: str
The path should be a specific .pkl file.
Returns
-------
object: StateIO
The StateIO object in the file.
"""
f = open(os.path.abspath(path), 'rb')
saver_from_file = pickle.load(f)
f.close()
return saver_from_file
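    # Usage sketch: pair save() with load() to checkpoint an experiment --
    # call saver.save() after each query, then later resume with
    #   saver = StateIO.load('./AL_round_0.pkl')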
def set_initial_point(self, perf):
"""The initial point of performance before querying.
Parameters
----------
perf: float
The performance value.
"""
self.initial_point = perf
def save(self):
"""Saving intermediate results to file."""
if self._saving_dir is None:
return
f = open(os.path.join(self._saving_dir, self._saving_file_name), 'wb')
pickle.dump(self, f)
f.close()
def add_state(self, state):
"""Add a State object to the container.
Parameters
----------
state: {dict, State}
State object to be added. Or a dictionary with
the following keys: ['select_index', 'queried_info', 'performance']
"""
if not isinstance(state, State):
assert isinstance(state, dict), "state must be dict or State object."
assert 'select_index' in state and 'queried_info' in state and 'performance' in state, "The dict must contain the following keys: ['select_index', 'queried_info', 'performance']"
self.__state_list.append(copy.deepcopy(state))
self.__update_info()
if self.__verbose and len(self) % self.__print_interval == 0:
if self._first_print:
print('\n' + self.__repr__(), end='')
self._first_print = False
else:
print('\r' + self._refresh_dataline(), end='')
sys.stdout.flush()
def get_state(self, index):
"""Get a State object in the container.
Parameters
----------
index: int
The index of the State object. 0 <= index < len(self)
Returns
-------
st: State
The State object in the previous iteration.
"""
assert (0 <= index < len(self))
return copy.deepcopy(self.__state_list[index])
def check_batch_size(self):
"""Check if all queries have the same batch size.
Returns
-------
result: bool
Whether all the states have the same batch size.
"""
ind_uni = np.unique(
[self.__state_list[i].batch_size for i in range(len(self.__state_list) - 1)], axis=0)
if len(ind_uni) == 1:
self.batch_size = ind_uni[0]
return True
else:
return False
def pop(self, i=None):
"""remove and return item at index (default last)."""
return self.__state_list.pop(i)
def recover_workspace(self, iteration=None):
"""Recover workspace after $iteration$ querying.
For example, if 0 is given, the initial workspace without any querying will be recovered.
        Note that the object itself will be modified: the information recorded after the given iteration will be discarded.
Parameters
----------
iteration: int, optional(default=None)
Number of iteration to recover, start from 0.
If nothing given, it will return the current workspace.
Returns
-------
train_idx: list
Index of training set, shape like [n_training_samples]
test_idx: list
Index of testing set, shape like [n_testing_samples]
label_idx: list
Index of labeling set, shape like [n_labeling_samples]
unlabel_idx: list
Index of unlabeling set, shape like [n_unlabeling_samples]
"""
if iteration is None:
iteration = len(self.__state_list)
assert (0 <= iteration <= len(self))
work_U = copy.deepcopy(self.init_U)
work_L = copy.deepcopy(self.init_L)
for i in range(iteration):
state = self.__state_list[i]
work_U.difference_update(state.get_value('select_index'))
work_L.update(state.get_value('select_index'))
self.__state_list = self.__state_list[0:iteration]
return copy.copy(self.train_idx), copy.copy(self.test_idx), copy.deepcopy(work_L), copy.deepcopy(work_U)
def get_workspace(self, iteration=None):
"""Get workspace after $iteration$ querying.
For example, if 0 is given, the initial workspace without any querying will be recovered.
Parameters
----------
iteration: int, optional(default=None)
Number of iteration, start from 0.
If nothing given, it will get the current workspace.
Returns
-------
train_idx: list
Index of training set, shape like [n_training_samples]
test_idx: list
Index of testing set, shape like [n_testing_samples]
label_idx: list
Index of labeling set, shape like [n_labeling_samples]
unlabel_idx: list
Index of unlabeling set, shape like [n_unlabeling_samples]
"""
if iteration is None:
iteration = len(self.__state_list)
assert (0 <= iteration <= len(self))
work_U = copy.deepcopy(self.init_U)
work_L = copy.deepcopy(self.init_L)
for i in range(iteration):
state = self.__state_list[i]
work_U.difference_update(state.get_value('select_index'))
work_L.update(state.get_value('select_index'))
return copy.copy(self.train_idx), copy.copy(self.test_idx), copy.deepcopy(work_L), copy.deepcopy(work_U)
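    # Usage sketch: inspect the labeled/unlabeled split as it stood after the
    # first five queries, without discarding any later states:
    #   train_idx, test_idx, label_idx, unlabel_idx = saver.get_workspace(5)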
def num_of_query(self):
"""Return the number of queries"""
return len(self.__state_list)
def get_current_performance(self):
"""Return the mean ± std performance of all existed states.
Only available when the performance of each state is a single float value.
Returns
-------
mean: float
Mean performance of the existing states.
std: float
Std performance of the existing states.
"""
if len(self) == 0:
return 0, 0
else:
tmp = [self[i].get_value('performance') for i in range(self.__len__())]
            if isinstance(tmp[0], collections.abc.Iterable):
                return np.nan, np.nan
else:
return np.mean(tmp), np.std(tmp)
def refresh_info(self):
"""re-calculate current active learning progress."""
numqdata = 0
cost = 0.0
for state in self.__state_list:
numqdata += len(state.get_value('select_index'))
if 'cost' in state.keys():
cost += np.sum(state.get_value('cost'))
self.cost_inall = cost
self._numqdata = numqdata
return numqdata, cost
def __update_info(self):
"""Update current active learning progress"""
state = self.__state_list[len(self) - 1]
if 'cost' in state.keys():
self.cost_inall += np.sum(state.get_value('cost'))
self._numqdata += len(state.get_value('select_index'))
# class StateIO_all_labels(StateIO):
# """StateIO for all _labels querying"""
# def add_state(self, state):
# assert (isinstance(state, experiment_saver.state.State))
# self.__state_list.append(copy.deepcopy(state))
# if self.__check_flag:
# res, err_st, err_ind = self.check_select_index()
# if res == -1:
# warnings.warn(
# 'Checking validity fails, there is a queried instance not in set_U in '
# 'State:%d, index:%s.' % (err_st, str(err_ind)),
# category=ValidityWarning)
# if res == -2:
# warnings.warn('Checking validity fails, there are instances already queried '
# 'in previous iteration in State:%d, index:%s.' % (err_st, str(err_ind)),
# category=ValidityWarning)
# self.__update_info()
#
#
# if self.__verbose and len(self) % self.__print_interval == 0:
# if self._first_print:
# print('\n' + self.__repr__(), end='')
# self._first_print = False
# else:
# print('\r' + self._refresh_dataline(), end='')
# sys.stdout.flush()
#
# def check_select_index(self):
# """
# check:
# - Q has no repeating elements
# - Q in U
# Returns
# -------
# result: int
# check result
# - if -1 is returned, there is a queried instance not in U
# - if -2 is returned, there are repeated instances in Q
# - if 1 is returned, CHECK OK
#
# state_index: int
# the state index when checking fails (start from 0)
# if CHECK OK, None is returned.
#
# select_index: object
# the select_index when checking fails.
# if CHECK OK, None is returned.
# """
# repeat_dict = dict()
# ind = -1
# for st in self.__state_list:
# ind += 1
# for instance in st.get_value('select_index'):
# if instance not in self.init_U:
# return -1, ind, instance
# if instance not in repeat_dict.keys():
# repeat_dict[instance] = 1
# else:
# return -2, ind, instance
# return 1, None, None
#
# @property
# def queried_percentage(self):
# """return the queried percentage of unlabeled data"""
# return 100 * self._numqdata / len(self.init_U)
| 36.661327
| 190
| 0.603708
|
"""
StateIO
Container to store state object.
Several useful functions are implemented in this class:
1. Saving intermediate results to files.
2. Recovering the workspace at any iteration (labeled set and unlabeled set).
3. Recovering the workspace from the intermediate result file in case the program exits unexpectedly.
4. Gathering and checking the information stored in State objects.
5. Printing active learning progress: current_iteration, current_mean_performance, current_cost, etc.
"""
# Authors: Ying-Peng Tang
# License: BSD 3 clause
from __future__ import division
import collections.abc
import copy
import os
import pickle
import sys
import numpy as np
import prettytable as pt
from .state import State
from ..index import IndexCollection, MultiLabelIndexCollection
from ..index.multi_label_tools import check_index_multilabel
from ..utils.interface import BaseCollection
__all__ = ['StateIO',
]
class StateIO:
"""
A class to store states.
    Functions include:
    1. Saving intermediate results to files.
    2. Recovering the workspace at any iteration (labeled set and unlabeled set).
    3. Recovering the workspace from the intermediate result file in case the program exits unexpectedly.
    4. Gathering and checking the information stored in State objects.
    5. Printing active learning progress: current_iteration, current_mean_performance, current_cost, etc.
Parameters
----------
round: int
Number of k-fold experiments loop. 0 <= round < k
train_idx: array_like
Training index of one fold experiment.
test_idx: array_like
Testing index of one fold experiment.
init_L: array_like
Initial labeled index of one fold experiment.
init_U: array_like
Initial unlabeled index of one fold experiment.
initial_point: object, optional (default=None)
The performance before any querying.
        If not specified, the initial point of different methods will differ.
saving_path: str, optional (default='.')
Path to save the intermediate files. If None is given, it will
not save the intermediate result.
check_flag: bool, optional (default=True)
Whether to check the validity of states.
verbose: bool, optional (default=True)
Whether to print query information during the AL process.
    print_interval: int, optional (default=1)
How many queries will trigger a print when verbose is True.
"""
def __init__(self, round, train_idx, test_idx, init_L, init_U, initial_point=None, saving_path=None,
check_flag=True, verbose=True, print_interval=1):
assert (isinstance(check_flag, bool))
assert (isinstance(verbose, bool))
self.__check_flag = check_flag
self.__verbose = verbose
self.__print_interval = print_interval
if self.__check_flag:
# check validity
            assert (isinstance(train_idx, collections.abc.Iterable))
            assert (isinstance(test_idx, collections.abc.Iterable))
            assert (isinstance(init_U, collections.abc.Iterable))
            assert (isinstance(init_L, collections.abc.Iterable))
assert (isinstance(round, int) and round >= 0)
self.round = round
self.train_idx = copy.copy(train_idx)
self.test_idx = copy.copy(test_idx)
if isinstance(init_U, BaseCollection) and isinstance(init_L, BaseCollection):
self.init_U = copy.deepcopy(init_U)
self.init_L = copy.deepcopy(init_L)
else:
try:
check_index_multilabel(init_L)
check_index_multilabel(init_U)
self.init_U = copy.deepcopy(MultiLabelIndexCollection(init_U))
self.init_L = copy.deepcopy(MultiLabelIndexCollection(init_L))
except TypeError:
self.init_U = copy.deepcopy(IndexCollection(init_U))
self.init_L = copy.deepcopy(IndexCollection(init_L))
# self.init_U = copy.deepcopy(IndexCollection(init_U) if not isinstance(init_U, BaseCollection) else init_U)
# self.init_L = copy.deepcopy(IndexCollection(init_L) if not isinstance(init_L, BaseCollection) else init_L)
self.initial_point = initial_point
self.batch_size = 0
self.__state_list = []
self._first_print = True
self.cost_inall = 0
self._numqdata = 0
self._saving_file_name = 'AL_round_' + str(self.round) + '.pkl'
self._saving_dir = None
if saving_path is not None:
if not isinstance(saving_path, str):
raise TypeError("A string is expected, but received: %s" % str(type(saving_path)))
saving_path = os.path.abspath(saving_path)
if os.path.isdir(saving_path):
self._saving_dir = saving_path
else:
self._saving_dir, self._saving_file_name = os.path.split(saving_path)
@classmethod
def load(cls, path):
"""Load StateIO object from file.
Parameters
----------
path: str
The path should be a specific .pkl file.
Returns
-------
object: StateIO
The StateIO object in the file.
"""
f = open(os.path.abspath(path), 'rb')
saver_from_file = pickle.load(f)
f.close()
return saver_from_file
def set_initial_point(self, perf):
"""The initial point of performance before querying.
Parameters
----------
perf: float
The performance value.
"""
self.initial_point = perf
def save(self):
"""Saving intermediate results to file."""
if self._saving_dir is None:
return
f = open(os.path.join(self._saving_dir, self._saving_file_name), 'wb')
pickle.dump(self, f)
f.close()
def add_state(self, state):
"""Add a State object to the container.
Parameters
----------
state: {dict, State}
State object to be added. Or a dictionary with
the following keys: ['select_index', 'queried_info', 'performance']
"""
if not isinstance(state, State):
assert isinstance(state, dict), "state must be dict or State object."
assert 'select_index' in state and 'queried_info' in state and 'performance' in state, "The dict must contain the following keys: ['select_index', 'queried_info', 'performance']"
self.__state_list.append(copy.deepcopy(state))
self.__update_info()
if self.__verbose and len(self) % self.__print_interval == 0:
if self._first_print:
print('\n' + self.__repr__(), end='')
self._first_print = False
else:
print('\r' + self._refresh_dataline(), end='')
sys.stdout.flush()
def get_state(self, index):
"""Get a State object in the container.
Parameters
----------
index: int
The index of the State object. 0 <= index < len(self)
Returns
-------
st: State
The State object in the previous iteration.
"""
assert (0 <= index < len(self))
return copy.deepcopy(self.__state_list[index])
def check_batch_size(self):
"""Check if all queries have the same batch size.
Returns
-------
result: bool
Whether all the states have the same batch size.
"""
ind_uni = np.unique(
[self.__state_list[i].batch_size for i in range(len(self.__state_list) - 1)], axis=0)
if len(ind_uni) == 1:
self.batch_size = ind_uni[0]
return True
else:
return False
def pop(self, i=None):
"""remove and return item at index (default last)."""
return self.__state_list.pop(i)
def recover_workspace(self, iteration=None):
"""Recover workspace after $iteration$ querying.
For example, if 0 is given, the initial workspace without any querying will be recovered.
Note that, the object itself will be recovered, the information after the iteration will be discarded.
Parameters
----------
iteration: int, optional(default=None)
            Number of iterations to recover, starting from 0.
If nothing given, it will return the current workspace.
Returns
-------
train_idx: list
Index of training set, shape like [n_training_samples]
test_idx: list
Index of testing set, shape like [n_testing_samples]
label_idx: list
Index of labeling set, shape like [n_labeling_samples]
unlabel_idx: list
Index of unlabeling set, shape like [n_unlabeling_samples]
"""
if iteration is None:
iteration = len(self.__state_list)
assert (0 <= iteration <= len(self))
work_U = copy.deepcopy(self.init_U)
work_L = copy.deepcopy(self.init_L)
for i in range(iteration):
state = self.__state_list[i]
work_U.difference_update(state.get_value('select_index'))
work_L.update(state.get_value('select_index'))
self.__state_list = self.__state_list[0:iteration]
return copy.copy(self.train_idx), copy.copy(self.test_idx), copy.deepcopy(work_L), copy.deepcopy(work_U)
def get_workspace(self, iteration=None):
"""Get workspace after $iteration$ querying.
For example, if 0 is given, the initial workspace without any querying will be recovered.
Parameters
----------
iteration: int, optional(default=None)
            Number of iterations, starting from 0.
If nothing given, it will get the current workspace.
Returns
-------
train_idx: list
Index of training set, shape like [n_training_samples]
test_idx: list
Index of testing set, shape like [n_testing_samples]
label_idx: list
Index of labeling set, shape like [n_labeling_samples]
unlabel_idx: list
Index of unlabeling set, shape like [n_unlabeling_samples]
"""
if iteration is None:
iteration = len(self.__state_list)
assert (0 <= iteration <= len(self))
work_U = copy.deepcopy(self.init_U)
work_L = copy.deepcopy(self.init_L)
for i in range(iteration):
state = self.__state_list[i]
work_U.difference_update(state.get_value('select_index'))
work_L.update(state.get_value('select_index'))
return copy.copy(self.train_idx), copy.copy(self.test_idx), copy.deepcopy(work_L), copy.deepcopy(work_U)
def num_of_query(self):
"""Return the number of queries"""
return len(self.__state_list)
def get_current_performance(self):
"""Return the mean ± std performance of all existed states.
Only available when the performance of each state is a single float value.
Returns
-------
mean: float
Mean performance of the existing states.
std: float
Std performance of the existing states.
"""
if len(self) == 0:
return 0, 0
else:
tmp = [self[i].get_value('performance') for i in range(self.__len__())]
            if isinstance(tmp[0], collections.abc.Iterable):  # collections.Iterable was removed in Python 3.10
return np.NaN, np.NaN
else:
return np.mean(tmp), np.std(tmp)
def __len__(self):
return len(self.__state_list)
def __getitem__(self, item):
return self.__state_list.__getitem__(item)
def __contains__(self, other):
return other in self.__state_list
def __iter__(self):
return iter(self.__state_list)
def refresh_info(self):
"""re-calculate current active learning progress."""
numqdata = 0
cost = 0.0
for state in self.__state_list:
numqdata += len(state.get_value('select_index'))
if 'cost' in state.keys():
cost += np.sum(state.get_value('cost'))
self.cost_inall = cost
self._numqdata = numqdata
return numqdata, cost
def __update_info(self):
"""Update current active learning progress"""
state = self.__state_list[len(self) - 1]
if 'cost' in state.keys():
self.cost_inall += np.sum(state.get_value('cost'))
self._numqdata += len(state.get_value('select_index'))
def __repr__(self):
numqdata = self._numqdata
cost = self.cost_inall
tb = pt.PrettyTable()
tb.set_style(pt.MSWORD_FRIENDLY)
tb.add_column('round', [self.round])
tb.add_column('initially labeled data', [
" %d (%.2f%% of all)" % (len(self.init_L), 100 * len(self.init_L) / (len(self.init_L) + len(self.init_U)))])
tb.add_column('number of queries', [len(self.__state_list)])
# tb.add_column('queried data', ["%d (%.2f%% of unlabeled data)" % (numqdata, self.queried_percentage)])
tb.add_column('cost', [cost])
# tb.add_column('saving path', [self._saving_dir])
tb.add_column('Performance:', ["%.3f ± %.2f" % self.get_current_performance()])
return str(tb)
def _refresh_dataline(self):
tb = self.__repr__()
return tb.splitlines()[1]
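# --- Usage sketch (illustrative, not part of the library) --------------------
# Assumes `saver` is a StateIO instance built with the constructor defined
# earlier in this file and `state` is a State object from one query iteration.
def _stateio_demo(saver, state):
    saver.add_state(state)
    print(saver.num_of_query(), saver.get_current_performance())
    # Roll the workspace back to the point before any query was made:
    train, test, lab, unlab = saver.recover_workspace(iteration=0)
    return train, test, lab, unlab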
# class StateIO_all_labels(StateIO):
# """StateIO for all _labels querying"""
# def add_state(self, state):
# assert (isinstance(state, experiment_saver.state.State))
# self.__state_list.append(copy.deepcopy(state))
# if self.__check_flag:
# res, err_st, err_ind = self.check_select_index()
# if res == -1:
# warnings.warn(
# 'Checking validity fails, there is a queried instance not in set_U in '
# 'State:%d, index:%s.' % (err_st, str(err_ind)),
# category=ValidityWarning)
# if res == -2:
# warnings.warn('Checking validity fails, there are instances already queried '
# 'in previous iteration in State:%d, index:%s.' % (err_st, str(err_ind)),
# category=ValidityWarning)
# self.__update_info()
#
#
# if self.__verbose and len(self) % self.__print_interval == 0:
# if self._first_print:
# print('\n' + self.__repr__(), end='')
# self._first_print = False
# else:
# print('\r' + self._refresh_dataline(), end='')
# sys.stdout.flush()
#
# def check_select_index(self):
# """
# check:
# - Q has no repeating elements
# - Q in U
# Returns
# -------
# result: int
# check result
# - if -1 is returned, there is a queried instance not in U
# - if -2 is returned, there are repeated instances in Q
# - if 1 is returned, CHECK OK
#
# state_index: int
# the state index when checking fails (start from 0)
# if CHECK OK, None is returned.
#
# select_index: object
# the select_index when checking fails.
# if CHECK OK, None is returned.
# """
# repeat_dict = dict()
# ind = -1
# for st in self.__state_list:
# ind += 1
# for instance in st.get_value('select_index'):
# if instance not in self.init_U:
# return -1, ind, instance
# if instance not in repeat_dict.keys():
# repeat_dict[instance] = 1
# else:
# return -2, ind, instance
# return 1, None, None
#
# @property
# def queried_percentage(self):
# """return the queried percentage of unlabeled data"""
# return 100 * self._numqdata / len(self.init_U)
| 3,429
| 0
| 189
|
95d77119937afbad81cc3276b13c8dbe68d86a97
| 660
|
py
|
Python
|
eval/cp.py
|
1asso/TOM-Net
|
ba13bd3f1bac0fa50c6043290691d7be5c29f777
|
[
"MIT"
] | 1
|
2020-06-25T22:45:46.000Z
|
2020-06-25T22:45:46.000Z
|
eval/cp.py
|
1asso/TOM-Net
|
ba13bd3f1bac0fa50c6043290691d7be5c29f777
|
[
"MIT"
] | null | null | null |
eval/cp.py
|
1asso/TOM-Net
|
ba13bd3f1bac0fa50c6043290691d7be5c29f777
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import re
import os
import torch
import torch.nn.functional as F
import struct
import glob
from PIL import Image
import torchvision.transforms.functional as TF
from shutil import copyfile
root_dir = ''
ori_dir = ''
if __name__ == '__main__':
sub_dir = [os.path.join(root_dir, name) for name in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, name))]
rec_err = 0
rec_bg = 0
for d in sub_dir:
print(d)
        input = glob.glob(os.path.join(d, '*input.png'))[0]
        # Derive the source image name: '<prefix>_input.png' -> '<prefix>.jpg'
        name = input[:-10] + '.jpg'
        name = name.split('/')[-1]
        ori = glob.glob(os.path.join(ori_dir, name))[0]
        # Copy the original image next to the input as '<prefix>_tar.png'
        copyfile(ori, input[:-10] + '_tar.png')
| 21.290323
| 121
| 0.681818
|
#!/usr/bin/env python3
import re
import os
import torch
import torch.nn.functional as F
import struct
import glob
from PIL import Image
import torchvision.transforms.functional as TF
from shutil import copyfile
root_dir = ''
ori_dir = ''
if __name__ == '__main__':
sub_dir = [os.path.join(root_dir, name) for name in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, name))]
rec_err = 0
rec_bg = 0
for d in sub_dir:
print(d)
        input = glob.glob(os.path.join(d, '*input.png'))[0]
        # Derive the source image name: '<prefix>_input.png' -> '<prefix>.jpg'
        name = input[:-10] + '.jpg'
        name = name.split('/')[-1]
        ori = glob.glob(os.path.join(ori_dir, name))[0]
        # Copy the original image next to the input as '<prefix>_tar.png'
        copyfile(ori, input[:-10] + '_tar.png')
| 0
| 0
| 0
|
80089fb785b3a1570d67b41953f2b6e2198dc3df
| 7,003
|
py
|
Python
|
vap_turn_taking/backchannel.py
|
ErikEkstedt/vad_turn_taking
|
c24e0ddfe9c739328872310e56f4b8c17f82c92c
|
[
"MIT"
] | null | null | null |
vap_turn_taking/backchannel.py
|
ErikEkstedt/vad_turn_taking
|
c24e0ddfe9c739328872310e56f4b8c17f82c92c
|
[
"MIT"
] | null | null | null |
vap_turn_taking/backchannel.py
|
ErikEkstedt/vad_turn_taking
|
c24e0ddfe9c739328872310e56f4b8c17f82c92c
|
[
"MIT"
] | null | null | null |
import torch
from vap_turn_taking.utils import find_island_idx_len
from vap_turn_taking.hold_shifts import get_dialog_states, get_last_speaker
def find_isolated_within(vad, prefix_frames, max_duration_frames, suffix_frames):
"""
... <= prefix_frames (silence) | <= max_duration_frames (active) | <= suffix_frames (silence) ...
"""
isolated = torch.zeros_like(vad)
for b, vad_tmp in enumerate(vad):
for speaker in [0, 1]:
starts, durs, vals = find_island_idx_len(vad_tmp[..., speaker])
for step in range(1, len(starts) - 1):
# Activity condition: current step is active
if vals[step] == 0:
continue
# Prefix condition:
# check that current active step comes after a certain amount of inactivity
if durs[step - 1] < prefix_frames:
continue
# Suffix condition
                # check that the current active step is followed by a certain amount of inactivity
if durs[step + 1] < suffix_frames:
continue
current_dur = durs[step]
if current_dur <= max_duration_frames:
start = starts[step]
end = start + current_dur
isolated[b, start:end, speaker] = 1.0
return isolated
if __name__ == "__main__":
import matplotlib.pyplot as plt
from vap_turn_taking.plot_utils import plot_vad_oh
    # NOTE: `bs_dict`, `va`, `bc` and `vad` are assumed to be defined by the
    # surrounding experiment code; this demo block is not self-contained.
    BS = Backchannel(**bs_dict)  # fixed typo: was `Backhannel`
    tt_bc = BS(va)
    (tt_bc["backchannel"] != bc).sum()
n_rows = 4
n_cols = 4
fig, ax = plt.subplots(n_rows, n_cols, sharey=True, sharex=True, figsize=(16, 4))
b = 0
for row in range(n_rows):
for col in range(n_cols):
_ = plot_vad_oh(vad[b], ax=ax[row, col])
_ = plot_vad_oh(
bc["backchannel"][b],
ax=ax[row, col],
colors=["purple", "purple"],
alpha=0.8,
)
b += 1
if b == vad.shape[0]:
break
if b == vad.shape[0]:
break
plt.pause(0.1)
| 36.857895
| 118
| 0.553049
|
import torch
from vap_turn_taking.utils import find_island_idx_len
from vap_turn_taking.hold_shifts import get_dialog_states, get_last_speaker
def find_isolated_within(vad, prefix_frames, max_duration_frames, suffix_frames):
"""
... <= prefix_frames (silence) | <= max_duration_frames (active) | <= suffix_frames (silence) ...
"""
isolated = torch.zeros_like(vad)
for b, vad_tmp in enumerate(vad):
for speaker in [0, 1]:
starts, durs, vals = find_island_idx_len(vad_tmp[..., speaker])
for step in range(1, len(starts) - 1):
# Activity condition: current step is active
if vals[step] == 0:
continue
# Prefix condition:
# check that current active step comes after a certain amount of inactivity
if durs[step - 1] < prefix_frames:
continue
# Suffix condition
                # check that the current active step is followed by a certain amount of inactivity
if durs[step + 1] < suffix_frames:
continue
current_dur = durs[step]
if current_dur <= max_duration_frames:
start = starts[step]
end = start + current_dur
isolated[b, start:end, speaker] = 1.0
return isolated
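# --- Usage sketch (illustrative, not part of the library) --------------------
# A toy VAD tensor of shape (batch, n_frames, 2 speakers): a 2-frame burst is
# flagged as isolated because it is short and surrounded by enough silence.
def _isolated_demo():
    vad = torch.zeros((1, 20, 2))
    vad[0, 8:10, 0] = 1.0
    iso = find_isolated_within(vad, prefix_frames=3, max_duration_frames=4, suffix_frames=3)
    return iso[0, :, 0]  # frames 8 and 9 are set to 1.0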
class Backchannel:
def __init__(
self,
max_duration_frames,
min_duration_frames,
pre_silence_frames,
post_silence_frames,
metric_dur_frames,
metric_pre_label_dur,
):
assert (
metric_dur_frames <= max_duration_frames
), "`metric_dur_frames` must be less than `max_duration_frames`"
self.max_duration_frames = max_duration_frames
self.min_duration_frames = min_duration_frames
self.pre_silence_frames = pre_silence_frames
self.post_silence_frames = post_silence_frames
self.metric_dur_frames = metric_dur_frames
self.metric_pre_label_dur = metric_pre_label_dur
def __repr__(self):
s = "\nBackchannel"
s += f"\n max_duration_frames: {self.max_duration_frames}"
s += f"\n pre_silence_frames: {self.pre_silence_frames}"
s += f"\n post_silence_frames: {self.post_silence_frames}"
return s
def backchannel(self, vad, last_speaker, max_frame=None, min_context=0):
"""
Finds backchannel based on VAD signal. Iterates over batches and speakers.
Extracts segments of activity/non-activity to find backchannels.
Backchannel Conditions
* Backchannel activity must be shorter than `self.max_duration_frames`
* Backchannel activity must follow activity from the other speaker
* Silence prior to backchannel, in the "backchanneler" channel, must be greater than `self.pre_silence_frames`
        * Silence after backchannel, in the "backchanneler" channel, must be greater than `self.post_silence_frames`
"""
bc_oh = torch.zeros_like(vad)
pre_bc_oh = torch.zeros_like(vad)
for b, vad_tmp in enumerate(vad):
for speaker in [0, 1]:
other_speaker = 0 if speaker == 1 else 1
starts, durs, vals = find_island_idx_len(vad_tmp[..., speaker])
for step in range(1, len(starts) - 1):
# Activity condition: current step is active
if vals[step] == 0:
continue
# Activity duration condition: segment must be shorter than
# a certain number of frames
if durs[step] > self.max_duration_frames:
continue
if durs[step] < self.min_duration_frames:
continue
start = starts[step]
# Shift-ish condition:
# Was the other speaker active prior to this `backchannel` candidate?
                    # If not, then this is a short IPU in the middle of a turn
pre_speaker_cond = last_speaker[b, start - 1] == other_speaker
if not pre_speaker_cond:
continue
# Prefix condition:
# check that current active step comes after a certain amount of inactivity
if durs[step - 1] < self.pre_silence_frames:
continue
# Suffix condition
                    # check that the current active step is followed by a certain amount of inactivity
if durs[step + 1] < self.post_silence_frames:
continue
                    # Add this segment as a backchannel
end = starts[step] + durs[step]
if self.metric_dur_frames > 0:
end = starts[step] + self.metric_dur_frames
# Max Frame condition:
# can't have event outside of predictable window
if max_frame is not None:
if end >= max_frame:
continue
# Min Context condition:
if starts[step] < min_context:
continue
bc_oh[b, starts[step] : end, speaker] = 1.0
# Min Context condition:
if (starts[step] - self.metric_pre_label_dur) < min_context:
continue
pre_bc_oh[
b,
starts[step] - self.metric_pre_label_dur : starts[step],
speaker,
] = 1.0
return bc_oh, pre_bc_oh
def __call__(self, vad, last_speaker=None, ds=None, max_frame=None, min_context=0):
if ds is None:
ds = get_dialog_states(vad)
if last_speaker is None:
last_speaker = get_last_speaker(vad, ds)
bc_oh, pre_bc = self.backchannel(
vad, last_speaker, max_frame=max_frame, min_context=min_context
)
return {"backchannel": bc_oh, "pre_backchannel": pre_bc}
if __name__ == "__main__":
import matplotlib.pyplot as plt
from vap_turn_taking.plot_utils import plot_vad_oh
    # NOTE: `bs_dict`, `va`, `bc` and `vad` are assumed to be defined by the
    # surrounding experiment code; this demo block is not self-contained.
    BS = Backchannel(**bs_dict)  # fixed typo: was `Backhannel`
    tt_bc = BS(va)
    (tt_bc["backchannel"] != bc).sum()
n_rows = 4
n_cols = 4
fig, ax = plt.subplots(n_rows, n_cols, sharey=True, sharex=True, figsize=(16, 4))
b = 0
for row in range(n_rows):
for col in range(n_cols):
_ = plot_vad_oh(vad[b], ax=ax[row, col])
_ = plot_vad_oh(
bc["backchannel"][b],
ax=ax[row, col],
colors=["purple", "purple"],
alpha=0.8,
)
b += 1
if b == vad.shape[0]:
break
if b == vad.shape[0]:
break
plt.pause(0.1)
| 1,307
| 3,505
| 23
|
0d21441157a5bcd0f69a92c179cb9be376ab1a78
| 190
|
py
|
Python
|
SUIBE_DID_Data_Manager/blueprints/data_manager/models.py
|
SUIBE-Blockchain/SUIBE_DID_Data_Manager
|
d38f3f37463f36802eb6acb578f8e17faf878c79
|
[
"MIT"
] | null | null | null |
SUIBE_DID_Data_Manager/blueprints/data_manager/models.py
|
SUIBE-Blockchain/SUIBE_DID_Data_Manager
|
d38f3f37463f36802eb6acb578f8e17faf878c79
|
[
"MIT"
] | 2
|
2020-10-21T07:05:43.000Z
|
2020-10-22T17:10:53.000Z
|
SUIBE_DID_Data_Manager/blueprints/data_manager/models.py
|
SUIBE-Blockchain/SUIBE_DID_Data_Manager
|
d38f3f37463f36802eb6acb578f8e17faf878c79
|
[
"MIT"
] | null | null | null |
import datetime as dt
from flask_login import UserMixin
from SUIBE_DID_Data_Manager.database import (
Column,
Model,
SurrogatePK,
db,
reference_col,
relationship,
)
| 15.833333
| 45
| 0.721053
|
import datetime as dt
from flask_login import UserMixin
from SUIBE_DID_Data_Manager.database import (
Column,
Model,
SurrogatePK,
db,
reference_col,
relationship,
)
| 0
| 0
| 0
|
cd7c1b16a2fe7d789b8f3a8bfd1cd2fda50d36ec
| 2,000
|
py
|
Python
|
base_app.py
|
loblab/resouce-simulator
|
a3d62f32ec1f377548519e7aa4eaef10d5bdd0c2
|
[
"Apache-2.0"
] | 1
|
2019-12-09T01:28:17.000Z
|
2019-12-09T01:28:17.000Z
|
base_app.py
|
loblab/resouce-simulator
|
a3d62f32ec1f377548519e7aa4eaef10d5bdd0c2
|
[
"Apache-2.0"
] | null | null | null |
base_app.py
|
loblab/resouce-simulator
|
a3d62f32ec1f377548519e7aa4eaef10d5bdd0c2
|
[
"Apache-2.0"
] | 1
|
2019-12-09T01:28:19.000Z
|
2019-12-09T01:28:19.000Z
|
import argparse
import signal
import time
import sys
import os
import logging
| 28.169014
| 105
| 0.5835
|
import argparse
import signal
import time
import sys
import os
import logging
class BaseApp:
def __init__(self, id, description):
signal.signal(signal.SIGINT, self.sig_handler)
signal.signal(signal.SIGTERM, self.sig_handler)
self.quit_flag = False
sfile = sys.argv[0]
ver = time.strftime('Ver %Y/%m/%d %H:%M %Z', time.localtime(os.path.getmtime(sfile)))
self.argps = argparse.ArgumentParser(description=description)
self.argps.add_argument('-V', '--version', action='version', version=ver)
self.argps.add_argument('-D', '--debug', action='store_true',
help="output more logs (debug level)")
self.base_init()
self.init()
self.args = self.argps.parse_args()
try:
self.id = self.args.id
        except AttributeError:
self.id = id
self.init_logger()
self.log.info(description)
self.log.info(ver)
if self.args.debug:
self.log.setLevel(logging.DEBUG)
self.log.debug("Debug log on")
def init_logger(self):
self.log = logging.getLogger(self.id)
self.log.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s.%(msecs)03d - %(name)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S')
ch.setFormatter(formatter)
self.log.addHandler(ch)
def quit(self):
self.log.debug("Going to quit...")
self.quit_flag = True
def sig_handler(self, signum, frame):
self.log.info("Got signal %d" % signum)
self.quit()
def base_init(self):
pass
def init(self):
pass
def startup(self):
self.log.info("Startup...")
def cleanup(self):
self.log.info("Cleanup...")
def main(self):
self.startup()
while not self.quit_flag:
time.sleep(1)
self.cleanup()
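# --- Usage sketch (illustrative, not part of the library) --------------------
# A minimal subclass: `init` registers extra CLI flags before parsing, and
# `main` runs the 1-second poll loop until SIGINT/SIGTERM sets `quit_flag`.
class EchoApp(BaseApp):
    def init(self):
        self.argps.add_argument('--id', default='echo', help="application id")
    def startup(self):
        super().startup()
        self.log.info("EchoApp ready")
# EchoApp('echo', 'Echo demo application').main()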
| 1,662
| -7
| 266
|
f03ed12e66ae135f1cc8f9ec76a8ec9bbe0ca691
| 1,468
|
py
|
Python
|
iv/Binarysearch_strings/rotated_search_array.py
|
iamsuman/iv
|
bf68d3fd45455b6041e74b09272f69503bf7a8ac
|
[
"MIT"
] | 2
|
2020-09-19T22:28:15.000Z
|
2020-10-03T01:44:53.000Z
|
iv/Binarysearch_strings/rotated_search_array.py
|
iamsuman/iv
|
bf68d3fd45455b6041e74b09272f69503bf7a8ac
|
[
"MIT"
] | null | null | null |
iv/Binarysearch_strings/rotated_search_array.py
|
iamsuman/iv
|
bf68d3fd45455b6041e74b09272f69503bf7a8ac
|
[
"MIT"
] | 1
|
2020-10-03T01:43:30.000Z
|
2020-10-03T01:43:30.000Z
|
A = [4, 5, 6, 7, 0, 1, 2, 3]
A = [0,1,2,3,4,5,6,7]
B = 4
A = [ 101, 103, 106, 109, 158, 164, 182, 187, 202, 205, 2, 3, 32, 57, 69, 74, 81, 99, 100 ]
B = 202
a = Rotated_Search()
print(a.search(A, B))
| 25.310345
| 91
| 0.44891
|
class Rotated_Search():
def search(self, A, B):
n = len(A)
pivot = self.find_pivot(A, 0, n - 1)
# A = [4, 5, 6, 7, 0, 1, 2, 3]
# B = 4
# print(pivot)
if pivot == -1:
return self.binary_search(A, 0, n - 1, B)
if A[pivot] == B:
return pivot
if B < A[0]:
return self.binary_search(A, pivot + 1, n - 1, B)
else:
return self.binary_search(A, 0, pivot - 1, B)
def find_pivot(self, A, low, hi):
if low > hi:
return -1
if low == hi:
return low
mid = int((low + hi) / 2)
if mid < hi and A[mid] > A[mid + 1]:
return mid
if mid > low and A[mid] < A[mid - 1]:
return (mid - 1)
if A[low] >= A[mid]:
return self.find_pivot(A, low, mid - 1)
else:
return self.find_pivot(A, mid + 1, hi)
def binary_search(self, A, low, hi, key):
if hi < low:
return -1
mid = int((hi + low) / 2)
if key == A[mid]:
return mid
if key > A[mid]:
return self.binary_search(A, mid + 1, hi, key)
else:
return self.binary_search(A, low, mid - 1, key)
# Alternate test inputs (kept for reference; overridden by the assignments below):
# A = [4, 5, 6, 7, 0, 1, 2, 3]
# A = [0, 1, 2, 3, 4, 5, 6, 7]
# B = 4
A = [ 101, 103, 106, 109, 158, 164, 182, 187, 202, 205, 2, 3, 32, 57, 69, 74, 81, 99, 100 ]
B = 202
a = Rotated_Search()
print(a.search(A, B))  # -> 8, the index of 202 in the rotated array
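# --- Quick sanity checks (illustrative additions) -----------------------------
def _rotated_search_demo():
    rs = Rotated_Search()
    assert rs.search([4, 5, 6, 7, 0, 1, 2, 3], 0) == 4  # element right of the pivot
    assert rs.search([0, 1, 2, 3, 4, 5, 6, 7], 5) == 5  # unrotated input uses plain binary search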
| 1,160
| 2
| 102
|
5ffc54c364f831310a5bc6f460542db448a8e796
| 16,997
|
py
|
Python
|
mesohops/dynamics/hops_aux.py
|
MesoscienceLab/mesohops
|
b845dc61e65af158382a47c4894c3875e05f09e1
|
[
"MIT"
] | 7
|
2020-08-17T03:39:42.000Z
|
2022-02-10T22:55:55.000Z
|
mesohops/dynamics/hops_aux.py
|
MesoscienceLab/mesohops
|
b845dc61e65af158382a47c4894c3875e05f09e1
|
[
"MIT"
] | null | null | null |
mesohops/dynamics/hops_aux.py
|
MesoscienceLab/mesohops
|
b845dc61e65af158382a47c4894c3875e05f09e1
|
[
"MIT"
] | 1
|
2021-07-26T02:11:16.000Z
|
2021-07-26T02:11:16.000Z
|
import numpy as np
from collections.abc import Mapping
from mesohops.util.exceptions import AuxError
from scipy.special import binom
__title__ = "AuxiliaryVector Class"
__author__ = "D. I. G. Bennett"
__version__ = "1.0"
class AuxiliaryVector(Mapping):
"""
This is a class that encodes a sparse representation of auxiliary vectors
with some extra helper functions to simplify some common actions, such as:
determining the absolute index, adding a unit vector, and calculating the sum.
The class is not mutable - which is to say, once an auxiliary vector is defined,
it cannot be changed.
"""
__slots__ = ('dict_aux_vec', 'tuple_aux_vec', 'array_aux_vec', '__abs_index', '__len'
, 'hash', 'index', '_sum', '_dict_aux_p1', '_dict_aux_m1')
def __init__(self, aux_array, nmodes):
"""
INPUTS
------
1. aux_array : iterable
list of (mode, value) pairs for all non-zero indices of the auxiliary
vector
2. nmodes : int
the number of modes in the hierarchy which is the length of the dense
auxiliary vector.
RETURNS
-------
None
"""
self.dict_aux_vec = {
index_mode: aux_value for (index_mode, aux_value) in aux_array
}
self.tuple_aux_vec = tuple(
[tuple([mode, value]) for (mode, value) in aux_array]
)
self.array_aux_vec = np.array(aux_array)
if len(self.array_aux_vec)>0 and not np.all(np.diff(self.array_aux_vec[:,0])>0):
raise AuxError("array_aux_vec not properly ordered")
self.__abs_index = None
self.__len = nmodes
self.hash = hash(self.tuple_aux_vec)
self.index = None
self._sum = np.sum(self.values())
self._dict_aux_p1 = {}
self._dict_aux_m1 = {}
# Dictionary-like methods overwriting Mutable Mapping
# ===================================================
def keys(self):
"""
This function returns an array of mode indices for the auxiliary vectors
Parameters
----------
None
Returns
-------
1. keys : array
an array of mode indices with nonzero auxiliary index
"""
if len(self.dict_aux_vec) > 0:
return self.array_aux_vec[:, 0]
else:
return np.array([])
def values(self):
"""
This function returns an array of the auxiliary vector values
Parameters
----------
None
Returns
-------
1. values : array
an array of nonzero auxiliary index values
"""
if len(self.dict_aux_vec) > 0:
return self.array_aux_vec[:, 1]
else:
return np.array([])
# Comparison Methods
# ==================
def _compare(self, other, comparison_function):
"""
This function compares two auxiliary vectors
Parameters
----------
1. other : array
the array you want to compare
2. comparison_function : function
a comparison function
Returns
-------
1. bool_compare : bool
a boolean for the comparison
"""
if isinstance(other, AuxiliaryVector) and len(self) == len(other):
return comparison_function(self.absolute_index, other.absolute_index)
else:
return False
# Special Methods
# ===============
def difference_by_mode(self, other):
"""
Compares the current HopsAux object to another HopsAux object. If they differ
by only 1 step, then it returns the mode along which they differ.
Parameters
----------
1. other: HopsAux object
The HopsAux object to which the current object is compared.
Returns
-------
1. diff_mode : int or False
The mode index along which they differ or False if they differ
by more than 1 step.
"""
set_key_self = set(self.keys())
set_key_other = set(other.keys())
# Check that the two HopsAux belong to the same hierarchy
assert self.__len == len(other)
if np.abs(self._sum - other._sum) == 1:
if set_key_self == set_key_other:
values = np.abs(self.array_aux_vec[:,1]- other.array_aux_vec[:,1])
if np.sum(values) == 1:
return self.array_aux_vec[np.where(values)[0][0],0]
elif (len(set_key_self | set_key_other)
- len(set_key_self & set_key_other)) == 1:
value = 0
for key in set_key_self | set_key_other:
value += np.abs(self[key] - other[key])
if value == 1:
index = list((set_key_self | set_key_other) - (set_key_self &
set_key_other))[0]
return index
return False
def dot(self, vec):
"""
This is a function that performs a sparse dot product between the
auxiliary index vector and another vector.
Parameters
----------
1. vec : np.array
a vector
Returns
-------
1. product : float
the dot product value
"""
if len(self.dict_aux_vec) == 0:
return 0
else:
return np.dot(self.array_aux_vec[:, 1], vec[self.array_aux_vec[:, 0]])
def sum(self, **unused_kwargs):
"""
This function returns the sum of the auxiliary vector values
Parameters
----------
None
Returns
-------
1. sum : float
the sum of the nonzero values of the auxiliary vectors
"""
try:
return self._sum
        except AttributeError:
return np.sum(self.values())
def todense(self):
"""
This function will take a sparse vector and make it dense
Parameters
----------
None
Returns
-------
1. output : array
the dense vector
"""
output = np.zeros(self.__len)
if len(self.dict_aux_vec) == 0:
return output
output[self.keys()] = self.values()
return output
def toarray(self):
"""
This function converts a dict to an array
Parameters
----------
None
Returns
-------
1. array : array
a dict in an array form
"""
return self.array_aux_vec
def get_values(self, index_slice):
"""
This function gets the dense auxiliary vector values from a sub-indexed list
Parameters
----------
1. index_slice : list
a list of indices
Returns
-------
1. values : array
an array of values at the given indices
"""
return np.array([self.__getitem__(key) for key in index_slice])
def get_values_nonzero(self, index_slice):
"""
This function gets the sparse auxiliary vector values from a sub-indexed list
NOTE: the values are returned in key order not the order
they are present in index_slice
Parameters
----------
1. index_slice : list
a list of indices
Returns
-------
1. values : array
a sparse array of the non-zero auxiliary vector values
"""
return np.array(
[self.dict_aux_vec[key] for key in self.keys() if key in index_slice]
)
def e_step(self, mode, step):
"""
This function returns a new Auxiliary Vector with the desired step in the given
mode
Parameters
----------
1. mode : int
The absolute mode index
2. step : int
The change in the aux value for the given mode
Returns
-------
1. aux_vec : tuple
the new sparse auxiliary vector
"""
return AuxiliaryVector(self.tuple_from_e_step(mode, step), nmodes=self.__len)
def hash_from_e_step(self, mode, step):
"""
This function returns the hash of a new Auxiliary Vector with the desired step
in the given mode
Parameters
----------
1. mode : int
The absolute mode index
2. step : int
The change in the aux value for the given mode
Returns
-------
1. hash : int
the hash of the tuple sparse auxiliary vector created from e_step
"""
return hash(self.tuple_from_e_step(mode, step))
def tuple_from_e_step(self, mode, step):
"""
Returns the sparse tuple representation of the auxiliary that is the given step
length along the given absolute mode index away from the current auxiliary.
Parameters
----------
1. mode : int
The absolute mode index
2. step : int
The change in the aux value for the given mode
Returns
-------
1. tuple_aux : tuple
The sparse representation of the auxiliary (sorted mode order)
"""
if step == 0:
return self.tuple_aux_vec
elif self.__getitem__(mode) + step < 0:
return ((0, -1),)
elif len(self.dict_aux_vec) == 0:
return tuple([(mode, step)])
elif mode in self.array_aux_vec[:, 0]:
if self.__getitem__(mode) + step == 0:
return tuple(
[
tuple([mode_i, value_i])
for (mode_i, value_i) in self.tuple_aux_vec
if mode_i != mode
]
)
else:
return tuple(
[
tuple([mode_i, value_i + step])
if mode_i == mode
else tuple([mode_i, value_i])
for (mode_i, value_i) in self.tuple_aux_vec
]
)
else:
list_keys = list(self.dict_aux_vec.keys())
list_keys.append(mode)
list_keys.sort()
list_values = [
step if key == mode else self.dict_aux_vec[key] for key in list_keys
]
return tuple(
[tuple([mode, value]) for (mode, value) in zip(list_keys, list_values)]
)
def index_analytic(self):
"""
This function provides an absolute index value for an auxiliary
vector using an analytic function of the indices. The basic idea
is that the indices are always ordered by increasing hierarchy
'level' (i.e. the sum of the indices). Within a level they are ordered
by first comparing the first values, then the second values, etc.
        This gives the indexing a particularly simple form within a level:
        L = sum(i_0, ..., i_N)
        index(i_0, ..., i_N) = sum_{n<N} sum_{i_n < l_n < L} ((N-n-1, L-sum(aux[:n])-l_n))
        where ((L, K)) denotes L multichoose K.
The derivation of the following equations is given on p. 68 of
Quantum Notebook #1. The sums have been removed by making use of the
binomial sum property and the binomial symmetry property. The result is
a equation that only sums over a number of elements equal to the number
of non-zero terms in aux.
PARAMETERS
----------
None
RETURNS
-------
1. index : int
the absolute index for an auxiliary
"""
# Constants
# ---------
aux = self.toarray()
n_hmode = self.__len
L = self.sum()
if not aux.size:
return 0
else:
# Calculate number of aux at order less than L
# --------------------------------------------
n_aux_below_l = int(binom(n_hmode + L - 1, L - 1))
# Calculate N+ contribution
# -------------------------
list_np_boxes = [n_hmode]
list_np_boxes.extend(n_hmode - aux[:-1, 0] - 1)
list_np_boxes = np.array(list_np_boxes)
list_np_balls = [L]
list_np_balls.extend(L - np.cumsum(aux[:-1, 1]))
list_np_balls = np.array(list_np_balls)
n_plus = np.nansum(
binom(list_np_boxes + list_np_balls - 1, list_np_boxes - 1)
)
# Calculate N- contributions
# --------------------------
list_nm_boxes = n_hmode - aux[:, 0] - 1
n_minus = np.nansum(binom(list_nm_boxes + list_np_balls, list_nm_boxes))
# calculate M contributions
# -------------------------
list_m_balls = L - np.cumsum(aux[:, 1]) - 1
m = np.nansum(binom(list_nm_boxes + list_m_balls, list_m_balls))
return int(n_aux_below_l + m + n_plus - n_minus)
def add_aux_connect(self, index_mode, aux_other, type):
"""
The function that updates the HopsAux object to contain a pointer to the
other HopsAux objects it is connected to.
Parameters
----------
1. index_mode : int
the mode along which the two HopsAux objects are connected
2. aux_other : HopsAux
the HopsAux object it is connected to
3. type : int
+1 or -1 depending on if the other aux has a larger or smaller sum
Returns
-------
1. None
"""
if type == 1:
self._dict_aux_p1.update({index_mode: aux_other})
elif type == -1:
self._dict_aux_m1.update({index_mode: aux_other})
else:
raise AuxError('add_aux_connect does not support type={}'.format(type))
def remove_aux_connect(self, index_mode, type):
"""
The function that removes the connection between the HopsAux object and another
connected with type (+1/-1) along index mode.
Parameters
----------
1. index_mode : int
the mode along which the two HopsAux objects are connected
2. type : int
+1 or -1 depending on if the other aux has a larger or smaller sum
Returns
-------
1. None
"""
if type == 1:
self._dict_aux_p1.pop(index_mode)
elif type == -1:
self._dict_aux_m1.pop(index_mode)
else:
raise AuxError('add_aux_connect does not support type={}'.format(type))
def remove_pointers(self):
"""
The function that removes all pointers targeting the current HopsAux object
from the set of HopsAux objects it has connections to.
Parameters
----------
1. None
Returns
-------
1. None
"""
for (index_mode, aux) in self.dict_aux_p1.items():
aux.remove_aux_connect(index_mode, -1)
for (index_mode, aux) in self.dict_aux_m1.items():
aux.remove_aux_connect(index_mode, 1)
self._dict_aux_m1 = {}
self._dict_aux_p1 = {}
| 30.735986
| 91
| 0.526681
|
import numpy as np
from collections.abc import Mapping
from mesohops.util.exceptions import AuxError
from scipy.special import binom
__title__ = "AuxiliaryVector Class"
__author__ = "D. I. G. Bennett"
__version__ = "1.0"
class AuxiliaryVector(Mapping):
"""
This is a class that encodes a sparse representation of auxiliary vectors
with some extra helper functions to simplify some common actions, such as:
determining the absolute index, adding a unit vector, and calculating the sum.
The class is not mutable - which is to say, once an auxiliary vector is defined,
it cannot be changed.
"""
__slots__ = ('dict_aux_vec', 'tuple_aux_vec', 'array_aux_vec', '__abs_index', '__len'
, 'hash', 'index', '_sum', '_dict_aux_p1', '_dict_aux_m1')
def __init__(self, aux_array, nmodes):
"""
INPUTS
------
1. aux_array : iterable
list of (mode, value) pairs for all non-zero indices of the auxiliary
vector
2. nmodes : int
the number of modes in the hierarchy which is the length of the dense
auxiliary vector.
RETURNS
-------
None
"""
self.dict_aux_vec = {
index_mode: aux_value for (index_mode, aux_value) in aux_array
}
self.tuple_aux_vec = tuple(
[tuple([mode, value]) for (mode, value) in aux_array]
)
self.array_aux_vec = np.array(aux_array)
if len(self.array_aux_vec)>0 and not np.all(np.diff(self.array_aux_vec[:,0])>0):
raise AuxError("array_aux_vec not properly ordered")
self.__abs_index = None
self.__len = nmodes
self.hash = hash(self.tuple_aux_vec)
self.index = None
self._sum = np.sum(self.values())
self._dict_aux_p1 = {}
self._dict_aux_m1 = {}
# Dictionary-like methods overwriting Mutable Mapping
# ===================================================
def __getitem__(self, key):
if key in self.dict_aux_vec.keys():
return self.dict_aux_vec[key]
elif key < len(self):
return 0
else:
raise AuxError("mode index larger than total number of modes.")
def __iter__(self):
return iter(self.dict_aux_vec)
def __len__(self):
return self.__len
def __repr__(self):
return f"{type(self).__name__}({self.dict_aux_vec})"
def keys(self):
"""
This function returns an array of mode indices for the auxiliary vectors
Parameters
----------
None
Returns
-------
1. keys : array
an array of mode indices with nonzero auxiliary index
"""
if len(self.dict_aux_vec) > 0:
return self.array_aux_vec[:, 0]
else:
return np.array([])
def values(self):
"""
This function returns an array of the auxiliary vector values
Parameters
----------
None
Returns
-------
1. values : array
an array of nonzero auxiliary index values
"""
if len(self.dict_aux_vec) > 0:
return self.array_aux_vec[:, 1]
else:
return np.array([])
# Comparison Methods
# ==================
def __hash__(self):
return self.hash
def __eq__(self, other):
return self.hash == other.hash
def __ne__(self, other):
return self.hash != other.hash
def _compare(self, other, comparison_function):
"""
This function compares two auxiliary vectors
Parameters
----------
1. other : array
the array you want to compare
2. comparison_function : function
a comparison function
Returns
-------
1. bool_compare : bool
a boolean for the comparison
"""
if isinstance(other, AuxiliaryVector) and len(self) == len(other):
return comparison_function(self.absolute_index, other.absolute_index)
else:
return False
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
# Special Methods
# ===============
def difference_by_mode(self, other):
"""
Compares the current HopsAux object to another HopsAux object. If they differ
by only 1 step, then it returns the mode along which they differ.
Parameters
----------
1. other: HopsAux object
The HopsAux object to which the current object is compared.
Returns
-------
1. diff_mode : int or False
The mode index along which they differ or False if they differ
by more than 1 step.
"""
set_key_self = set(self.keys())
set_key_other = set(other.keys())
# Check that the two HopsAux belong to the same hierarchy
assert self.__len == len(other)
if np.abs(self._sum - other._sum) == 1:
if set_key_self == set_key_other:
values = np.abs(self.array_aux_vec[:,1]- other.array_aux_vec[:,1])
if np.sum(values) == 1:
return self.array_aux_vec[np.where(values)[0][0],0]
elif (len(set_key_self | set_key_other)
- len(set_key_self & set_key_other)) == 1:
value = 0
for key in set_key_self | set_key_other:
value += np.abs(self[key] - other[key])
if value == 1:
index = list((set_key_self | set_key_other) - (set_key_self &
set_key_other))[0]
return index
return False
def dot(self, vec):
"""
This is a function that performs a sparse dot product between the
auxiliary index vector and another vector.
Parameters
----------
1. vec : np.array
a vector
Returns
-------
1. product : float
the dot product value
"""
if len(self.dict_aux_vec) == 0:
return 0
else:
return np.dot(self.array_aux_vec[:, 1], vec[self.array_aux_vec[:, 0]])
def sum(self, **unused_kwargs):
"""
This function returns the sum of the auxiliary vector values
Parameters
----------
None
Returns
-------
1. sum : float
the sum of the nonzero values of the auxiliary vectors
"""
try:
return self._sum
        except AttributeError:
return np.sum(self.values())
def todense(self):
"""
This function will take a sparse vector and make it dense
Parameters
----------
None
Returns
-------
1. output : array
the dense vector
"""
output = np.zeros(self.__len)
if len(self.dict_aux_vec) == 0:
return output
output[self.keys()] = self.values()
return output
def toarray(self):
"""
This function converts a dict to an array
Parameters
----------
None
Returns
-------
1. array : array
a dict in an array form
"""
return self.array_aux_vec
def get_values(self, index_slice):
"""
This function gets the dense auxiliary vector values from a sub-indexed list
Parameters
----------
1. index_slice : list
a list of indices
Returns
-------
1. values : array
an array of values at the given indices
"""
return np.array([self.__getitem__(key) for key in index_slice])
def get_values_nonzero(self, index_slice):
"""
This function gets the sparse auxiliary vector values from a sub-indexed list
NOTE: the values are returned in key order not the order
they are present in index_slice
Parameters
----------
1. index_slice : list
a list of indices
Returns
-------
1. values : array
a sparse array of the non-zero auxiliary vector values
"""
return np.array(
[self.dict_aux_vec[key] for key in self.keys() if key in index_slice]
)
def e_step(self, mode, step):
"""
This function returns a new Auxiliary Vector with the desired step in the given
mode
Parameters
----------
1. mode : int
The absolute mode index
2. step : int
The change in the aux value for the given mode
Returns
-------
1. aux_vec : tuple
the new sparse auxiliary vector
"""
return AuxiliaryVector(self.tuple_from_e_step(mode, step), nmodes=self.__len)
def hash_from_e_step(self, mode, step):
"""
This function returns the hash of a new Auxiliary Vector with the desired step
in the given mode
Parameters
----------
1. mode : int
The absolute mode index
2. step : int
The change in the aux value for the given mode
Returns
-------
1. hash : int
the hash of the tuple sparse auxiliary vector created from e_step
"""
return hash(self.tuple_from_e_step(mode, step))
def tuple_from_e_step(self, mode, step):
"""
Returns the sparse tuple representation of the auxiliary that is the given step
length along the given absolute mode index away from the current auxiliary.
Parameters
----------
1. mode : int
The absolute mode index
2. step : int
The change in the aux value for the given mode
Returns
-------
1. tuple_aux : tuple
The sparse representation of the auxiliary (sorted mode order)
"""
if step == 0:
return self.tuple_aux_vec
elif self.__getitem__(mode) + step < 0:
return ((0, -1),)
elif len(self.dict_aux_vec) == 0:
return tuple([(mode, step)])
elif mode in self.array_aux_vec[:, 0]:
if self.__getitem__(mode) + step == 0:
return tuple(
[
tuple([mode_i, value_i])
for (mode_i, value_i) in self.tuple_aux_vec
if mode_i != mode
]
)
else:
return tuple(
[
tuple([mode_i, value_i + step])
if mode_i == mode
else tuple([mode_i, value_i])
for (mode_i, value_i) in self.tuple_aux_vec
]
)
else:
list_keys = list(self.dict_aux_vec.keys())
list_keys.append(mode)
list_keys.sort()
list_values = [
step if key == mode else self.dict_aux_vec[key] for key in list_keys
]
return tuple(
[tuple([mode, value]) for (mode, value) in zip(list_keys, list_values)]
)
def index_analytic(self):
"""
This function provides an absolute index value for an auxiliary
vector using an analytic function of the indices. The basic idea
is that the indices are always ordered by increasing hierarchy
'level' (i.e. the sum of the indices). Within a level they are ordered
by first comparing the first values, then the second values, etc.
        This gives the indexing a particularly simple form within a level:
        L = sum(i_0, ..., i_N)
        index(i_0, ..., i_N) = sum_{n<N} sum_{i_n < l_n < L} ((N-n-1, L-sum(aux[:n])-l_n))
        where ((L, K)) denotes L multichoose K.
The derivation of the following equations is given on p. 68 of
Quantum Notebook #1. The sums have been removed by making use of the
binomial sum property and the binomial symmetry property. The result is
a equation that only sums over a number of elements equal to the number
of non-zero terms in aux.
PARAMETERS
----------
None
RETURNS
-------
1. index : int
the absolute index for an auxiliary
"""
# Constants
# ---------
aux = self.toarray()
n_hmode = self.__len
L = self.sum()
if not aux.size:
return 0
else:
# Calculate number of aux at order less than L
# --------------------------------------------
n_aux_below_l = int(binom(n_hmode + L - 1, L - 1))
# Calculate N+ contribution
# -------------------------
list_np_boxes = [n_hmode]
list_np_boxes.extend(n_hmode - aux[:-1, 0] - 1)
list_np_boxes = np.array(list_np_boxes)
list_np_balls = [L]
list_np_balls.extend(L - np.cumsum(aux[:-1, 1]))
list_np_balls = np.array(list_np_balls)
n_plus = np.nansum(
binom(list_np_boxes + list_np_balls - 1, list_np_boxes - 1)
)
# Calculate N- contributions
# --------------------------
list_nm_boxes = n_hmode - aux[:, 0] - 1
n_minus = np.nansum(binom(list_nm_boxes + list_np_balls, list_nm_boxes))
# calculate M contributions
# -------------------------
list_m_balls = L - np.cumsum(aux[:, 1]) - 1
m = np.nansum(binom(list_nm_boxes + list_m_balls, list_m_balls))
return int(n_aux_below_l + m + n_plus - n_minus)
def add_aux_connect(self, index_mode, aux_other, type):
"""
The function that updates the HopsAux object to contain a pointer to the
other HopsAux objects it is connected to.
Parameters
----------
1. index_mode : int
the mode along which the two HopsAux objects are connected
2. aux_other : HopsAux
the HopsAux object it is connected to
3. type : int
+1 or -1 depending on if the other aux has a larger or smaller sum
Returns
-------
1. None
"""
if type == 1:
self._dict_aux_p1.update({index_mode: aux_other})
elif type == -1:
self._dict_aux_m1.update({index_mode: aux_other})
else:
raise AuxError('add_aux_connect does not support type={}'.format(type))
def remove_aux_connect(self, index_mode, type):
"""
The function that removes the connection between the HopsAux object and another
connected with type (+1/-1) along index mode.
Parameters
----------
1. index_mode : int
the mode along which the two HopsAux objects are connected
2. type : int
+1 or -1 depending on if the other aux has a larger or smaller sum
Returns
-------
1. None
"""
if type == 1:
self._dict_aux_p1.pop(index_mode)
elif type == -1:
self._dict_aux_m1.pop(index_mode)
else:
raise AuxError('add_aux_connect does not support type={}'.format(type))
def remove_pointers(self):
"""
The function that removes all pointers targeting the current HopsAux object
from the set of HopsAux objects it has connections to.
Parameters
----------
1. None
Returns
-------
1. None
"""
for (index_mode, aux) in self.dict_aux_p1.items():
aux.remove_aux_connect(index_mode, -1)
for (index_mode, aux) in self.dict_aux_m1.items():
aux.remove_aux_connect(index_mode, 1)
self._dict_aux_m1 = {}
self._dict_aux_p1 = {}
@property
def absolute_index(self):
if self.__abs_index is None:
self.__abs_index = self.index_analytic()
return self.__abs_index
@property
def dict_aux_p1(self):
return self._dict_aux_p1
@property
def dict_aux_m1(self):
return self._dict_aux_m1
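# --- Usage sketch (illustrative, not part of the library) ---------------------
# A sparse auxiliary with i_0 = 1 and i_2 = 2 in a 4-mode hierarchy; `e_step`
# returns a new immutable vector one step away along the requested mode.
def _aux_demo():
    aux = AuxiliaryVector([(0, 1), (2, 2)], nmodes=4)
    assert aux.sum() == 3
    assert list(aux.todense()) == [1.0, 0.0, 2.0, 0.0]
    stepped = aux.e_step(1, 1)  # sparse form becomes ((0, 1), (1, 1), (2, 2))
    assert stepped.sum() == 4
    return aux.absolute_index, stepped.absolute_index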
| 892
| 0
| 373
|
de1f45d8e5aa67310bfd559a5f88a213e3a1cf2f
| 523
|
py
|
Python
|
add_user.py
|
dev-johnlopez/offerly
|
3d53e64747555318addd35b94b5674e1c3ad99d0
|
[
"MIT"
] | null | null | null |
add_user.py
|
dev-johnlopez/offerly
|
3d53e64747555318addd35b94b5674e1c3ad99d0
|
[
"MIT"
] | null | null | null |
add_user.py
|
dev-johnlopez/offerly
|
3d53e64747555318addd35b94b5674e1c3ad99d0
|
[
"MIT"
] | null | null | null |
import os
from app import create_app, db, cli
from flask import current_app
from flask_security import Security, SQLAlchemyUserDatastore, current_user
from flask_security.utils import encrypt_password
from app.auth.models import Role, User
# Setup Flask-Security
app = create_app()
with app.app_context():
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
user_datastore.create_user(email=os.environ.get('ADMIN_USERNAME'), password=encrypt_password(os.environ.get('ADMIN_PASSWORD')))
db.session.commit()
| 40.230769
| 131
| 0.806883
|
import os
from app import create_app, db, cli
from flask import current_app
from flask_security import Security, SQLAlchemyUserDatastore, current_user
from flask_security.utils import encrypt_password
from app.auth.models import Role, User
# Setup Flask-Security
app = create_app()
with app.app_context():
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
user_datastore.create_user(email=os.environ.get('ADMIN_USERNAME'), password=encrypt_password(os.environ.get('ADMIN_PASSWORD')))
db.session.commit()
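# Example invocation (credentials are illustrative placeholders):
#   ADMIN_USERNAME=admin@example.com ADMIN_PASSWORD=changeme python add_user.py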
| 0
| 0
| 0
|
741e3c39db2ab2880fc0332e1af0e3b72810ce23
| 776
|
py
|
Python
|
tests/test_position_weight_matrix.py
|
knutdrand/bionumpy
|
2a520ebfce19f346284bd5cf21d6197f6ba801ba
|
[
"MIT"
] | null | null | null |
tests/test_position_weight_matrix.py
|
knutdrand/bionumpy
|
2a520ebfce19f346284bd5cf21d6197f6ba801ba
|
[
"MIT"
] | null | null | null |
tests/test_position_weight_matrix.py
|
knutdrand/bionumpy
|
2a520ebfce19f346284bd5cf21d6197f6ba801ba
|
[
"MIT"
] | 1
|
2022-03-07T21:58:03.000Z
|
2022-03-07T21:58:03.000Z
|
import pytest
import numpy as np
from bionumpy.position_weight_matrix import PositionWeightMatrix
| 22.171429
| 77
| 0.632732
|
import pytest
import numpy as np
from bionumpy.position_weight_matrix import PositionWeightMatrix
@pytest.fixture
def matrix():
with np.errstate(divide='ignore'):
m = np.log([[0.4, 0.25],
[0.1, 0.25],
[0.4, 0.25],
[0.1, 0.25]])
return m
@pytest.fixture
def window():
return np.array([0, 1])
@pytest.fixture
def sequence():
return np.array([0, 1, 2, 3])
def test_window(window, matrix):
log_prob = PositionWeightMatrix(matrix)(window)
np.testing.assert_allclose(np.exp(log_prob), 0.4*0.25)
def test_sequence(sequence, matrix):
log_prob = PositionWeightMatrix(matrix).rolling_window(sequence)
np.testing.assert_allclose(np.exp(log_prob), [0.4*0.25, 0.025, 0.4*0.25])
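# Worked arithmetic for the rolling-window test above (illustrative): each
# length-2 window scores m[s_i, 0] + m[s_{i+1}, 1] in log space, i.e. the
# probabilities 0.4*0.25, 0.1*0.25 (= 0.025) and 0.4*0.25 for [0, 1, 2, 3].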
| 509
| 0
| 112
|
7489252a415aa1b1e78068fe1fc7e8079693d7fd
| 690
|
py
|
Python
|
src/script.py
|
FabricioCrespo/fairMOT_simple
|
d3be761935c09a37493cb86f6bfd08504c380563
|
[
"MIT"
] | 19
|
2020-07-31T07:22:32.000Z
|
2022-02-28T10:00:23.000Z
|
src/script.py
|
FabricioCrespo/fairMOT_simple
|
d3be761935c09a37493cb86f6bfd08504c380563
|
[
"MIT"
] | 7
|
2020-09-26T14:49:16.000Z
|
2022-03-12T00:38:08.000Z
|
src/script.py
|
FabricioCrespo/fairMOT_simple
|
d3be761935c09a37493cb86f6bfd08504c380563
|
[
"MIT"
] | 14
|
2020-09-11T19:39:38.000Z
|
2021-10-11T12:57:55.000Z
|
import argparse
from track import *
parser = argparse.ArgumentParser()
parser.add_argument('-mp','--model_path',help='path to model',type=str)
parser.add_argument('-vp','--video_path',help='path to the video',type=str)
parser.add_argument('-od','--output_dir',help='path to save the video',type=str)
if __name__=='__main__':
args = parser.parse_args()
out_dir = args.output_dir
model_path = args.model_path
video_path = args.video_path
dl = datasets.LoadVideo(video_path, (1088,608))
opt = opts().init()
opt.load_model = model_path
show_image = False
output_dir = out_dir
eval_seq(opt, dl, 'mot',save_dir=output_dir, show_image=show_image)
| 28.75
| 80
| 0.704348
|
import argparse
from track import *
parser = argparse.ArgumentParser()
parser.add_argument('-mp','--model_path',help='path to model',type=str)
parser.add_argument('-vp','--video_path',help='path to the video',type=str)
parser.add_argument('-od','--output_dir',help='path to save the video',type=str)
if __name__=='__main__':
args = parser.parse_args()
out_dir = args.output_dir
model_path = args.model_path
video_path = args.video_path
dl = datasets.LoadVideo(video_path, (1088,608))
opt = opts().init()
opt.load_model = model_path
show_image = False
output_dir = out_dir
eval_seq(opt, dl, 'mot',save_dir=output_dir, show_image=show_image)
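# Example invocation (paths are illustrative placeholders):
#   python script.py -mp models/fairmot.pth -vp videos/demo.mp4 -od results/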
| 0
| 0
| 0
|
2bf5f9ed6b0ed77069900efcb2b23bd2ce0282f2
| 1,921
|
py
|
Python
|
python-packages/pyRiemann-0.2.2/tests/test_clustering.py
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
ee45bee6f96cdb6d91184abc16f41bba1546c943
|
[
"BSD-3-Clause"
] | 2
|
2017-08-13T14:09:32.000Z
|
2018-07-16T23:39:00.000Z
|
python-packages/pyRiemann-0.2.2/tests/test_clustering.py
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
ee45bee6f96cdb6d91184abc16f41bba1546c943
|
[
"BSD-3-Clause"
] | null | null | null |
python-packages/pyRiemann-0.2.2/tests/test_clustering.py
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
ee45bee6f96cdb6d91184abc16f41bba1546c943
|
[
"BSD-3-Clause"
] | 2
|
2018-04-02T06:45:11.000Z
|
2018-07-16T23:39:02.000Z
|
import numpy as np
from pyriemann.clustering import Kmeans,KmeansPerClassTransform
def generate_cov(Nt,Ne):
"""Generate a set of cavariances matrices for test purpose"""
diags = 1.0+0.1*np.random.randn(Nt,Ne)
covmats = np.empty((Nt,Ne,Ne))
for i in range(Nt):
covmats[i] = np.diag(diags[i])
return covmats
def test_Kmeans_init():
"""Test init of Kmeans"""
km = Kmeans(2)
def test_Kmeans_fit():
"""Test Fit of Kmeans"""
covset = generate_cov(20,3)
km = Kmeans(2)
km.fit(covset)
def test_Kmeans_fit_with_init():
"""Test Fit of Kmeans wit matric initialization"""
covset = generate_cov(20,3)
km = Kmeans(2,init=covset[0:2])
km.fit(covset)
def test_Kmeans_fit_with_y():
"""Test Fit of Kmeans with a given y"""
covset = generate_cov(20,3)
labels = np.array([0,1]).repeat(10)
km = Kmeans(2)
km.fit(covset,y=labels)
def test_Kmeans_fit_parallel():
"""Test Fit of Kmeans using paralell"""
covset = generate_cov(20,3)
km = Kmeans(2,n_jobs=2)
km.fit(covset)
def test_Kmeans_predict():
"""Test prediction of Kmeans"""
covset = generate_cov(20,3)
km = Kmeans(2)
km.fit(covset)
km.predict(covset)
def test_Kmeans_transform():
"""Test transform of Kmeans"""
covset = generate_cov(20,3)
km = Kmeans(2)
km.fit(covset)
km.transform(covset)
def test_KmeansPCT_init():
"""Test init of Kmeans PCT"""
km = KmeansPerClassTransform(2)
def test_KmeansPCT_fit():
"""Test Fit of Kmeans PCT"""
covset = generate_cov(20,3)
labels = np.array([0,1]).repeat(10)
km = KmeansPerClassTransform(2)
km.fit(covset,labels)
def test_KmeansPCT_transform():
"""Test Transform of Kmeans PCT"""
covset = generate_cov(20,3)
labels = np.array([0,1]).repeat(10)
km = KmeansPerClassTransform(2)
km.fit(covset,labels)
km.transform(covset)
| 26.680556
| 65
| 0.647579
|
import numpy as np
from pyriemann.clustering import Kmeans,KmeansPerClassTransform
def generate_cov(Nt,Ne):
"""Generate a set of cavariances matrices for test purpose"""
diags = 1.0+0.1*np.random.randn(Nt,Ne)
covmats = np.empty((Nt,Ne,Ne))
for i in range(Nt):
covmats[i] = np.diag(diags[i])
return covmats
def test_Kmeans_init():
"""Test init of Kmeans"""
km = Kmeans(2)
def test_Kmeans_fit():
"""Test Fit of Kmeans"""
covset = generate_cov(20,3)
km = Kmeans(2)
km.fit(covset)
def test_Kmeans_fit_with_init():
"""Test Fit of Kmeans wit matric initialization"""
covset = generate_cov(20,3)
km = Kmeans(2,init=covset[0:2])
km.fit(covset)
def test_Kmeans_fit_with_y():
"""Test Fit of Kmeans with a given y"""
covset = generate_cov(20,3)
labels = np.array([0,1]).repeat(10)
km = Kmeans(2)
km.fit(covset,y=labels)
def test_Kmeans_fit_parallel():
"""Test Fit of Kmeans using paralell"""
covset = generate_cov(20,3)
km = Kmeans(2,n_jobs=2)
km.fit(covset)
def test_Kmeans_predict():
"""Test prediction of Kmeans"""
covset = generate_cov(20,3)
km = Kmeans(2)
km.fit(covset)
km.predict(covset)
def test_Kmeans_transform():
"""Test transform of Kmeans"""
covset = generate_cov(20,3)
km = Kmeans(2)
km.fit(covset)
km.transform(covset)
def test_KmeansPCT_init():
"""Test init of Kmeans PCT"""
km = KmeansPerClassTransform(2)
def test_KmeansPCT_fit():
"""Test Fit of Kmeans PCT"""
covset = generate_cov(20,3)
labels = np.array([0,1]).repeat(10)
km = KmeansPerClassTransform(2)
km.fit(covset,labels)
def test_KmeansPCT_transform():
"""Test Transform of Kmeans PCT"""
covset = generate_cov(20,3)
labels = np.array([0,1]).repeat(10)
km = KmeansPerClassTransform(2)
km.fit(covset,labels)
km.transform(covset)
| 0
| 0
| 0
|
62b11e6c5da0cc2133a92ea451b6649128fcb41b
| 51,262
|
py
|
Python
|
raphael/utils/net.py
|
major1201/raphael
|
18d7060834be7645b66144ba2a1638f3e1db2dd2
|
[
"MIT"
] | null | null | null |
raphael/utils/net.py
|
major1201/raphael
|
18d7060834be7645b66144ba2a1638f3e1db2dd2
|
[
"MIT"
] | null | null | null |
raphael/utils/net.py
|
major1201/raphael
|
18d7060834be7645b66144ba2a1638f3e1db2dd2
|
[
"MIT"
] | null | null | null |
# encoding= utf-8
from __future__ import division, absolute_import, with_statement, print_function
import requests
def download_file(url, file_path, params=None, proxies=None, request_session=None, cookies=None):
"""
proxies = {
"http": "http://10.10.1.10:3128",
"https": "http://10.10.1.10:1080",
}
"""
if not request_session:
request_session = requests.session()
r = request_session.get(url, params=params, stream=True, proxies=proxies, cookies=cookies)
with open(file_path, 'wb') as f:
for chunk in r.iter_content(1024):
if chunk:
f.write(chunk)
f.flush()
| 70.030055
| 145
| 0.635324
|
# encoding= utf-8
from __future__ import division, absolute_import, with_statement, print_function
import requests
def download_file(url, file_path, params=None, proxies=None, request_session=None, cookies=None):
"""
proxies = {
"http": "http://10.10.1.10:3128",
"https": "http://10.10.1.10:1080",
}
"""
if not request_session:
request_session = requests.session()
r = request_session.get(url, params=params, stream=True, proxies=proxies, cookies=cookies)
with open(file_path, 'wb') as f:
for chunk in r.iter_content(1024):
if chunk:
f.write(chunk)
f.flush()
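# --- Usage sketch (illustrative; URL, path and proxy are placeholders) --------
def _download_demo():
    download_file('https://example.com/archive.zip', '/tmp/archive.zip',
                  proxies={'http': 'http://10.10.1.10:3128'})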
def cidr2netmask(cidr):
if cidr < 0 or cidr > 32:
raise ValueError('CIDR should be in range [0, 32].')
bin_ip = '1' * cidr + '0' * (32 - cidr)
return bin2ip(bin_ip)
def netmask2cidr(netmask):
mask_arr = netmask.split('.')
return len(''.join(list(map(lambda part: bin(int(part))[2:].zfill(8), mask_arr))).rstrip('0'))
def ip2bin(ip):
return ''.join(list(map(lambda part: bin(int(part))[2:].zfill(8), ip.split('.'))))
def bin2ip(bin_ip):
segment = [bin_ip[i: i + 8] for i in range(0, 32, 8)]
return '.'.join(str(int(s, 2)) for s in segment)
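# --- Quick sanity checks for the helpers above (illustrative additions) -------
def _netmask_demo():
    assert cidr2netmask(24) == '255.255.255.0'
    assert netmask2cidr('255.255.255.0') == 24
    assert bin2ip(ip2bin('192.168.1.1')) == '192.168.1.1'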
def get_content_type_by_ext(ext, df='application/octet-stream'):
m = {
".123": "application/vnd.lotus-1-2-3", # Lotus 1-2-3
".3dml": "text/vnd.in3d.3dml", # In3D - 3DML
".3g2": "video/3gpp2", # 3GP2
".3gp": "video/3gpp", # 3GP
".7z": "application/x-7z-compressed", # 7-Zip
".aab": "application/x-authorware-bin", # Adobe (Macropedia) Authorware - Binary File
".aac": "audio/x-aac", # Advanced Audio Coding (AAC)
".aam": "application/x-authorware-map", # Adobe (Macropedia) Authorware - Map
".aas": "application/x-authorware-seg", # Adobe (Macropedia) Authorware - Segment File
".abw": "application/x-abiword", # AbiWord
".ac": "application/pkix-attr-cert", # Attribute Certificate
".acc": "application/vnd.americandynamics.acc", # Active Content Compression
".ace": "application/x-ace-compressed", # Ace Archive
".acu": "application/vnd.acucobol", # ACU Cobol
".adp": "audio/adpcm", # Adaptive differential pulse-code modulation
".aep": "application/vnd.audiograph", # Audiograph
".afp": "application/vnd.ibm.modcap", # MO:DCA-P
".ahead": "application/vnd.ahead.space", # Ahead AIR Application
".ai": "application/postscript", # PostScript
".aif": "audio/x-aiff", # Audio Interchange File Format
".air": "application/vnd.adobe.air-application-installer-package+zip", # Adobe AIR Application
".ait": "application/vnd.dvb.ait", # Digital Video Broadcasting
".ami": "application/vnd.amiga.ami", # AmigaDE
".apk": "application/vnd.android.package-archive", # Android Package Archive
".application": "application/x-ms-application", # Microsoft ClickOnce
".apr": "application/vnd.lotus-approach", # Lotus Approach
".asf": "video/x-ms-asf", # Microsoft Advanced Systems Format (ASF)
".aso": "application/vnd.accpac.simply.aso", # Simply Accounting
".atc": "application/vnd.acucorp", # ACU Cobol
".atom": "application/atom+xml", # Atom Syndication Format
".atomcat": "application/atomcat+xml", # Atom Publishing Protocol
".atomsvc": "application/atomsvc+xml", # Atom Publishing Protocol Service Document
".atx": "application/vnd.antix.game-component", # Antix Game Player
".au": "audio/basic", # Sun Audio - Au file format
".avi": "video/x-msvideo", # Audio Video Interleave (AVI)
".aw": "application/applixware", # Applixware
".azf": "application/vnd.airzip.filesecure.azf", # AirZip FileSECURE
".azs": "application/vnd.airzip.filesecure.azs", # AirZip FileSECURE
".azw": "application/vnd.amazon.ebook", # Amazon Kindle eBook format
".bcpio": "application/x-bcpio", # Binary CPIO Archive
".bdf": "application/x-font-bdf", # Glyph Bitmap Distribution Format
".bdm": "application/vnd.syncml.dm+wbxml", # SyncML - Device Management
".bed": "application/vnd.realvnc.bed", # RealVNC
".bh2": "application/vnd.fujitsu.oasysprs", # Fujitsu Oasys
".bin": "application/octet-stream", # Binary Data
".bmi": "application/vnd.bmi", # BMI Drawing Data Interchange
".bmp": "image/bmp", # Bitmap Image File
".box": "application/vnd.previewsystems.box", # Preview Systems ZipLock/VBox
".btif": "image/prs.btif", # BTIF
".bz": "application/x-bzip", # Bzip Archive
".bz2": "application/x-bzip2", # Bzip2 Archive
".c": "text/x-c", # C Source File
".c11amc": "application/vnd.cluetrust.cartomobile-config", # ClueTrust CartoMobile - Config
".c11amz": "application/vnd.cluetrust.cartomobile-config-pkg", # ClueTrust CartoMobile - Config Package
".c4g": "application/vnd.clonk.c4group", # Clonk Game
".cab": "application/vnd.ms-cab-compressed", # Microsoft Cabinet File
".car": "application/vnd.curl.car", # CURL Applet
".cat": "application/vnd.ms-pki.seccat", # Microsoft Trust UI Provider - Security Catalog
".ccxml": "application/ccxml+xml,", # Voice Browser Call Control
".cdbcmsg": "application/vnd.contact.cmsg", # CIM Database
".cdkey": "application/vnd.mediastation.cdkey", # MediaRemote
".cdmia": "application/cdmi-capability", # Cloud Data Management Interface (CDMI) - Capability
".cdmic": "application/cdmi-container", # Cloud Data Management Interface (CDMI) - Contaimer
".cdmid": "application/cdmi-domain", # Cloud Data Management Interface (CDMI) - Domain
".cdmio": "application/cdmi-object", # Cloud Data Management Interface (CDMI) - Object
".cdmiq": "application/cdmi-queue", # Cloud Data Management Interface (CDMI) - Queue
".cdx": "chemical/x-cdx", # ChemDraw eXchange file
".cdxml": "application/vnd.chemdraw+xml", # CambridgeSoft Chem Draw
".cdy": "application/vnd.cinderella", # Interactive Geometry Software Cinderella
".cer": "application/pkix-cert", # Internet Public Key Infrastructure - Certificate
".cgm": "image/cgm", # Computer Graphics Metafile
".chat": "application/x-chat", # pIRCh
".chm": "application/vnd.ms-htmlhelp", # Microsoft Html Help File
".chrt": "application/vnd.kde.kchart", # KDE KOffice Office Suite - KChart
".cif": "chemical/x-cif", # Crystallographic Interchange Format
".cii": "application/vnd.anser-web-certificate-issue-initiation", # ANSER-WEB Terminal Client - Certificate Issue
".cil": "application/vnd.ms-artgalry", # Microsoft Artgalry
".cla": "application/vnd.claymore", # Claymore Data Files
".class": "application/java-vm", # Java Bytecode File
".clkk": "application/vnd.crick.clicker.keyboard", # CrickSoftware - Clicker - Keyboard
".clkp": "application/vnd.crick.clicker.palette", # CrickSoftware - Clicker - Palette
".clkt": "application/vnd.crick.clicker.template", # CrickSoftware - Clicker - Template
".clkw": "application/vnd.crick.clicker.wordbank", # CrickSoftware - Clicker - Wordbank
".clkx": "application/vnd.crick.clicker", # CrickSoftware - Clicker
".clp": "application/x-msclip", # Microsoft Clipboard Clip
".cmc": "application/vnd.cosmocaller", # CosmoCaller
".cmdf": "chemical/x-cmdf", # CrystalMaker Data Format
".cml": "chemical/x-cml", # Chemical Markup Language
".cmp": "application/vnd.yellowriver-custom-menu", # CustomMenu
".cmx": "image/x-cmx", # Corel Metafile Exchange (CMX)
".cod": "application/vnd.rim.cod", # Blackberry COD File
".cpio": "application/x-cpio", # CPIO Archive
".cpt": "application/mac-compactpro", # Compact Pro
".crd": "application/x-mscardfile", # Microsoft Information Card
".crl": "application/pkix-crl", # Internet Public Key Infrastructure - Certificate Revocation Lists
".cryptonote": "application/vnd.rig.cryptonote", # CryptoNote
".csh": "application/x-csh", # C Shell Script
".csml": "chemical/x-csml", # Chemical Style Markup Language
".csp": "application/vnd.commonspace", # Sixth Floor Media - CommonSpace
".css": "text/css", # Cascading Style Sheets (CSS)
".csv": "text/csv", # Comma-Seperated Values
".cu": "application/cu-seeme", # CU-SeeMe
".curl": "text/vnd.curl", # Curl - Applet
".cww": "application/prs.cww", # CU-Writer
".dae": "model/vnd.collada+xml", # COLLADA
".daf": "application/vnd.mobius.daf", # Mobius Management Systems - UniversalArchive
".davmount": "application/davmount+xml", # Web Distributed Authoring and Versioning
".dcurl": "text/vnd.curl.dcurl", # Curl - Detached Applet
".dd2": "application/vnd.oma.dd2+xml", # OMA Download Agents
".ddd": "application/vnd.fujixerox.ddd", # Fujitsu - Xerox 2D CAD Data
".deb": "application/x-debian-package", # Debian Package
".der": "application/x-x509-ca-cert", # X.509 Certificate
".dfac": "application/vnd.dreamfactory", # DreamFactory
".dir": "application/x-director", # Adobe Shockwave Player
".dis": "application/vnd.mobius.dis", # Mobius Management Systems - Distribution Database
".djvu": "image/vnd.djvu", # DjVu
".dna": "application/vnd.dna", # New Moon Liftoff/DNA
".doc": "application/msword", # Microsoft Word
".docm": "application/vnd.ms-word.document.macroenabled.12", # Micosoft Word - Macro-Enabled Document
".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document", # Microsoft Office - OOXML - Word Document
".dotm": "application/vnd.ms-word.template.macroenabled.12", # Micosoft Word - Macro-Enabled Template
".dotx": "application/vnd.openxmlformats-officedocument.wordprocessingml.template", # Microsoft Office - OOXML - Word Document Template
".dp": "application/vnd.osgi.dp", # OSGi Deployment Package
".dpg": "application/vnd.dpgraph", # DPGraph
".dra": "audio/vnd.dra", # DRA Audio
".dsc": "text/prs.lines.tag", # PRS Lines Tag
".dssc": "application/dssc+der", # Data Structure for the Security Suitability of Cryptographic Algorithms
".dtb": "application/x-dtbook+xml", # Digital Talking Book
".dtd": "application/xml-dtd", # Document Type Definition
".dts": "audio/vnd.dts", # DTS Audio
".dtshd": "audio/vnd.dts.hd", # DTS High Definition Audio
".dvi": "application/x-dvi", # Device Independent File Format (DVI)
".dwf": "model/vnd.dwf", # Autodesk Design Web Format (DWF)
".dwg": "image/vnd.dwg", # DWG Drawing
".dxf": "image/vnd.dxf", # AutoCAD DXF
".dxp": "application/vnd.spotfire.dxp", # TIBCO Spotfire
".ecelp4800": "audio/vnd.nuera.ecelp4800", # Nuera ECELP 4800
".ecelp7470": "audio/vnd.nuera.ecelp7470", # Nuera ECELP 7470
".ecelp9600": "audio/vnd.nuera.ecelp9600", # Nuera ECELP 9600
".edm": "application/vnd.novadigm.edm", # Novadigm's RADIA and EDM products
".edx": "application/vnd.novadigm.edx", # Novadigm's RADIA and EDM products
".efif": "application/vnd.picsel", # Pcsel eFIF File
".ei6": "application/vnd.pg.osasli", # Proprietary P&G Standard Reporting System
".eml": "message/rfc822", # Email Message
".emma": "application/emma+xml", # Extensible MultiModal Annotation
".eol": "audio/vnd.digital-winds", # Digital Winds Music
".eot": "application/vnd.ms-fontobject", # Microsoft Embedded OpenType
".epub": "application/epub+zip", # Electronic Publication
".es": "application/ecmascript", # ECMAScript
".es3": "application/vnd.eszigno3+xml", # MICROSEC e-Szign¢
".esf": "application/vnd.epson.esf", # QUASS Stream Player
".etx": "text/x-setext", # Setext
".exe": "application/x-msdownload", # Microsoft Application
".exi": "application/exi", # Efficient XML Interchange
".ext": "application/vnd.novadigm.ext", # Novadigm's RADIA and EDM products
".ez2": "application/vnd.ezpix-album", # EZPix Secure Photo Album
".ez3": "application/vnd.ezpix-package", # EZPix Secure Photo Album
".f": "text/x-fortran", # Fortran Source File
".f4v": "video/x-f4v", # Flash Video
".fbs": "image/vnd.fastbidsheet", # FastBid Sheet
".fcs": "application/vnd.isac.fcs", # International Society for Advancement of Cytometry
".fdf": "application/vnd.fdf", # Forms Data Format
".fe_launch": "application/vnd.denovo.fcselayout-link", # FCS Express Layout Link
".fg5": "application/vnd.fujitsu.oasysgp", # Fujitsu Oasys
".fh": "image/x-freehand", # FreeHand MX
".fig": "application/x-xfig", # Xfig
".fli": "video/x-fli", # FLI/FLC Animation Format
".flo": "application/vnd.micrografx.flo", # Micrografx
".flv": "video/x-flv", # Flash Video
".flw": "application/vnd.kde.kivio", # KDE KOffice Office Suite - Kivio
".flx": "text/vnd.fmi.flexstor", # FLEXSTOR
".fly": "text/vnd.fly", # mod_fly / fly.cgi
".fm": "application/vnd.framemaker", # FrameMaker Normal Format
".fnc": "application/vnd.frogans.fnc", # Frogans Player
".fpx": "image/vnd.fpx", # FlashPix
".fsc": "application/vnd.fsc.weblaunch", # Friendly Software Corporation
".fst": "image/vnd.fst", # FAST Search & Transfer ASA
".ftc": "application/vnd.fluxtime.clip", # FluxTime Clip
".fti": "application/vnd.anser-web-funds-transfer-initiation", # ANSER-WEB Terminal Client - Web Funds Transfer
".fvt": "video/vnd.fvt", # FAST Search & Transfer ASA
".fxp": "application/vnd.adobe.fxp", # Adobe Flex Project
".fzs": "application/vnd.fuzzysheet", # FuzzySheet
".g2w": "application/vnd.geoplan", # GeoplanW
".g3": "image/g3fax", # G3 Fax Image
".g3w": "application/vnd.geospace", # GeospacW
".gac": "application/vnd.groove-account", # Groove - Account
".gdl": "model/vnd.gdl", # Geometric Description Language (GDL)
".geo": "application/vnd.dynageo", # DynaGeo
".gex": "application/vnd.geometry-explorer", # GeoMetry Explorer
".ggb": "application/vnd.geogebra.file", # GeoGebra
".ggt": "application/vnd.geogebra.tool", # GeoGebra
".ghf": "application/vnd.groove-help", # Groove - Help
".gif": "image/gif", # Graphics Interchange Format
".gim": "application/vnd.groove-identity-message", # Groove - Identity Message
".gmx": "application/vnd.gmx", # GameMaker ActiveX
".gnumeric": "application/x-gnumeric", # Gnumeric
".gph": "application/vnd.flographit", # NpGraphIt
".gqf": "application/vnd.grafeq", # GrafEq
".gram": "application/srgs", # Speech Recognition Grammar Specification
".grv": "application/vnd.groove-injector", # Groove - Injector
".grxml": "application/srgs+xml", # Speech Recognition Grammar Specification - XML
".gsf": "application/x-font-ghostscript", # Ghostscript Font
".gtar": "application/x-gtar", # GNU Tar Files
".gtm": "application/vnd.groove-tool-message", # Groove - Tool Message
".gtw": "model/vnd.gtw", # Gen-Trix Studio
".gv": "text/vnd.graphviz", # Graphviz
".gxt": "application/vnd.geonext", # GEONExT and JSXGraph
".h261": "video/h261", # H.261
".h263": "video/h263", # H.263
".h264": "video/h264", # H.264
".hal": "application/vnd.hal+xml", # Hypertext Application Language
".hbci": "application/vnd.hbci", # Homebanking Computer Interface (HBCI)
".hdf": "application/x-hdf", # Hierarchical Data Format
".hlp": "application/winhlp", # WinHelp
".hpgl": "application/vnd.hp-hpgl", # HP-GL/2 and HP RTL
".hpid": "application/vnd.hp-hpid", # Hewlett Packard Instant Delivery
".hps": "application/vnd.hp-hps", # Hewlett-Packard's WebPrintSmart
".hqx": "application/mac-binhex40", # Macintosh BinHex 4.0
".htke": "application/vnd.kenameaapp", # Kenamea App
".html": "text/html", # HyperText Markup Language (HTML)
".hvd": "application/vnd.yamaha.hv-dic", # HV Voice Dictionary
".hvp": "application/vnd.yamaha.hv-voice", # HV Voice Parameter
".hvs": "application/vnd.yamaha.hv-script", # HV Script
".i2g": "application/vnd.intergeo", # Interactive Geometry Software
".icc": "application/vnd.iccprofile", # ICC profile
".ice": "x-conference/x-cooltalk", # CoolTalk
".ico": "image/x-icon", # Icon Image
".ics": "text/calendar", # iCalendar
".ief": "image/ief", # Image Exchange Format
".ifm": "application/vnd.shana.informed.formdata", # Shana Informed Filler
".igl": "application/vnd.igloader", # igLoader
".igm": "application/vnd.insors.igm", # IOCOM Visimeet
".igs": "model/iges", # Initial Graphics Exchange Specification (IGES)
".igx": "application/vnd.micrografx.igx", # Micrografx iGrafx Professional
".iif": "application/vnd.shana.informed.interchange", # Shana Informed Filler
".imp": "application/vnd.accpac.simply.imp", # Simply Accounting - Data Import
".ims": "application/vnd.ms-ims", # Microsoft Class Server
".ipfix": "application/ipfix", # Internet Protocol Flow Information Export
".ipk": "application/vnd.shana.informed.package", # Shana Informed Filler
".irm": "application/vnd.ibm.rights-management", # IBM DB2 Rights Manager
".irp": "application/vnd.irepository.package+xml", # iRepository / Lucidoc Editor
".itp": "application/vnd.shana.informed.formtemplate", # Shana Informed Filler
".ivp": "application/vnd.immervision-ivp", # ImmerVision PURE Players
".ivu": "application/vnd.immervision-ivu", # ImmerVision PURE Players
".jad": "text/vnd.sun.j2me.app-descriptor", # J2ME App Descriptor
".jam": "application/vnd.jam", # Lightspeed Audio Lab
".jar": "application/java-archive", # Java Archive
".java": "text/x-java-source,java", # Java Source File
".jisp": "application/vnd.jisp", # RhymBox
".jlt": "application/vnd.hp-jlyt", # HP Indigo Digital Press - Job Layout Languate
".jnlp": "application/x-java-jnlp-file", # Java Network Launching Protocol
".joda": "application/vnd.joost.joda-archive", # Joda Archive
".jpeg": "image/jpeg", # JPEG Image
".jpg": "image/jpeg", # JPEG Image
".jpgv": "video/jpeg", # JPGVideo
".jpm": "video/jpm", # JPEG 2000 Compound Image File Format
".js": "application/javascript", # JavaScript
".json": "application/json", # JavaScript Object Notation (JSON)
".karbon": "application/vnd.kde.karbon", # KDE KOffice Office Suite - Karbon
".kfo": "application/vnd.kde.kformula", # KDE KOffice Office Suite - Kformula
".kia": "application/vnd.kidspiration", # Kidspiration
".kml": "application/vnd.google-earth.kml+xml", # Google Earth - KML
".kmz": "application/vnd.google-earth.kmz", # Google Earth - Zipped KML
".kne": "application/vnd.kinar", # Kinar Applications
".kon": "application/vnd.kde.kontour", # KDE KOffice Office Suite - Kontour
".kpr": "application/vnd.kde.kpresenter", # KDE KOffice Office Suite - Kpresenter
".ksp": "application/vnd.kde.kspread", # KDE KOffice Office Suite - Kspread
".ktx": "image/ktx", # OpenGL Textures (KTX)
".ktz": "application/vnd.kahootz", # Kahootz
".kwd": "application/vnd.kde.kword", # KDE KOffice Office Suite - Kword
".lasxml": "application/vnd.las.las+xml", # Laser App Enterprise
".latex": "application/x-latex", # LaTeX
".lbd": "application/vnd.llamagraphics.life-balance.desktop", # Life Balance - Desktop Edition
".lbe": "application/vnd.llamagraphics.life-balance.exchange+xml", # Life Balance - Exchange Format
".les": "application/vnd.hhe.lesson-player", # Archipelago Lesson Player
".link66": "application/vnd.route66.link66+xml", # ROUTE 66 Location Based Services
".lrm": "application/vnd.ms-lrm", # Microsoft Learning Resource Module
".ltf": "application/vnd.frogans.ltf", # Frogans Player
".lvp": "audio/vnd.lucent.voice", # Lucent Voice
".lwp": "application/vnd.lotus-wordpro", # Lotus Wordpro
".m21": "application/mp21", # MPEG-21
".m3u": "audio/x-mpegurl", # M3U (Multimedia Playlist)
".m3u8": "application/vnd.apple.mpegurl", # Multimedia Playlist Unicode
".m4v": "video/x-m4v", # M4v
".ma": "application/mathematica", # Mathematica Notebooks
".mads": "application/mads+xml", # Metadata Authority Description Schema
".mag": "application/vnd.ecowin.chart", # EcoWin Chart
".mathml": "application/mathml+xml", # Mathematical Markup Language
".mbk": "application/vnd.mobius.mbk", # Mobius Management Systems - Basket file
".mbox": "application/mbox", # Mbox database files
".mc1": "application/vnd.medcalcdata", # MedCalc
".mcd": "application/vnd.mcd", # Micro CADAM Helix D&D
".mcurl": "text/vnd.curl.mcurl", # Curl - Manifest File
".mdb": "application/x-msaccess", # Microsoft Access
".mdi": "image/vnd.ms-modi", # Microsoft Document Imaging Format
".meta4": "application/metalink4+xml", # Metalink
".mets": "application/mets+xml", # Metadata Encoding and Transmission Standard
".mfm": "application/vnd.mfmp", # Melody Format for Mobile Platform
".mgp": "application/vnd.osgeo.mapguide.package", # MapGuide DBXML
".mgz": "application/vnd.proteus.magazine", # EFI Proteus
".mid": "audio/midi", # MIDI - Musical Instrument Digital Interface
".mif": "application/vnd.mif", # FrameMaker Interchange Format
".mj2": "video/mj2", # Motion JPEG 2000
".mlp": "application/vnd.dolby.mlp", # Dolby Meridian Lossless Packing
".mmd": "application/vnd.chipnuts.karaoke-mmd", # Karaoke on Chipnuts Chipsets
".mmf": "application/vnd.smaf", # SMAF File
".mmr": "image/vnd.fujixerox.edmics-mmr", # EDMICS 2000
".mny": "application/x-msmoney", # Microsoft Money
".mods": "application/mods+xml", # Metadata Object Description Schema
".movie": "video/x-sgi-movie", # SGI Movie
".mp4": "application/mp4", # MPEG4
# ".mp4": "video/mp4", # MPEG-4 Video
".mp4a": "audio/mp4", # MPEG-4 Audio
".mpc": "application/vnd.mophun.certificate", # Mophun Certificate
".mpeg": "video/mpeg", # MPEG Video
".mpga": "audio/mpeg", # MPEG Audio
".mpkg": "application/vnd.apple.installer+xml", # Apple Installer Package
".mpm": "application/vnd.blueice.multipass", # Blueice Research Multipass
".mpn": "application/vnd.mophun.application", # Mophun VM
".mpp": "application/vnd.ms-project", # Microsoft Project
".mpy": "application/vnd.ibm.minipay", # MiniPay
".mqy": "application/vnd.mobius.mqy", # Mobius Management Systems - Query File
".mrc": "application/marc", # MARC Formats
".mrcx": "application/marcxml+xml", # MARC21 XML Schema
".mscml": "application/mediaservercontrol+xml", # Media Server Control Markup Language
".mseq": "application/vnd.mseq", # 3GPP MSEQ File
".msf": "application/vnd.epson.msf", # QUASS Stream Player
".msh": "model/mesh", # Mesh Data Type
".msl": "application/vnd.mobius.msl", # Mobius Management Systems - Script Language
".msty": "application/vnd.muvee.style", # Muvee Automatic Video Editing
".mts": "model/vnd.mts", # Virtue MTS
".mus": "application/vnd.musician", # MUsical Score Interpreted Code Invented for the ASCII designation of Notation
".musicxml": "application/vnd.recordare.musicxml+xml", # Recordare Applications
".mvb": "application/x-msmediaview", # Microsoft MediaView
".mwf": "application/vnd.mfer", # Medical Waveform Encoding Format
".mxf": "application/mxf", # Material Exchange Format
".mxl": "application/vnd.recordare.musicxml", # Recordare Applications
".mxml": "application/xv+xml", # MXML
".mxs": "application/vnd.triscape.mxs", # Triscape Map Explorer
".mxu": "video/vnd.mpegurl", # MPEG Url
".n-gage": "application/vnd.nokia.n-gage.symbian.install", # N-Gage Game Installer
".n3": "text/n3", # Notation3
".nbp": "application/vnd.wolfram.player", # Mathematica Notebook Player
".nc": "application/x-netcdf", # Network Common Data Form (NetCDF)
".ncx": "application/x-dtbncx+xml", # Navigation Control file for XML (for ePub)
".ngdat": "application/vnd.nokia.n-gage.data", # N-Gage Game Data
".nlu": "application/vnd.neurolanguage.nlu", # neuroLanguage
".nml": "application/vnd.enliven", # Enliven Viewer
".nnd": "application/vnd.noblenet-directory", # NobleNet Directory
".nns": "application/vnd.noblenet-sealer", # NobleNet Sealer
".nnw": "application/vnd.noblenet-web", # NobleNet Web
".npx": "image/vnd.net-fpx", # FlashPix
".nsf": "application/vnd.lotus-notes", # Lotus Notes
".oa2": "application/vnd.fujitsu.oasys2", # Fujitsu Oasys
".oa3": "application/vnd.fujitsu.oasys3", # Fujitsu Oasys
".oas": "application/vnd.fujitsu.oasys", # Fujitsu Oasys
".obd": "application/x-msbinder", # Microsoft Office Binder
".oda": "application/oda", # Office Document Architecture
".odb": "application/vnd.oasis.opendocument.database", # OpenDocument Database
".odc": "application/vnd.oasis.opendocument.chart", # OpenDocument Chart
".odf": "application/vnd.oasis.opendocument.formula", # OpenDocument Formula
".odft": "application/vnd.oasis.opendocument.formula-template", # OpenDocument Formula Template
".odg": "application/vnd.oasis.opendocument.graphics", # OpenDocument Graphics
".odi": "application/vnd.oasis.opendocument.image", # OpenDocument Image
".odm": "application/vnd.oasis.opendocument.text-master", # OpenDocument Text Master
".odp": "application/vnd.oasis.opendocument.presentation", # OpenDocument Presentation
".ods": "application/vnd.oasis.opendocument.spreadsheet", # OpenDocument Spreadsheet
".odt": "application/vnd.oasis.opendocument.text", # OpenDocument Text
".oga": "audio/ogg", # Ogg Audio
".ogv": "video/ogg", # Ogg Video
".ogx": "application/ogg", # Ogg
".onetoc": "application/onenote", # Microsoft OneNote
".opf": "application/oebps-package+xml", # Open eBook Publication Structure
".org": "application/vnd.lotus-organizer", # Lotus Organizer
".osf": "application/vnd.yamaha.openscoreformat", # Open Score Format
".osfpvg": "application/vnd.yamaha.openscoreformat.osfpvg+xml", # OSFPVG
".otc": "application/vnd.oasis.opendocument.chart-template", # OpenDocument Chart Template
".otf": "application/x-font-otf", # OpenType Font File
".otg": "application/vnd.oasis.opendocument.graphics-template", # OpenDocument Graphics Template
".oth": "application/vnd.oasis.opendocument.text-web", # Open Document Text Web
".oti": "application/vnd.oasis.opendocument.image-template", # OpenDocument Image Template
".otp": "application/vnd.oasis.opendocument.presentation-template", # OpenDocument Presentation Template
".ots": "application/vnd.oasis.opendocument.spreadsheet-template", # OpenDocument Spreadsheet Template
".ott": "application/vnd.oasis.opendocument.text-template", # OpenDocument Text Template
".oxt": "application/vnd.openofficeorg.extension", # Open Office Extension
".p": "text/x-pascal", # Pascal Source File
".p10": "application/pkcs10", # PKCS #10 - Certification Request Standard
".p12": "application/x-pkcs12", # PKCS #12 - Personal Information Exchange Syntax Standard
".p7b": "application/x-pkcs7-certificates", # PKCS #7 - Cryptographic Message Syntax Standard (Certificates)
".p7m": "application/pkcs7-mime", # PKCS #7 - Cryptographic Message Syntax Standard
".p7r": "application/x-pkcs7-certreqresp", # PKCS #7 - Cryptographic Message Syntax Standard (Certificate Request Response)
".p7s": "application/pkcs7-signature", # PKCS #7 - Cryptographic Message Syntax Standard
".p8": "application/pkcs8", # PKCS #8 - Private-Key Information Syntax Standard
".par": "text/plain-bas", # BAS Partitur Format
".paw": "application/vnd.pawaafile", # PawaaFILE
".pbd": "application/vnd.powerbuilder6", # PowerBuilder
".pbm": "image/x-portable-bitmap", # Portable Bitmap Format
".pcf": "application/x-font-pcf", # Portable Compiled Format
".pcl": "application/vnd.hp-pcl", # HP Printer Command Language
".pclxl": "application/vnd.hp-pclxl", # PCL 6 Enhanced (Formely PCL XL)
".pcurl": "application/vnd.curl.pcurl", # CURL Applet
".pcx": "image/x-pcx", # PCX Image
".pdb": "application/vnd.palm", # PalmOS Data
".pdf": "application/pdf", # Adobe Portable Document Format
".pfa": "application/x-font-type1", # PostScript Fonts
".pfr": "application/font-tdpfr", # Portable Font Resource
".pgm": "image/x-portable-graymap", # Portable Graymap Format
".pgn": "application/x-chess-pgn", # Portable Game Notation (Chess Games)
".pgp": "application/pgp-signature", # Pretty Good Privacy - Signature
".pic": "image/x-pict", # PICT Image
".pki": "application/pkixcmp", # Internet Public Key Infrastructure - Certificate Management Protocole
".pkipath": "application/pkix-pkipath", # Internet Public Key Infrastructure - Certification Path
".plb": "application/vnd.3gpp.pic-bw-large", # 3rd Generation Partnership Project - Pic Large
".plc": "application/vnd.mobius.plc", # Mobius Management Systems - Policy Definition Language File
".plf": "application/vnd.pocketlearn", # PocketLearn Viewers
".pls": "application/pls+xml", # Pronunciation Lexicon Specification
".pml": "application/vnd.ctc-posml", # PosML
".png": "image/png", # Portable Network Graphics (PNG)
".pnm": "image/x-portable-anymap", # Portable Anymap Image
".portpkg": "application/vnd.macports.portpkg", # MacPorts Port System
".potm": "application/vnd.ms-powerpoint.template.macroenabled.12", # Micosoft PowerPoint - Macro-Enabled Template File
".potx": "application/vnd.openxmlformats-officedocument.presentationml.template", # Microsoft Office - OOXML - Presentation Template
".ppam": "application/vnd.ms-powerpoint.addin.macroenabled.12", # Microsoft PowerPoint - Add-in file
".ppd": "application/vnd.cups-ppd", # Adobe PostScript Printer Description File Format
".ppm": "image/x-portable-pixmap", # Portable Pixmap Format
".ppsm": "application/vnd.ms-powerpoint.slideshow.macroenabled.12", # Microsoft PowerPoint - Macro-Enabled Slide Show File
".ppsx": "application/vnd.openxmlformats-officedocument.presentationml.slideshow", # Microsoft Office - OOXML - Presentation (Slideshow)
".ppt": "application/vnd.ms-powerpoint", # Microsoft PowerPoint
".pptm": "application/vnd.ms-powerpoint.presentation.macroenabled.12", # Microsoft PowerPoint - Macro-Enabled Presentation File
".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation", # Microsoft Office - OOXML - Presentation
".prc": "application/x-mobipocket-ebook", # Mobipocket
".pre": "application/vnd.lotus-freelance", # Lotus Freelance
".prf": "application/pics-rules", # PICSRules
".psb": "application/vnd.3gpp.pic-bw-small", # 3rd Generation Partnership Project - Pic Small
".psd": "image/vnd.adobe.photoshop", # Photoshop Document
".psf": "application/x-font-linux-psf", # PSF Fonts
".pskcxml": "application/pskc+xml", # Portable Symmetric Key Container
".ptid": "application/vnd.pvi.ptid1", # Princeton Video Image
".pub": "application/x-mspublisher", # Microsoft Publisher
".pvb": "application/vnd.3gpp.pic-bw-var", # 3rd Generation Partnership Project - Pic Var
".pwn": "application/vnd.3m.post-it-notes", # 3M Post It Notes
".pya": "audio/vnd.ms-playready.media.pya", # Microsoft PlayReady Ecosystem
".pyv": "video/vnd.ms-playready.media.pyv", # Microsoft PlayReady Ecosystem Video
".qam": "application/vnd.epson.quickanime", # QuickAnime Player
".qbo": "application/vnd.intu.qbo", # Open Financial Exchange
".qfx": "application/vnd.intu.qfx", # Quicken
".qps": "application/vnd.publishare-delta-tree", # PubliShare Objects
".qt": "video/quicktime", # Quicktime Video
".qxd": "application/vnd.quark.quarkxpress", # QuarkXpress
".ram": "audio/x-pn-realaudio", # Real Audio Sound
".rar": "application/x-rar-compressed", # RAR Archive
".ras": "image/x-cmu-raster", # CMU Image
".rcprofile": "application/vnd.ipunplugged.rcprofile", # IP Unplugged Roaming Client
".rdf": "application/rdf+xml", # Resource Description Framework
".rdz": "application/vnd.data-vision.rdz", # RemoteDocs R-Viewer
".rep": "application/vnd.businessobjects", # BusinessObjects
".res": "application/x-dtbresource+xml", # Digital Talking Book - Resource File
".rgb": "image/x-rgb", # Silicon Graphics RGB Bitmap
".rif": "application/reginfo+xml", # IMS Networks
".rip": "audio/vnd.rip", # Hit'n'Mix
".rl": "application/resource-lists+xml", # XML Resource Lists
".rlc": "image/vnd.fujixerox.edmics-rlc", # EDMICS 2000
".rld": "application/resource-lists-diff+xml", # XML Resource Lists Diff
".rm": "application/vnd.rn-realmedia", # RealMedia
".rmp": "audio/x-pn-realaudio-plugin", # Real Audio Sound
".rms": "application/vnd.jcp.javame.midlet-rms", # Mobile Information Device Profile
".rnc": "application/relax-ng-compact-syntax", # Relax NG Compact Syntax
".rp9": "application/vnd.cloanto.rp9", # RetroPlatform Player
".rpss": "application/vnd.nokia.radio-presets", # Nokia Radio Application - Preset
".rpst": "application/vnd.nokia.radio-preset", # Nokia Radio Application - Preset
".rq": "application/sparql-query", # SPARQL - Query
".rs": "application/rls-services+xml", # XML Resource Lists
".rsd": "application/rsd+xml", # Really Simple Discovery
".rss": "application/rss+xml", # RSS - Really Simple Syndication
".rtf": "application/rtf", # Rich Text Format
".rtx": "text/richtext", # Rich Text Format (RTF)
".s": "text/x-asm", # Assembler Source File
".saf": "application/vnd.yamaha.smaf-audio", # SMAF Audio
".sbml": "application/sbml+xml", # Systems Biology Markup Language
".sc": "application/vnd.ibm.secure-container", # IBM Electronic Media Management System - Secure Container
".scd": "application/x-msschedule", # Microsoft Schedule+
".scm": "application/vnd.lotus-screencam", # Lotus Screencam
".scq": "application/scvp-cv-request", # Server-Based Certificate Validation Protocol - Validation Request
".scs": "application/scvp-cv-response", # Server-Based Certificate Validation Protocol - Validation Response
".scurl": "text/vnd.curl.scurl", # Curl - Source Code
".sda": "application/vnd.stardivision.draw", # StarOffice - Draw
".sdc": "application/vnd.stardivision.calc", # StarOffice - Calc
".sdd": "application/vnd.stardivision.impress", # StarOffice - Impress
".sdkm": "application/vnd.solent.sdkm+xml", # SudokuMagic
".sdp": "application/sdp", # Session Description Protocol
".sdw": "application/vnd.stardivision.writer", # StarOffice - Writer
".see": "application/vnd.seemail", # SeeMail
".seed": "application/vnd.fdsn.seed", # Digital Siesmograph Networks - SEED Datafiles
".sema": "application/vnd.sema", # Secured eMail
".semd": "application/vnd.semd", # Secured eMail
".semf": "application/vnd.semf", # Secured eMail
".ser": "application/java-serialized-object", # Java Serialized Object
".setpay": "application/set-payment-initiation", # Secure Electronic Transaction - Payment
".setreg": "application/set-registration-initiation", # Secure Electronic Transaction - Registration
".sfd-hdstx": "application/vnd.hydrostatix.sof-data", # Hydrostatix Master Suite
".sfs": "application/vnd.spotfire.sfs", # TIBCO Spotfire
".sgl": "application/vnd.stardivision.writer-global", # StarOffice - Writer (Global)
".sgml": "text/sgml", # Standard Generalized Markup Language (SGML)
".sh": "application/x-sh", # Bourne Shell Script
".shar": "application/x-shar", # Shell Archive
".shf": "application/shf+xml", # S Hexdump Format
".sis": "application/vnd.symbian.install", # Symbian Install Package
".sit": "application/x-stuffit", # Stuffit Archive
".sitx": "application/x-stuffitx", # Stuffit Archive
".skp": "application/vnd.koan", # SSEYO Koan Play File
".sldm": "application/vnd.ms-powerpoint.slide.macroenabled.12", # Microsoft PowerPoint - Macro-Enabled Open XML Slide
".sldx": "application/vnd.openxmlformats-officedocument.presentationml.slide", # Microsoft Office - OOXML - Presentation (Slide)
".slt": "application/vnd.epson.salt", # SimpleAnimeLite Player
".sm": "application/vnd.stepmania.stepchart", # StepMania
".smf": "application/vnd.stardivision.math", # StarOffice - Math
".smi": "application/smil+xml", # Synchronized Multimedia Integration Language
".snf": "application/x-font-snf", # Server Normal Format
".spf": "application/vnd.yamaha.smaf-phrase", # SMAF Phrase
".spl": "application/x-futuresplash", # FutureSplash Animator
".spot": "text/vnd.in3d.spot", # In3D - 3DML
".spp": "application/scvp-vp-response", # Server-Based Certificate Validation Protocol - Validation Policies - Response
".spq": "application/scvp-vp-request", # Server-Based Certificate Validation Protocol - Validation Policies - Request
".src": "application/x-wais-source", # WAIS Source
".sru": "application/sru+xml", # Search/Retrieve via URL Response Format
".srx": "application/sparql-results+xml", # SPARQL - Results
".sse": "application/vnd.kodak-descriptor", # Kodak Storyshare
".ssf": "application/vnd.epson.ssf", # QUASS Stream Player
".ssml": "application/ssml+xml", # Speech Synthesis Markup Language
".st": "application/vnd.sailingtracker.track", # SailingTracker
".stc": "application/vnd.sun.xml.calc.template", # OpenOffice - Calc Template (Spreadsheet)
".std": "application/vnd.sun.xml.draw.template", # OpenOffice - Draw Template (Graphics)
".stf": "application/vnd.wt.stf", # Worldtalk
".sti": "application/vnd.sun.xml.impress.template", # OpenOffice - Impress Template (Presentation)
".stk": "application/hyperstudio", # Hyperstudio
".stl": "application/vnd.ms-pki.stl", # Microsoft Trust UI Provider - Certificate Trust Link
".str": "application/vnd.pg.format", # Proprietary P&G Standard Reporting System
".stw": "application/vnd.sun.xml.writer.template", # OpenOffice - Writer Template (Text - HTML)
".sub": "image/vnd.dvb.subtitle", # Close Captioning - Subtitle
".sus": "application/vnd.sus-calendar", # ScheduleUs
".sv4cpio": "application/x-sv4cpio", # System V Release 4 CPIO Archive
".sv4crc": "application/x-sv4crc", # System V Release 4 CPIO Checksum Data
".svc": "application/vnd.dvb.service", # Digital Video Broadcasting
".svd": "application/vnd.svd", # SourceView Document
".svg": "image/svg+xml", # Scalable Vector Graphics (SVG)
".swf": "application/x-shockwave-flash", # Adobe Flash
".swi": "application/vnd.aristanetworks.swi", # Arista Networks Software Image
".sxc": "application/vnd.sun.xml.calc", # OpenOffice - Calc (Spreadsheet)
".sxd": "application/vnd.sun.xml.draw", # OpenOffice - Draw (Graphics)
".sxg": "application/vnd.sun.xml.writer.global", # OpenOffice - Writer (Text - HTML)
".sxi": "application/vnd.sun.xml.impress", # OpenOffice - Impress (Presentation)
".sxm": "application/vnd.sun.xml.math", # OpenOffice - Math (Formula)
".sxw": "application/vnd.sun.xml.writer", # OpenOffice - Writer (Text - HTML)
".t": "text/troff", # troff
".tao": "application/vnd.tao.intent-module-archive", # Tao Intent
".tar": "application/x-tar", # Tar File (Tape Archive)
".tcap": "application/vnd.3gpp2.tcap", # 3rd Generation Partnership Project - Transaction Capabilities Application Part
".tcl": "application/x-tcl", # Tcl Script
".teacher": "application/vnd.smart.teacher", # SMART Technologies Apps
".tei": "application/tei+xml", # Text Encoding and Interchange
".tex": "application/x-tex", # TeX
".texinfo": "application/x-texinfo", # GNU Texinfo Document
".tfi": "application/thraud+xml", # Sharing Transaction Fraud Data
".tfm": "application/x-tex-tfm", # TeX Font Metric
".thmx": "application/vnd.ms-officetheme", # Microsoft Office System Release Theme
".tiff": "image/tiff", # Tagged Image File Format
".tmo": "application/vnd.tmobile-livetv", # MobileTV
".torrent": "application/x-bittorrent", # BitTorrent
".tpl": "application/vnd.groove-tool-template", # Groove - Tool Template
".tpt": "application/vnd.trid.tpt", # TRI Systems Config
".tra": "application/vnd.trueapp", # True BASIC
".trm": "application/x-msterminal", # Microsoft Windows Terminal Services
".tsd": "application/timestamped-data", # Time Stamped Data Envelope
".tsv": "text/tab-separated-values", # Tab Seperated Values
".ttf": "application/x-font-ttf", # TrueType Font
".ttl": "text/turtle", # Turtle (Terse RDF Triple Language)
".twd": "application/vnd.simtech-mindmapper", # SimTech MindMapper
".txd": "application/vnd.genomatix.tuxedo", # Genomatix Tuxedo Framework
".txf": "application/vnd.mobius.txf", # Mobius Management Systems - Topic Index File
".txt": "text/plain", # Text File
".ufd": "application/vnd.ufdl", # Universal Forms Description Language
".umj": "application/vnd.umajin", # UMAJIN
".unityweb": "application/vnd.unity", # Unity 3d
".uoml": "application/vnd.uoml+xml", # Unique Object Markup Language
".uri": "text/uri-list", # URI Resolution Services
".ustar": "application/x-ustar", # Ustar (Uniform Standard Tape Archive)
".utz": "application/vnd.uiq.theme", # User Interface Quartz - Theme (Symbian)
".uu": "text/x-uuencode", # UUEncode
".uva": "audio/vnd.dece.audio", # DECE Audio
".uvh": "video/vnd.dece.hd", # DECE High Definition Video
".uvi": "image/vnd.dece.graphic", # DECE Graphic
".uvm": "video/vnd.dece.mobile", # DECE Mobile Video
".uvp": "video/vnd.dece.pd", # DECE PD Video
".uvs": "video/vnd.dece.sd", # DECE SD Video
".uvu": "video/vnd.uvvu.mp4", # DECE MP4
".uvv": "video/vnd.dece.video", # DECE Video
".vcd": "application/x-cdlink", # Video CD
".vcf": "text/x-vcard", # vCard
".vcg": "application/vnd.groove-vcard", # Groove - Vcard
".vcs": "text/x-vcalendar", # vCalendar
".vcx": "application/vnd.vcx", # VirtualCatalog
".vis": "application/vnd.visionary", # Visionary
".viv": "video/vnd.vivo", # Vivo
".vsd": "application/vnd.visio", # Microsoft Visio
".vsf": "application/vnd.vsf", # Viewport+
".vtu": "model/vnd.vtu", # Virtue VTU
".vxml": "application/voicexml+xml", # VoiceXML
".wad": "application/x-doom", # Doom Video Game
".wav": "audio/x-wav", # Waveform Audio File Format (WAV)
".wax": "audio/x-ms-wax", # Microsoft Windows Media Audio Redirector
".wbmp": "image/vnd.wap.wbmp", # WAP Bitamp (WBMP)
".wbs": "application/vnd.criticaltools.wbs+xml", # Critical Tools - PERT Chart EXPERT
".wbxml": "application/vnd.wap.wbxml", # WAP Binary XML (WBXML)
".weba": "audio/webm", # Open Web Media Project - Audio
".webm": "video/webm", # Open Web Media Project - Video
".webp": "image/webp", # WebP Image
".wg": "application/vnd.pmi.widget", # Qualcomm's Plaza Mobile Internet
".wgt": "application/widget", # Widget Packaging and XML Configuration
".wm": "video/x-ms-wm", # Microsoft Windows Media
".wma": "audio/x-ms-wma", # Microsoft Windows Media Audio
".wmd": "application/x-ms-wmd", # Microsoft Windows Media Player Download Package
".wmf": "application/x-msmetafile", # Microsoft Windows Metafile
".wml": "text/vnd.wap.wml", # Wireless Markup Language (WML)
".wmlc": "application/vnd.wap.wmlc", # Compiled Wireless Markup Language (WMLC)
".wmls": "text/vnd.wap.wmlscript", # Wireless Markup Language Script (WMLScript)
".wmlsc": "application/vnd.wap.wmlscriptc", # WMLScript
".wmv": "video/x-ms-wmv", # Microsoft Windows Media Video
".wmx": "video/x-ms-wmx", # Microsoft Windows Media Audio/Video Playlist
".wmz": "application/x-ms-wmz", # Microsoft Windows Media Player Skin Package
".woff": "application/x-font-woff", # Web Open Font Format
".wpd": "application/vnd.wordperfect", # Wordperfect
".wpl": "application/vnd.ms-wpl", # Microsoft Windows Media Player Playlist
".wps": "application/vnd.ms-works", # Microsoft Works
".wqd": "application/vnd.wqd", # SundaHus WQ
".wri": "application/x-mswrite", # Microsoft Wordpad
".wrl": "model/vrml", # Virtual Reality Modeling Language
".wsdl": "application/wsdl+xml", # WSDL - Web Services Description Language
".wspolicy": "application/wspolicy+xml", # Web Services Policy
".wtb": "application/vnd.webturbo", # WebTurbo
".wvx": "video/x-ms-wvx", # Microsoft Windows Media Video Playlist
".x3d": "application/vnd.hzn-3d-crossword", # 3D Crossword Plugin
".xap": "application/x-silverlight-app", # Microsoft Silverlight
".xar": "application/vnd.xara", # CorelXARA
".xbap": "application/x-ms-xbap", # Microsoft XAML Browser Application
".xbd": "application/vnd.fujixerox.docuworks.binder", # Fujitsu - Xerox DocuWorks Binder
".xbm": "image/x-xbitmap", # X BitMap
".xdf": "application/xcap-diff+xml", # XML Configuration Access Protocol - XCAP Diff
".xdm": "application/vnd.syncml.dm+xml", # SyncML - Device Management
".xdp": "application/vnd.adobe.xdp+xml", # Adobe XML Data Package
".xdssc": "application/dssc+xml", # Data Structure for the Security Suitability of Cryptographic Algorithms
".xdw": "application/vnd.fujixerox.docuworks", # Fujitsu - Xerox DocuWorks
".xenc": "application/xenc+xml", # XML Encryption Syntax and Processing
".xer": "application/patch-ops-error+xml", # XML Patch Framework
".xfdf": "application/vnd.adobe.xfdf", # Adobe XML Forms Data Format
".xfdl": "application/vnd.xfdl", # Extensible Forms Description Language
".xhtml": "application/xhtml+xml", # XHTML - The Extensible HyperText Markup Language
".xif": "image/vnd.xiff", # eXtended Image File Format (XIFF)
".xlam": "application/vnd.ms-excel.addin.macroenabled.12", # Microsoft Excel - Add-In File
".xls": "application/vnd.ms-excel", # Microsoft Excel
".xlsb": "application/vnd.ms-excel.sheet.binary.macroenabled.12", # Microsoft Excel - Binary Workbook
".xlsm": "application/vnd.ms-excel.sheet.macroenabled.12", # Microsoft Excel - Macro-Enabled Workbook
".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", # Microsoft Office - OOXML - Spreadsheet
".xltm": "application/vnd.ms-excel.template.macroenabled.12", # Microsoft Excel - Macro-Enabled Template File
".xltx": "application/vnd.openxmlformats-officedocument.spreadsheetml.template", # Microsoft Office - OOXML - Spreadsheet Teplate
".xml": "application/xml", # XML - Extensible Markup Language
".xo": "application/vnd.olpc-sugar", # Sugar Linux Application Bundle
".xop": "application/xop+xml", # XML-Binary Optimized Packaging
".xpi": "application/x-xpinstall", # XPInstall - Mozilla
".xpm": "image/x-xpixmap", # X PixMap
".xpr": "application/vnd.is-xpr", # Express by Infoseek
".xps": "application/vnd.ms-xpsdocument", # Microsoft XML Paper Specification
".xpw": "application/vnd.intercon.formnet", # Intercon FormNet
".xslt": "application/xslt+xml", # XML Transformations
".xsm": "application/vnd.syncml+xml", # SyncML
".xspf": "application/xspf+xml", # XSPF - XML Shareable Playlist Format
".xul": "application/vnd.mozilla.xul+xml", # XUL - XML User Interface Language
".xwd": "image/x-xwindowdump", # X Window Dump
".xyz": "chemical/x-xyz", # XYZ File Format
".yaml": "text/yaml", # YAML Ain't Markup Language / Yet Another Markup Language
".yang": "application/yang", # YANG Data Modeling Language
".yin": "application/yin+xml", # YIN (YANG - XML)
".zaz": "application/vnd.zzazz.deck+xml", # Zzazz Deck
".zip": "application/zip", # Zip Archive
".zir": "application/vnd.zul", # Z.U.L. Geometry
".zmm": "application/vnd.handheld-entertainment+xml" # ZVUE Media Manager
}
return m.get(ext, df)
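# Illustrative lookups (editorial addition, not part of the original module):
# >>> get_content_type_by_ext('.json')
# 'application/json'
# >>> get_content_type_by_ext('.unknown')  # falls back to the default
# 'application/octet-stream'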
| 50,482
| 0
| 115
|
a6f3000b839dcc7f905743f2233883d51f46e361
| 709
|
py
|
Python
|
tests/test_runeterra_api.py
|
scary987/twisted_fate
|
654120e2a03faa5bc5568c4264beddca31e43a31
|
[
"MIT"
] | 13
|
2019-11-22T03:59:08.000Z
|
2021-05-25T22:17:53.000Z
|
tests/test_runeterra_api.py
|
scary987/twisted_fate
|
654120e2a03faa5bc5568c4264beddca31e43a31
|
[
"MIT"
] | 7
|
2019-10-25T16:16:33.000Z
|
2021-02-25T04:30:36.000Z
|
tests/test_runeterra_api.py
|
Snowcola/runeterra
|
744b6c777ebdfdf8f07c5c699f63462152aca947
|
[
"MIT"
] | 5
|
2020-02-05T17:00:55.000Z
|
2021-01-18T06:25:22.000Z
|
from twisted_fate import __version__
| 22.870968
| 67
| 0.468265
|
from twisted_fate import __version__
def test_version():
assert __version__ == '0.1.3'
def test_deck():
import twisted_fate as api
deck = {
"DeckCode": "CEBAGAIDCQRSOCQBAQAQYDISDQTCOKBNGQAACAIBAMFQ",
"CardsInDeck": {
"01NX020": 3,
"01NX035": 3,
"01NX039": 3,
"01PZ001": 3,
"01PZ012": 3,
"01PZ013": 3,
"01PZ018": 3,
"01PZ028": 3,
"01PZ038": 3,
"01PZ039": 3,
"01PZ040": 3,
"01PZ045": 3,
"01PZ052": 3,
"01NX011": 1,
},
}
z = api.Deck(**deck)
assert z.cards[0].cardCode in deck["CardsInDeck"]
| 625
| 0
| 46
|
195a25357f476fe8e429e196a9bdfe546f4e7d79
| 984
|
py
|
Python
|
tests/test_executor.py
|
wyriwyd/wyriwyd
|
8c011cd02a3499b60b71486f210869e0971793e2
|
[
"BSD-3-Clause"
] | 7
|
2019-04-03T12:40:38.000Z
|
2019-04-03T20:55:41.000Z
|
tests/test_executor.py
|
wyriwyd/wyriwyd
|
8c011cd02a3499b60b71486f210869e0971793e2
|
[
"BSD-3-Clause"
] | 12
|
2019-04-03T09:18:20.000Z
|
2019-04-08T09:20:44.000Z
|
tests/test_executor.py
|
wyriwyd/wyriwyd
|
8c011cd02a3499b60b71486f210869e0971793e2
|
[
"BSD-3-Clause"
] | 1
|
2019-04-03T09:33:32.000Z
|
2019-04-03T09:33:32.000Z
|
import os
from wyriwyd.executor import ShellExecutor
| 29.818182
| 66
| 0.595528
|
import os
from wyriwyd.executor import ShellExecutor
def test_executor(tmpdir):
cmds = ["export MY_VARIABLE='hello world'",
"cd {}".format(str(tmpdir)),
"pwd",
"echo MY_VARIABLE = $MY_VARIABLE",
"pushd ../ \n pwd \n popd"]
outputs = []
with ShellExecutor() as executor:
for command in cmds:
outputs.append(executor.run_command(command))
assert outputs[:3] == [[], [], [str(tmpdir)]]
assert outputs[3] == ["MY_VARIABLE = hello world"]
cmd_out = "{0} {1}:{0}:{1}"
cmd_out = cmd_out.format(os.path.dirname(tmpdir), str(tmpdir))
assert outputs[4] == cmd_out.split(":")
def test_cat(tmpdir):
filename = tmpdir / "contents.txt"
with open(filename, "w") as outfile:
outfile.write("hello file\n")
cmds = [f"cat {filename}"]
outputs = []
with ShellExecutor() as executor:
for command in cmds:
            outputs.append(executor.run_command(command))
    # Editorial completion of the check (assumes run_command returns the
    # command's output lines, as exercised in test_executor above).
    assert outputs == [["hello file"]]
| 883
| 0
| 46
|
0096ee546ed0b0057c7f17a668314161d5695b54
| 11,196
|
py
|
Python
|
astroduet/duet_telescope.py
|
bwgref/duet-astro
|
4fe3358bb927c0f03de1b75c01ddf2379b5771b3
|
[
"BSD-3-Clause"
] | 1
|
2019-04-15T21:02:57.000Z
|
2019-04-15T21:02:57.000Z
|
astroduet/duet_telescope.py
|
bwgref/duet-astro
|
4fe3358bb927c0f03de1b75c01ddf2379b5771b3
|
[
"BSD-3-Clause"
] | null | null | null |
astroduet/duet_telescope.py
|
bwgref/duet-astro
|
4fe3358bb927c0f03de1b75c01ddf2379b5771b3
|
[
"BSD-3-Clause"
] | 1
|
2019-04-17T19:46:42.000Z
|
2019-04-17T19:46:42.000Z
|
import os
curdir = os.path.dirname(__file__)
datadir = os.path.join(curdir, 'data')
def load_telescope_parameters(version, **kwargs):
"""
Utility script to load the telescope parameters
version = 0: Pre-design version (to compare with Rick's stuff)
version = 1: 210 mm design
version = 2: 300 mm design
version = 3: 350 mm design
version = 4: 400 mm design
###
### Version 2:
Syntax:
diameter, qe, psf_fwhm, pixel_size, efficiency = load_telescope_parameters(version)
---
    Note: versions < 4 will eventually be deprecated, since those assume that
the pixels are 0.5 * pixel size
To be done: Remove QE from this method and put it somewhere else.
---
"""
import astropy.units as ur
from numpy import pi
diag = kwargs.pop('diag', True)
name = ''
    # Eventually deprecate these things
if version == 0:
qe = 0.8 # To be improved later.
diameter = 30*ur.cm
psf_fwhm = 10*ur.arcsec
pixel_size = psf_fwhm * 0.5
efficiency = 0.87 # Ultrasat spec
if version == 1:
qe = 0.8
efficiency = 0.54 # Reported from Mike
diameter = 21 * ur.cm
psf_fwhm = 4 * ur.arcsec
pixel_size = psf_fwhm * 0.5
if version == 2:
qe = 0.8
efficiency = 0.65 # Reported from Mike
diameter = 30 * ur.cm
psf_fwhm = 9*ur.arcsec
pixel_size = psf_fwhm * 0.5
if version == 3:
qe = 0.8
diameter = 35*ur.cm
efficiency = 0.67 # Reported from Mike
psf_fwhm = 18*ur.arcsec
pixel_size = psf_fwhm * 0.5
if version == 4:
qe = 0.8
diameter = 40*ur.cm
efficiency = 0.70 # Reported from Mike
psf_fwhm = 23*ur.arcsec
pixel_size = psf_fwhm * 0.5
# Versions below here allow the PSF and the pixel to be decoupled
# "Big Schmidt" w/ 6k x 6k array
if version == 5:
name = 'Big Schmidt'
qe = 0.7
diameter = 33.0*ur.cm
eff_diam = 29.1*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 0.43 # arcsec per micron
psf_fwhm_um = 21.6 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
# Smaller Schmidts (same focal length?) each with 6k x 6k focal plane array
if version == 6:
name = 'Two mini Big Schmidts'
qe = 0.7
diameter = 21.0*ur.cm
eff_diam = 15.1*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 0.43 # arcsec per micron
psf_fwhm_um = 6.7 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
# Medium Schmidt (same focal length?) each with 6k x 6k focal plane array
if version == 7:
name = 'Medium Schmidt'
qe = 0.7
diameter = 24.0*ur.cm
eff_diam = 19.1*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 0.43 # arcsec per micron
psf_fwhm_um = 7.6 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
# Smaller Medium Schmidts (same focal length?) each with 6k x 6k focal plane array
if version == 8:
name = 'Two Small "Medium" Schmidts'
qe = 0.7
diameter = 14.0*ur.cm
eff_diam = 6.3*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 0.43 # arcsec per micron
psf_fwhm_um = 8.6 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
# Fast Medium Schmidts (same focal length?) each with 6k x 6k focal plane array
if version == 9:
name = 'Fast Schmidt'
qe = 0.7
diameter = 32.0*ur.cm
eff_diam = 29.89*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 0.64 # arcsec per micron
psf_fwhm_um = 44.3 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
# Mini-fast Schmidts
if version == 10:
name="Mini Fast Schmidts"
qe = 0.7
diameter = 22.0*ur.cm
eff_diam = 19.2*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 0.64 # arcsec per micron
psf_fwhm_um = 14.1 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
##### Second round of telescope designs
if version == 11:
name="Small Focal Plane CMOS"
qe = 0.6
diameter = 26.0*ur.cm
eff_diam = 23.1*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 6.4/10. # arcsec per micron
psf_fwhm_um = 6.7 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
if version == 12:
name="Swiss Cross CMOS"
qe = 0.6
diameter = 30.*ur.cm
        eff_diam = 24.7*ur.cm  # supersedes an earlier 21.7 cm estimate
efficiency = (eff_diam/diameter)**2
plate_scale = 4.0/10. # arcsec per micron
psf_fwhm_um = 7.2 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10. * ur.arcsec
if version == 13:
name="Swiss Cross CCD"
qe = 0.6
diameter = 30.*ur.cm
eff_diam = 20.2*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 5.4/13. # arcsec per micron
psf_fwhm_um = 16.1 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 13 * ur.arcsec
if version == 14:
name="Medium Focal Plane (CMOS 6k x 6k)"
qe = 0.6
diameter = 30.*ur.cm
eff_diam = 0.7*27.3*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 4.3/10. # arcsec per micron
psf_fwhm_um = 7.1 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
######
if version == 15:
name="25 cm primary"
qe = 0.6
diameter = 20.*ur.cm
eff_diam = 17*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 6.4/10. # arcsec per micron
psf_fwhm_um = 10.3 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
if diag:
print('Telescope Configuration {}'.format(version))
print('Name: {}'.format(name))
print('Entrance Pupil diameter {}'.format(diameter))
print('Optical Efficiency {}'.format(efficiency))
print('PSF FWHM {}'.format(psf_fwhm))
print('Pixel size {}'.format(pixel_size))
print('Effective Aperture {}'.format(diameter*(efficiency)**0.5))
print('Effective Area {}'.format( efficiency * pi * (0.5*diameter)**2))
return diameter, qe, psf_fwhm, pixel_size, efficiency
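# Illustrative call (editorial sketch, not part of the original module):
# version 5 is the "Big Schmidt" configuration; diag=False suppresses printing.
# diameter, qe, psf_fwhm, pixel_size, efficiency = load_telescope_parameters(5, diag=False)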
def load_qe(**kwargs):
"""
Loads the detector QE and returns the values.
band = 1 (default, 180-220 nm)
band = 2 (260-320 nm)
band = 3 (340-380 nm)
Syntax:
wave, qe = load_qe(band = 1)
"""
import astropy.units as ur
import numpy as np
band = kwargs.pop('band', 1)
diag = kwargs.pop('diag', False)
if band == 1:
infile = os.path.join(datadir, 'detector_180_220nm.csv')
if band == 2:
infile = os.path.join(datadir, 'detector_260_300nm.csv')
if band == 3:
infile = os.path.join(datadir, 'detector_340_380nm.csv')
f = open(infile, 'r')
header = True
    set = False  # flips to True once the first data row has been parsed
for line in f:
if header:
header = False
continue
fields = line.split(',')
if not set:
wave = float(fields[0])
qe = float(fields[3])
set = True
else:
wave = np.append(wave, float(fields[0]))
qe = np.append(qe, float(fields[3]))
f.close()
# Give wavelength a unit
wave *= ur.nm
if diag:
print('Detector Q.E. loader')
print('Band {} has input file {}'.format(band, infile))
return wave, qe / 100.
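# Illustrative call (editorial sketch, not part of the original module):
# wave is returned in nm and qe as a fraction (the CSV stores percent).
# wave, qe = load_qe(band=2)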
def load_reflectivity(**kwargs):
"""
Loads the optics reflectivity and returns the values.
Syntax:
wave, reflectivity = load_reflectivity()
"""
import astropy.units as ur
import numpy as np
diag = kwargs.pop('diag', False)
infile = os.path.join(datadir, 'al_mgf2_mirror_coatings.csv')
f = open(infile, 'r')
header = True
    set = False  # flips to True once the first data row has been parsed
for line in f:
if header:
header = False
continue
fields = line.split(',')
if not set:
wave = float(fields[0])
reflectivity = float(fields[1])
set = True
else:
wave = np.append(wave, float(fields[0]))
reflectivity = np.append(reflectivity, float(fields[1]))
f.close()
# Give wavelength a unit
wave *= ur.nm
if diag:
print('Optics reflectivity loader')
print('Input file {}'.format(infile))
return wave, reflectivity
def load_redfilter(**kwargs):
"""
Loads the detector QE and returns the values.
band = 1 (default, 180-220 nm)
band = 2 (260-320 nm)
Syntax:
wave, transmission = load_redfilter(band=1)
"""
import astropy.units as ur
import numpy as np
band = kwargs.pop('band', 1)
diag = kwargs.pop('diag', False)
light = kwargs.pop('light', True)
if light:
infile = os.path.join(datadir, 'duet{}_filter_light.csv'.format(band))
else:
infile = os.path.join(datadir, 'duet{}_filter.csv'.format(band))
f = open(infile, 'r')
header = True
    set = False  # flips to True once the first data row has been parsed
for line in f:
if header:
if (line.startswith('Wavelength')) or ('%T' in line):
header = False
continue
fields = line.split(',')
if not set:
wave = float(fields[0])
transmission = float(fields[1])
set = True
else:
wave = np.append(wave, float(fields[0]))
transmission = np.append(transmission, float(fields[1]))
f.close()
# Give wavelength a unit
wave *= ur.nm
if diag:
print('Red filter loader')
print('Band {} has input file {}'.format(band, infile))
return wave, transmission / 100.
def apply_filters(wave, spec, **kwargs):
"""
Loads the detector QE and returns the values.
band = 1 (default, 180-220 nm)
band = 2 (260-320 nm)
Syntax:
wave, transmission = load_redfilter(band=1)
"""
from .apply_transmission import apply_trans
# Load filters
ref_wave, reflectivity = load_reflectivity(**kwargs)
qe_wave, qe = load_qe(**kwargs)
red_wave, red_trans = load_redfilter(**kwargs)
ref_flux = apply_trans(wave, spec, ref_wave, reflectivity/100.)
qe_flux = apply_trans(wave, ref_flux, qe_wave, qe)
band_flux = apply_trans(wave, qe_flux, red_wave, red_trans)
return band_flux
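# Illustrative sketch (editorial addition; assumes apply_trans interpolates
# each transmission curve onto `wave`, and that `spec` shares that grid):
# import numpy as np
# import astropy.units as ur
# wave = np.linspace(180, 220, 100) * ur.nm
# spec = np.ones(100)  # flat test spectrum, arbitrary units
# band_flux = apply_filters(wave, spec, band=1)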
| 24.445415
| 87
| 0.575473
|
import os
curdir = os.path.dirname(__file__)
datadir = os.path.join(curdir, 'data')
def load_telescope_parameters(version, **kwargs):
"""
Utility script to load the telescope parameters
version = 0: Pre-design version (to compare with Rick's stuff)
version = 1: 210 mm design
version = 2: 300 mm design
version = 3: 350 mm design
version = 4: 400 mm design
###
### Version 2:
Syntax:
diameter, qe, psf_fwhm, pixel_size, efficiency = load_telescope_parameters(version)
---
    Note: versions < 4 will eventually be deprecated, since those assume that
the pixels are 0.5 * pixel size
To be done: Remove QE from this method and put it somewhere else.
---
"""
import astropy.units as ur
from numpy import pi
diag = kwargs.pop('diag', True)
name = ''
    # Eventually deprecate these things
if version == 0:
qe = 0.8 # To be improved later.
diameter = 30*ur.cm
psf_fwhm = 10*ur.arcsec
pixel_size = psf_fwhm * 0.5
efficiency = 0.87 # Ultrasat spec
if version == 1:
qe = 0.8
efficiency = 0.54 # Reported from Mike
diameter = 21 * ur.cm
psf_fwhm = 4 * ur.arcsec
pixel_size = psf_fwhm * 0.5
if version == 2:
qe = 0.8
efficiency = 0.65 # Reported from Mike
diameter = 30 * ur.cm
psf_fwhm = 9*ur.arcsec
pixel_size = psf_fwhm * 0.5
if version == 3:
qe = 0.8
diameter = 35*ur.cm
efficiency = 0.67 # Reported from Mike
psf_fwhm = 18*ur.arcsec
pixel_size = psf_fwhm * 0.5
if version == 4:
qe = 0.8
diameter = 40*ur.cm
efficiency = 0.70 # Reported from Mike
psf_fwhm = 23*ur.arcsec
pixel_size = psf_fwhm * 0.5
# Versions below here allow the PSF and the pixel to be decoupled
# "Big Schmidt" w/ 6k x 6k array
if version == 5:
name = 'Big Schmidt'
qe = 0.7
diameter = 33.0*ur.cm
eff_diam = 29.1*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 0.43 # arcsec per micron
psf_fwhm_um = 21.6 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
# Smaller Schmidts (same focal length?) each with 6k x 6k focal plane array
if version == 6:
name = 'Two mini Big Schmidts'
qe = 0.7
diameter = 21.0*ur.cm
eff_diam = 15.1*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 0.43 # arcsec per micron
psf_fwhm_um = 6.7 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
# Medium Schmidt (same focal length?) each with 6k x 6k focal plane array
if version == 7:
name = 'Medium Schmidt'
qe = 0.7
diameter = 24.0*ur.cm
eff_diam = 19.1*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 0.43 # arcsec per micron
psf_fwhm_um = 7.6 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
# Smaller Medium Schmidts (same focal length?) each with 6k x 6k focal plane array
if version == 8:
name = 'Two Small "Medium" Schmidts'
qe = 0.7
diameter = 14.0*ur.cm
eff_diam = 6.3*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 0.43 # arcsec per micron
psf_fwhm_um = 8.6 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
# Fast Medium Schmidts (same focal length?) each with 6k x 6k focal plane array
if version == 9:
name = 'Fast Schmidt'
qe = 0.7
diameter = 32.0*ur.cm
eff_diam = 29.89*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 0.64 # arcsec per micron
psf_fwhm_um = 44.3 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
# Mini-fast Schmidts
if version == 10:
name="Mini Fast Schmidts"
qe = 0.7
diameter = 22.0*ur.cm
eff_diam = 19.2*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 0.64 # arcsec per micron
psf_fwhm_um = 14.1 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
##### Second round of telescope designs
if version == 11:
name="Small Focal Plane CMOS"
qe = 0.6
diameter = 26.0*ur.cm
eff_diam = 23.1*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 6.4/10. # arcsec per micron
psf_fwhm_um = 6.7 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
if version == 12:
name="Swiss Cross CMOS"
qe = 0.6
diameter = 30.*ur.cm
        eff_diam = 24.7*ur.cm  # supersedes an earlier 21.7 cm estimate
efficiency = (eff_diam/diameter)**2
plate_scale = 4.0/10. # arcsec per micron
psf_fwhm_um = 7.2 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10. * ur.arcsec
if version == 13:
name="Swiss Cross CCD"
qe = 0.6
diameter = 30.*ur.cm
eff_diam = 20.2*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 5.4/13. # arcsec per micron
psf_fwhm_um = 16.1 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 13 * ur.arcsec
if version == 14:
name="Medium Focal Plane (CMOS 6k x 6k)"
qe = 0.6
diameter = 30.*ur.cm
eff_diam = 0.7*27.3*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 4.3/10. # arcsec per micron
psf_fwhm_um = 7.1 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
######
if version == 15:
name="25 cm primary"
qe = 0.6
diameter = 20.*ur.cm
eff_diam = 17*ur.cm
efficiency = (eff_diam/diameter)**2
plate_scale = 6.4/10. # arcsec per micron
psf_fwhm_um = 10.3 # microns
psf_fwhm = plate_scale * psf_fwhm_um * ur.arcsec
pixel_size = plate_scale * 10 * ur.arcsec
if diag:
print('Telescope Configuration {}'.format(version))
print('Name: {}'.format(name))
print('Entrance Pupil diameter {}'.format(diameter))
print('Optical Efficiency {}'.format(efficiency))
print('PSF FWHM {}'.format(psf_fwhm))
print('Pixel size {}'.format(pixel_size))
print('Effective Aperture {}'.format(diameter*(efficiency)**0.5))
print('Effective Area {}'.format( efficiency * pi * (0.5*diameter)**2))
return diameter, qe, psf_fwhm, pixel_size, efficiency
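# --- Editor's usage sketch; not part of the original module ---
# Fetch the "Big Schmidt" configuration and rederive its effective collecting
# area (the arithmetic repeats the formula used in the diag printout above).
if __name__ == '__main__':
    import astropy.units as ur
    from numpy import pi
    diameter, qe, psf_fwhm, pixel_size, efficiency = load_telescope_parameters(5, diag=False)
    eff_area = efficiency * pi * (0.5 * diameter)**2
    print(diameter, psf_fwhm, pixel_size, eff_area.to(ur.cm**2))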
def load_qe(**kwargs):
"""
Loads the detector QE and returns the values.
band = 1 (default, 180-220 nm)
band = 2 (260-320 nm)
band = 3 (340-380 nm)
Syntax:
wave, qe = load_qe(band = 1)
"""
import astropy.units as ur
import numpy as np
band = kwargs.pop('band', 1)
diag = kwargs.pop('diag', False)
if band == 1:
infile = os.path.join(datadir, 'detector_180_220nm.csv')
if band == 2:
infile = os.path.join(datadir, 'detector_260_300nm.csv')
if band == 3:
infile = os.path.join(datadir, 'detector_340_380nm.csv')
f = open(infile, 'r')
header = True
qe = {}
set = False
for line in f:
if header:
header = False
continue
fields = line.split(',')
if not set:
wave = float(fields[0])
qe = float(fields[3])
set = True
else:
wave = np.append(wave, float(fields[0]))
qe = np.append(qe, float(fields[3]))
f.close()
# Give wavelength a unit
wave *= ur.nm
if diag:
print('Detector Q.E. loader')
print('Band {} has input file {}'.format(band, infile))
return wave, qe / 100.
def load_reflectivity(**kwargs):
"""
Loads the optics reflectivity and returns the values.
Syntax:
wave, reflectivity = load_reflectivity()
"""
import astropy.units as ur
import numpy as np
diag = kwargs.pop('diag', False)
infile = os.path.join(datadir, 'al_mgf2_mirror_coatings.csv')
f = open(infile, 'r')
header = True
qe = {}
set = False
for line in f:
if header:
header = False
continue
fields = line.split(',')
if not set:
wave = float(fields[0])
reflectivity = float(fields[1])
set = True
else:
wave = np.append(wave, float(fields[0]))
reflectivity = np.append(reflectivity, float(fields[1]))
f.close()
# Give wavelength a unit
wave *= ur.nm
if diag:
print('Optics reflectivity loader')
print('Input file {}'.format(infile))
return wave, reflectivity
def load_redfilter(**kwargs):
"""
    Loads the red-filter transmission and returns the values.
band = 1 (default, 180-220 nm)
band = 2 (260-320 nm)
Syntax:
wave, transmission = load_redfilter(band=1)
"""
import astropy.units as ur
import numpy as np
band = kwargs.pop('band', 1)
diag = kwargs.pop('diag', False)
light = kwargs.pop('light', True)
if light:
infile = os.path.join(datadir, 'duet{}_filter_light.csv'.format(band))
else:
infile = os.path.join(datadir, 'duet{}_filter.csv'.format(band))
f = open(infile, 'r')
header = True
qe = {}
set = False
for line in f:
if header:
if (line.startswith('Wavelength')) or ('%T' in line):
header = False
continue
fields = line.split(',')
if not set:
wave = float(fields[0])
transmission = float(fields[1])
set = True
else:
wave = np.append(wave, float(fields[0]))
transmission = np.append(transmission, float(fields[1]))
f.close()
# Give wavelength a unit
wave *= ur.nm
if diag:
print('Red filter loader')
print('Band {} has input file {}'.format(band, infile))
return wave, transmission / 100.
def apply_filters(wave, spec, **kwargs):
"""
    Applies the mirror reflectivity, detector QE, and red-filter
    transmission to an input spectrum and returns the band flux.
    band = 1 (default, 180-220 nm)
    band = 2 (260-320 nm)
    Syntax:
    band_flux = apply_filters(wave, spec, band=1)
"""
from .apply_transmission import apply_trans
# Load filters
ref_wave, reflectivity = load_reflectivity(**kwargs)
qe_wave, qe = load_qe(**kwargs)
red_wave, red_trans = load_redfilter(**kwargs)
ref_flux = apply_trans(wave, spec, ref_wave, reflectivity/100.)
qe_flux = apply_trans(wave, ref_flux, qe_wave, qe)
band_flux = apply_trans(wave, qe_flux, red_wave, red_trans)
return band_flux
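# --- Editor's usage sketch; not part of the original module ---
# Drive apply_filters end-to-end with a flat placeholder spectrum. The grid
# and units below are assumptions; apply_trans is assumed to interpolate each
# curve onto the input wavelengths, and the data/*.csv files must be present.
if __name__ == '__main__':
    import numpy as np
    import astropy.units as ur
    wave = np.linspace(180, 220, 100) * ur.nm
    spec = np.ones(wave.size) * ur.ph / (ur.s * ur.cm**2 * ur.nm)
    band_flux = apply_filters(wave, spec, band=1, diag=False)
    print(band_flux[:5])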
| 0
| 0
| 0
|
43d9adbbb5040d1c28b04d0a61b25bd07c383123
| 48
|
py
|
Python
|
collada_wt/__init__.py
|
charlie9578/wind-turbine-kml
|
297b3d25672e82456485387bbb4e9a97873cf136
|
[
"BSD-3-Clause"
] | null | null | null |
collada_wt/__init__.py
|
charlie9578/wind-turbine-kml
|
297b3d25672e82456485387bbb4e9a97873cf136
|
[
"BSD-3-Clause"
] | null | null | null |
collada_wt/__init__.py
|
charlie9578/wind-turbine-kml
|
297b3d25672e82456485387bbb4e9a97873cf136
|
[
"BSD-3-Clause"
] | null | null | null |
from collada_wt.collada_wt import create_turbine
| 48
| 48
| 0.916667
|
from collada_wt.collada_wt import create_turbine
| 0
| 0
| 0
|
056b231605a31182c39245326f875993e680cc1d
| 4,073
|
py
|
Python
|
model/backbone.py
|
PaperCodeReview/DETR-TF
|
8f9fc3e06c20269044967718847c794606e25d10
|
[
"MIT"
] | 3
|
2020-10-01T10:15:46.000Z
|
2021-04-20T03:33:00.000Z
|
model/backbone.py
|
PaperCodeReview/DETR-TF
|
8f9fc3e06c20269044967718847c794606e25d10
|
[
"MIT"
] | null | null | null |
model/backbone.py
|
PaperCodeReview/DETR-TF
|
8f9fc3e06c20269044967718847c794606e25d10
|
[
"MIT"
] | null | null | null |
from typing import Dict
import tensorflow as tf
from model.position_encoding import build_position_encoding
| 37.027273
| 101
| 0.558802
|
from typing import Dict
import tensorflow as tf
from model.position_encoding import build_position_encoding
class FrozenBatchNorm2D(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(FrozenBatchNorm2D, self).__init__(**kwargs)
def build(self, input_shape):
self.weight = self.add_weight(
name="weight", shape=[input_shape[-1]], initializer="ones", trainable=False
)
self.bias = self.add_weight(
name="bias", shape=[input_shape[-1]], initializer="zeros", trainable=False
)
self.running_mean = self.add_weight(
name="running_mean", shape=[input_shape[-1]], initializer="zeros", trainable=False
)
self.running_var = self.add_weight(
name="running_var", shape=[input_shape[-1]], initializer="ones", trainable=False
)
def call(self, x):
# move reshapes to the beginning
# to make it fuser-friendly
w = tf.reshape(self.weight, (1, 1, 1, -1))
b = tf.reshape(self.bias, (1, 1, 1, -1))
rv = tf.reshape(self.running_var, (1, 1, 1, -1))
rm = tf.reshape(self.running_mean, (1, 1, 1, -1))
eps = 1e-5
scale = w * tf.math.rsqrt(rv + eps)
bias = b - rm * scale
return x * scale + bias
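# --- Editor's check (a sketch, assuming TensorFlow 2.x) ---
# The reshape-then-scale above is a pure affine transform with constants baked
# in: with the default ones/zeros statistics the layer reduces to
# x / sqrt(1 + eps), which this helper verifies numerically.
def _check_frozen_bn_is_affine():
    import numpy as np
    fbn = FrozenBatchNorm2D()
    x = tf.random.normal((1, 4, 4, 8))
    y = fbn(x)  # build() fills ones/zeros, so y ~= x / sqrt(1 + 1e-5)
    np.testing.assert_allclose(y.numpy(), x.numpy() / np.sqrt(1.0 + 1e-5),
                               rtol=1e-4, atol=1e-6)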
class BackboneBase(tf.keras.Model):
def __init__(self,
backbone: tf.keras.Model,
train_backbone: bool,
num_channels: int,
return_interm_layers: bool,
**kwargs):
super(BackboneBase, self).__init__(**kwargs)
for layer in backbone.layers:
if not train_backbone:
layer.trainable = False
self.body = backbone
self.num_channels = num_channels
def call(self, inputs: Dict):
xs = self.body(inputs['img'])
out = {}
for name, x in xs.items():
m = inputs['mask']
assert m is not None
m = tf.cast(m, tf.float32)
mask = tf.cast(tf.image.resize(m, x.shape[1:-1], method='nearest'), tf.bool)
out[name] = {'img': x, 'mask': mask}
return out
class Backbone(BackboneBase):
def __init__(self,
name: str,
train_backbone: bool,
return_interm_layers: bool,
dilation: bool,
**kwargs):
if name == 'resnet50':
from model.resnet import ResNet50Backbone as b
elif name == 'resnet101':
from model.resnet import ResNet101Backbone as b
backbone = b(return_interm_layers=return_interm_layers,
replace_stride_with_dilation=[False, False, dilation])
num_channels = 512 if name in ['resnet18', 'resnet34'] else 2048
super(Backbone, self).__init__(backbone, train_backbone, num_channels, return_interm_layers)
class Joiner(tf.keras.Model):
def __init__(self,
backbone: tf.keras.Model,
position_embedding: tf.keras.Model,
**kwargs):
super(Joiner, self).__init__(**kwargs)
self.backbone = backbone
self.position_embedding = position_embedding
def call(self, inputs):
xs = self.backbone(inputs)
out = []
pos = []
for name, x in xs.items():
out.append((name, x))
pos.append((name, tf.cast(self.position_embedding(x), tf.float32)))
return out, pos
def build_backbone(args):
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
return_interm_layers = args.masks
backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation)
model = Joiner(backbone, position_embedding)
model.num_channels = backbone.num_channels
return model
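# --- Editor's usage sketch; not part of the original module ---
# build_backbone expects an argparse-style namespace. The hidden_dim and
# position_embedding fields below are guesses at what build_position_encoding
# reads and may differ in the real repo.
def _demo_build_backbone():
    from types import SimpleNamespace
    args = SimpleNamespace(backbone='resnet50', lr_backbone=1e-5, masks=False,
                           dilation=False, hidden_dim=256,
                           position_embedding='sine')
    model = build_backbone(args)
    print(model.num_channels)  # 2048 for resnet50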
| 3,537
| 56
| 360
|
9e39e0c0c35e473517bcf3aa9aba3fd648b29f77
| 472
|
py
|
Python
|
urls.py
|
zachsnyder1/geopost
|
067c79b24deb0b99477513c5d684f6ac92d60dbe
|
[
"MIT"
] | 1
|
2016-04-22T20:35:31.000Z
|
2016-04-22T20:35:31.000Z
|
urls.py
|
zachsnyder1/geopost
|
067c79b24deb0b99477513c5d684f6ac92d60dbe
|
[
"MIT"
] | null | null | null |
urls.py
|
zachsnyder1/geopost
|
067c79b24deb0b99477513c5d684f6ac92d60dbe
|
[
"MIT"
] | null | null | null |
from django.urls import re_path
from . import views
urlpatterns = [
re_path(r'^$', views.Home.as_view(), name='geopost_home'),
re_path(r'^entry/$', views.Entry.as_view(), name='geopost_entry'),
re_path(r'^photo/(?P<entry_uuid>[0-9A-Fa-f-]+)$',
views.photo,
name="geopost_photo"),
re_path(r'^delete/$', views.delete, name='geopost_delete'),
re_path(r'^vantechy/$', views.vantechy, name='geopost_vantechy')
]
| 36.307692
| 74
| 0.612288
|
from django.urls import re_path
from . import views
urlpatterns = [
re_path(r'^$', views.Home.as_view(), name='geopost_home'),
re_path(r'^entry/$', views.Entry.as_view(), name='geopost_entry'),
re_path(r'^photo/(?P<entry_uuid>[0-9A-Fa-f-]+)$',
views.photo,
name="geopost_photo"),
re_path(r'^delete/$', views.delete, name='geopost_delete'),
re_path(r'^vantechy/$', views.vantechy, name='geopost_vantechy')
]
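# --- Editor's sketch: reversing the UUID-capturing photo route ---
# (the UUID value below is arbitrary)
def _example_reverse_photo():
    from django.urls import reverse
    return reverse('geopost_photo',
                   kwargs={'entry_uuid': '123e4567-e89b-12d3-a456-426614174000'})
    # -> '/photo/123e4567-...' (prefix depends on how this urlconf is included)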
| 0
| 0
| 0
|
1dc2c031015cfc8534eb01331cac24b95caf681f
| 2,752
|
py
|
Python
|
flaskexerc/deptoDAO.py
|
adrianpastore/LoginDS2
|
ea126fadd31c97ffea048a3a85ebd45f9c383ad9
|
[
"MIT"
] | null | null | null |
flaskexerc/deptoDAO.py
|
adrianpastore/LoginDS2
|
ea126fadd31c97ffea048a3a85ebd45f9c383ad9
|
[
"MIT"
] | null | null | null |
flaskexerc/deptoDAO.py
|
adrianpastore/LoginDS2
|
ea126fadd31c97ffea048a3a85ebd45f9c383ad9
|
[
"MIT"
] | null | null | null |
from depto import departamento
import psycopg2
#except ValueError:
#    print('Value not found.')
#except psycopg2.Error as e:
# print(e.pgerror)
| 37.69863
| 180
| 0.555959
|
from depto import departamento
import psycopg2
class deptoDao:
def __init__(self):
self._dados_con = "dbname=ds2aula host=localhost user=postgres password=postgres port=5432"
def inserir(self, departamento):
with psycopg2.connect(self._dados_con) as conn:
cur = conn.cursor()
sql = cur.execute('INSERT INTO "departamento" (nome, "dataHoraAtualizacao") VALUES (%s, now()) returning codDepartamento', [departamento.nome])
adicionado = cur.fetchone()
departamento.codDepartamento = adicionado[0]
conn.commit()
cur.close()
def excluir(self, cod):
#try:
with psycopg2.connect(self._dados_con) as conn:
cur = conn.cursor()
sql = cur.execute('DELETE FROM "departamento" WHERE "codDepartamento" = (%s)',[cod])
conn.commit()
cur.close()
        #except ValueError:
        #    print('Value not found.')
def alterar(self, departamento):
#try:
with psycopg2.connect(self._dados_con) as conn:
cur = conn.cursor()
sql = cur.execute('UPDATE "departamento" SET nome = %s, "dataAtualizacao" = now() WHERE "codDepartamento" = (%s)',[departamento.nome, departamento.codDepartamento])
conn.commit()
cur.close()
#except psycopg2.Error as e:
# print(e.pgerror)
def buscar(self, cod):
#try:
with psycopg2.connect(self._dados_con) as conn:
cur = conn.cursor()
sql = cur.execute('SELECT * FROM "departamento" WHERE "codDepartamento" = (%s)',[cod])
busca = cur.fetchall()
Departamento = departamento(busca[0][1])
Departamento.codDept = cod
cur.close()
return Departamento
        #except Exception:
        #    print('Something went wrong')
        #    raise e
def salvar(self, departamento):
newDao = deptoDao()
if (departamento.codDepartamento != None):
            print('Updating department...')
            newDao.alterar(departamento)
            print('Updated successfully!')
        else:
            print('Inserting department...')
            newDao.inserir(departamento)
            print('Inserted successfully!')
def listar(self):
vet = []
with psycopg2.connect(self._dados_con) as conn:
cur = conn.cursor()
cur.execute('SELECT * FROM "departamento"')
for linha in cur.fetchall():
depto = departamento(linha[1])
depto.codDept = int(linha[0])
vet.append(depto)
cur.close()
return vet
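    # --- Editor's usage sketch; the attribute names on `departamento` are
    # assumptions inferred from listar()/buscar() above, and the hard-coded
    # connection string must point at a reachable database.
def _demo_depto_dao():
    dao = deptoDao()
    d = departamento('Sales')   # codDepartamento unset -> salvar() inserts
    dao.salvar(d)
    for dep in dao.listar():
        print(dep.codDept)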
| 2,357
| -6
| 215
|
2d9493708a911e3f5dcfd12ee156d4f59611ca4e
| 72
|
py
|
Python
|
datasette/version.py
|
jefftriplett/datasette
|
1a30fc259205df736daf068c57a0a6ae2c21ffa9
|
[
"Apache-2.0"
] | 1
|
2020-11-03T17:40:11.000Z
|
2020-11-03T17:40:11.000Z
|
datasette/version.py
|
Quentinchampenois/datasette
|
13d1228d80c91d382a05b1a9549ed02c300ef851
|
[
"Apache-2.0"
] | null | null | null |
datasette/version.py
|
Quentinchampenois/datasette
|
13d1228d80c91d382a05b1a9549ed02c300ef851
|
[
"Apache-2.0"
] | null | null | null |
__version__ = "0.51.1"
__version_info__ = tuple(__version__.split("."))
| 24
| 48
| 0.722222
|
__version__ = "0.51.1"
__version_info__ = tuple(__version__.split("."))
| 0
| 0
| 0
|
480bbce3a19feff8809ef78418436457fe3a997c
| 1,994
|
py
|
Python
|
rmrb/rmrb_daemon/modules/xorg_monitor.py
|
fjfhccfkuk/h_s_x_r_m_r_b_python
|
46fe249b1b71f1245296c8b2dbd6e7c29dadade4
|
[
"Unlicense"
] | null | null | null |
rmrb/rmrb_daemon/modules/xorg_monitor.py
|
fjfhccfkuk/h_s_x_r_m_r_b_python
|
46fe249b1b71f1245296c8b2dbd6e7c29dadade4
|
[
"Unlicense"
] | null | null | null |
rmrb/rmrb_daemon/modules/xorg_monitor.py
|
fjfhccfkuk/h_s_x_r_m_r_b_python
|
46fe249b1b71f1245296c8b2dbd6e7c29dadade4
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
| 23.186047
| 103
| 0.498495
|
#!/usr/bin/env python
def __get_xorg_pid():
pid=-1;
try:
import os;
pid = os.popen('ps aux | grep "/usr/bin/X" | grep -v "grep" | awk \'{print $2}\'').read();
# print "__get_xorg_pid. pid:" + str(pid);
except:
pid=-1
return pid;
def __get_vlc_pid():
pid = -1;
try:
import os;
pid = os.popen('ps aux | grep "vlc" | grep "sh" | grep -v "grep" | awk \'{print $2}\'').read();
# print "__get_vlc_pid. pid:" + str(pid);
except:
pid = -1
return pid;
def __get_xorg_socket_count():
count=0
try:
import os;
while True:
xorgPid = __get_xorg_pid()
# print "xorg pid:" + xorgPid
if xorgPid == "":
break;
xorgPid = xorgPid.replace("\n", "");
cmd = "sudo lsof -p %s" % xorgPid + " | grep socket | wc -l"
ret = os.popen(cmd).read();
ret = ret.replace("\n", "")
if ret == "":
break;
tmpInt = int(ret)
count = tmpInt;
break;
except Exception,e:
print "excp:" + e.message
count=0;
return count;
def __do_kill_vlc():
try:
import os
xorg_count = __get_xorg_socket_count() #MAX_XORG_CONT=210
if xorg_count >= 210:
pid = __get_vlc_pid()
cmd = "sudo kill -9 %s" % pid
os.popen(cmd).read()
except Exception, e:
print "__do_kill_vlc excp:" + e.message
return;
def do_monitor_vlc():
__do_kill_vlc()
def debug_xorg_monitor():
retStr = "sorry,nothing"
try:
count = __get_xorg_socket_count()
retStr = "xorg_socket_count:[%d" % count + "]";
xorg_pid = __get_xorg_pid();
retStr += "xorg_pid:[%s" % xorg_pid + "]";
vlc_pid = __get_vlc_pid()
retStr += " vlc_pid:[%s" % vlc_pid + "]";
except Exception,e:
print ""
return retStr.replace("\n", "");
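# --- Editor's sketch: a Python 3, pgrep-based alternative to __get_xorg_pid ---
# (not part of the original module; the lookups above shell out via os.popen
# with a grep/awk pipeline, which pgrep replaces)
def _get_xorg_pid_py3():
    """Editor's sketch; requires Python 3.7+ for subprocess.run(capture_output=...)."""
    import subprocess
    out = subprocess.run(['pgrep', '-f', '/usr/bin/X'],
                         capture_output=True, text=True).stdout.split()
    return int(out[0]) if out else -1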
| 1,828
| 0
| 138
|
91571235a6078c3ed7b0317752d9708a8b1d8915
| 709
|
py
|
Python
|
setup.py
|
constantinius/fabrant
|
c91a3594721a707a58e8fe5aa53ffbc5766c33e8
|
[
"MIT"
] | null | null | null |
setup.py
|
constantinius/fabrant
|
c91a3594721a707a58e8fe5aa53ffbc5766c33e8
|
[
"MIT"
] | null | null | null |
setup.py
|
constantinius/fabrant
|
c91a3594721a707a58e8fe5aa53ffbc5766c33e8
|
[
"MIT"
] | null | null | null |
from setuptools import setup
import fabrant
version = fabrant.__version__
setup(
name='fabrant',
version=version,
description="Easy handling of vagrant hosts within fabric",
long_description=open("README.rst").read(),
author='Fabian Schindler',
author_email='fabian.schindler@eox.at',
license='MIT',
url='https://github.com/constantinius/fabrant',
py_modules=['fabrant'],
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
]
)
| 26.259259
| 63
| 0.643159
|
from setuptools import setup
import fabrant
version = fabrant.__version__
setup(
name='fabrant',
version=version,
description="Easy handling of vagrant hosts within fabric",
long_description=open("README.rst").read(),
author='Fabian Schindler',
author_email='fabian.schindler@eox.at',
license='MIT',
url='https://github.com/constantinius/fabrant',
py_modules=['fabrant'],
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
]
)
| 0
| 0
| 0
|
81d5d2a9dcca54b4f49a512570a95264c0ef66f2
| 6,500
|
py
|
Python
|
lab06.py
|
ucsb-cs8-m17/lab06_starter_code
|
9c91faa04399e22f79c8473823a6ab0756bf097e
|
[
"MIT"
] | null | null | null |
lab06.py
|
ucsb-cs8-m17/lab06_starter_code
|
9c91faa04399e22f79c8473823a6ab0756bf097e
|
[
"MIT"
] | null | null | null |
lab06.py
|
ucsb-cs8-m17/lab06_starter_code
|
9c91faa04399e22f79c8473823a6ab0756bf097e
|
[
"MIT"
] | null | null | null |
# tests for lab06, UCSB, CMPSC 8, Summer 2017
# Instructor: P. Conrad
# Student(s): (insert name here)
# @@@ This next function has an error. Can you fix it?
# @@@ Hint: you might need to use the "and" or "or" keywords of python
# @@@ and modify the if test.
def notStringContainingE(word):
"""
return True when word is a string that contains no letter 'e' (or 'E')
It should work both for lower and upper case.
When word isn't a string, return True
    (because it is not a string containing an E)
"""
if not(type(word)==str):
return True
for letter in word:
if letter == 'e':
return False
return True
#@@@ Here is a function definition that doesn't pass one or more of its tests.
#@@@ Fix it! (Also try to understand why it is wrong)
def hasNoX(word):
"""
return True when word is a string that contains no letter 'x'
(and no letter 'X')
It should work both for lower and upper case.
When word isn't a string, return True (because it is not a string
with an x in that case!)
"""
if (type(word)!=str):
return True
for letter in word:
if letter != 'x' and letter != 'X':
return True
return False
# The following function is provided for you as an example
# of how to write a Python function that checks if EVERY element
# of a list has some property.
def isNumber(item):
" return True if item is of type int or type float otherwise False "
return "stub" # HINT You already did this on in a previous lab
def isListOfNumber(theList):
"""
indicates whether value of argument is a list of only simple numbers
(int or float)
Note: empty list should return True---it doesn't contain anything that
ISN'T a simple number
theList can be anything, and the function will return either True or False.
"""
if not type(theList)==list:
return False # it isn't really a list!
# Now we can assume that theList really is a list
# But is it a list of all numerics?
# If we find even a single item that isn't numeric, we can
# immediately return false.
for item in theList:
if not isNumber(item):
return False
# If we get here and didn't return yet, then we know everything
# in the list is a simple numeric!
# (i.e. there isn't anything in the list that is NOT simple numeric)
return True
### @@@ NOW, write a function called isListOfIntegers(x)
### @@@ The function should take anything as an argument, and produce True
### @@@ only if argument is a list consisting of only int values
### @@@ similar to the comments above the other function definitions in this file
### @@@ See previous function for a clue as to how to proceed
### @@@ Note that empty list should return True (for same reasoning as in the previous function)
def isListOfIntegers(theList):
"""
indicates whether value of argument is a list of only int
Note: empty list should return True
because it doesn't contain anything that ISN'T int
theList can be anything, and it will return either True or False.
"""
return "stub"
### @@@ NOW, write a function called isListOfEvenIntegers(x)
### @@@ The function should take anything as an argument, and produce True
### @@@ only if argument is a list consisting of only int values that
### @@@ are even. See previous function for a clue as to how to proceed
### @@@ Note that empty list should return True
### @@@ HINT: to avoid problems when using the % operator
### @@@ (that's another hint), use your isListOfIntegers function first.
### @@@ This is sort of like the way that isListOfSimpleNumeric
### @@@ checks first to see if theList is a list.
### @@@ That way, you kill two birds with one stone---you immediately
### @@@ know that you are working with a list of integers, and you
### @@@ only have to worry about whether all of them are even or not.
def isListOfEvenIntegers(theList):
"""
indicates whether value of argument is a list of only even integers
Note: empty list should return True---it doesn't contain anything that
ISN'T an even integer
theList can be anything, and it will return either True or False.
"""
return "stub"
### @@@ NOW, write a function called totalLength(x)
### @@@ Use the accumulator pattern to compute the total length
### @@@ of all the words in a string
### @@@ The accumulator will be an integer that starts at zero.
### @@@ You'll use a for loop to look at each item in the list
def totalLength(listOfStrings):
"""
returns total length of all the strings in a list of strings,
False if argument not a list, 0 for empty list
"""
return "stub"
### @@@ NOW, write a function called lengthOfEach
### @@@ Use the accumulator pattern to make a list of each of
### @@@ the lengths of the words
### @@@ You'll use a for loop, starting the list as an empty list
def lengthOfEach(listOfStrings):
"""
    given list of strings, returns list of ints corresponding
to length of each string, otherwise False.
empty list yields empty list.
"""
return "stub"
### @@@ NOW, write a function called countEvens
### @@@ Use the accumulator pattern, starting at zero
### @@@ and add one each time you find an even number
def countEvens(listOfInts):
"""
given a list of ints, counts even ints in list.
Otherwise, returns False.
returns 0 for empty list, or for a list of ints with no evens in it.
"""
return "stub"
### @@@ NOW, write a function called onlyEvens
### @@@ Use the accumulator pattern, starting with an empty list.
### @@@ Use a for loop to traverse the list. Each time you find an item
### @@@ if it isn't an int, return False---otherwise, if it is even, add
### @@@ it to your accumulated list.
def onlyEvens(listOfInts):
"""
given a list of ints, return new list with only the even ones.
Otherwise, return false.
empty list yields empty list
"""
return "stub"
| 28.888889
| 96
| 0.667385
|
# tests for lab06, UCSB, CMPSC 8, Summer 2017
# Instructor: P. Conrad
# Student(s): (insert name here)
# @@@ This next function has an error. Can you fix it?
# @@@ Hint: you might need to use the "and" or "or" keywords of python
# @@@ and modify the if test.
def notStringContainingE(word):
"""
return True when word is a string that contains no letter 'e' (or 'E')
It should work both for lower and upper case.
When word isn't a string, return True
    (because it is not a string containing an E)
"""
if not(type(word)==str):
return True
for letter in word:
if letter == 'e':
return False
return True
#@@@ Here is a function definition that doesn't pass one or more of its tests.
#@@@ Fix it! (Also try to understand why it is wrong)
def hasNoX(word):
"""
return True when word is a string that contains no letter 'x'
(and no letter 'X')
It should work both for lower and upper case.
When word isn't a string, return True (because it is not a string
with an x in that case!)
"""
if (type(word)!=str):
return True
for letter in word:
if letter != 'x' and letter != 'X':
return True
return False
# The following function is provided for you as an example
# of how to write a Python function that checks if EVERY element
# of a list has some property.
def isNumber(item):
" return True if item is of type int or type float otherwise False "
return "stub" # HINT You already did this on in a previous lab
def isListOfNumber(theList):
"""
indicates whether value of argument is a list of only simple numbers
(int or float)
Note: empty list should return True---it doesn't contain anything that
ISN'T a simple number
theList can be anything, and the function will return either True or False.
"""
if not type(theList)==list:
return False # it isn't really a list!
# Now we can assume that theList really is a list
# But is it a list of all numerics?
# If we find even a single item that isn't numeric, we can
# immediately return false.
for item in theList:
if not isNumber(item):
return False
# If we get here and didn't return yet, then we know everything
# in the list is a simple numeric!
# (i.e. there isn't anything in the list that is NOT simple numeric)
return True
### @@@ NOW, write a function called isListOfIntegers(x)
### @@@ The function should take anything as an argument, and produce True
### @@@ only if argument is a list consisting of only int values
### @@@ similar to the comments above the other function definitions in this file
### @@@ See previous function for a clue as to how to proceed
### @@@ Note that empty list should return True (for same reasoning as in the previous function)
def isListOfIntegers(theList):
"""
indicates whether value of argument is a list of only int
Note: empty list should return True
because it doesn't contain anything that ISN'T int
theList can be anything, and it will return either True or False.
"""
return "stub"
### @@@ NOW, write a function called isListOfEvenIntegers(x)
### @@@ The function should take anything as an argument, and produce True
### @@@ only if argument is a list consisting of only int values that
### @@@ are even. See previous function for a clue as to how to proceed
### @@@ Note that empty list should return True
### @@@ HINT: to avoid problems when using the % operator
### @@@ (that's another hint), use your isListOfIntegers function first.
### @@@ This is sort of like the way that isListOfSimpleNumeric
### @@@ checks first to see if theList is a list.
### @@@ That way, you kill two birds with one stone---you immediately
### @@@ know that you are working with a list of integers, and you
### @@@ only have to worry about whether all of them are even or not.
def isListOfEvenIntegers(theList):
"""
indicates whether value of argument is a list of only even integers
Note: empty list should return True---it doesn't contain anything that
ISN'T an even integer
theList can be anything, and it will return either True or False.
"""
return "stub"
### @@@ NOW, write a function called totalLength(x)
### @@@ Use the accumulator pattern to compute the total length
### @@@ of all the words in a string
### @@@ The accumulator will be an integer that starts at zero.
### @@@ You'll use a for loop to look at each item in the list
def totalLength(listOfStrings):
"""
returns total length of all the strings in a list of strings,
False if argument not a list, 0 for empty list
"""
return "stub"
### @@@ NOW, write a function called lengthOfEach
### @@@ Use the accumulator pattern to make a list of each of
### @@@ the lengths of the words
### @@@ You'll use a for loop, starting the list as an empty list
def lengthOfEach(listOfStrings):
"""
    given list of strings, returns list of ints corresponding
to length of each string, otherwise False.
empty list yields empty list.
"""
return "stub"
### @@@ NOW, write a function called countEvens
### @@@ Use the accumulator pattern, starting at zero
### @@@ and add one each time you find an even number
def countEvens(listOfInts):
"""
given a list of ints, counts even ints in list.
Otherwise, returns False.
returns 0 for empty list, or for a list of ints with no evens in it.
"""
return "stub"
### @@@ NOW, write a function called onlyEvens
### @@@ Use the accumulator pattern, starting with an empty list.
### @@@ Use a for loop to traverse the list. Each time you find an item
### @@@ if it isn't an int, return False---otherwise, if it is even, add
### @@@ it to your accumulated list.
def onlyEvens(listOfInts):
"""
given a list of ints, return new list with only the even ones.
Otherwise, return false.
empty list yields empty list
"""
return "stub"
def test_onlyEvens_1():
    assert onlyEvens('1')==False
def test_onlyEvens_2():
    assert onlyEvens(['a','b'])==False
def test_onlyEvens_3():
    assert onlyEvens([])==[]
def test_onlyEvens_4():
    assert onlyEvens([1,2,3,4,5])==[2, 4]
def test_onlyEvens_5():
    assert onlyEvens([1])==[]
def test_onlyEvens_6():
    assert onlyEvens([1,3])==[]
def test_onlyEvens_7():
    assert onlyEvens([3,2])==[2]
def test_onlyEvens_8():
    assert onlyEvens([2,3,4])==[2, 4]
| 284
| 0
| 185
|
ffa97e68a616803fb3c0b4d35e6fca09a9443725
| 7,791
|
py
|
Python
|
tests/unit/test_taxonomy.py
|
ianbakst/tamr-client
|
ae7a6190a2251d31f973f9d5a7170ac5dc097f97
|
[
"Apache-2.0"
] | 9
|
2019-08-13T11:07:06.000Z
|
2022-01-14T18:15:13.000Z
|
tests/unit/test_taxonomy.py
|
ianbakst/tamr-client
|
ae7a6190a2251d31f973f9d5a7170ac5dc097f97
|
[
"Apache-2.0"
] | 166
|
2019-08-09T18:51:05.000Z
|
2021-12-02T15:24:15.000Z
|
tests/unit/test_taxonomy.py
|
ianbakst/tamr-client
|
ae7a6190a2251d31f973f9d5a7170ac5dc097f97
|
[
"Apache-2.0"
] | 21
|
2019-08-12T15:37:31.000Z
|
2021-06-15T14:06:23.000Z
|
from functools import partial
import json
from unittest import TestCase
from requests import HTTPError
import responses
from tamr_unify_client import Client
from tamr_unify_client.auth import UsernamePasswordAuth
from tamr_unify_client.categorization.category.collection import CategoryCollection
from tamr_unify_client.categorization.category.resource import Category, CategorySpec
from tamr_unify_client.categorization.taxonomy import Taxonomy
from tamr_unify_client.project.resource import Project
| 34.626667
| 88
| 0.586061
|
from functools import partial
import json
from unittest import TestCase
from requests import HTTPError
import responses
from tamr_unify_client import Client
from tamr_unify_client.auth import UsernamePasswordAuth
from tamr_unify_client.categorization.category.collection import CategoryCollection
from tamr_unify_client.categorization.category.resource import Category, CategorySpec
from tamr_unify_client.categorization.taxonomy import Taxonomy
from tamr_unify_client.project.resource import Project
class TestTaxonomy(TestCase):
def setUp(self):
auth = UsernamePasswordAuth("username", "password")
self.tamr = Client(auth)
@responses.activate
def test_categories(self):
cat_url = (
"http://localhost:9100/api/versioned/v1/projects/1/taxonomy/categories"
)
responses.add(responses.GET, cat_url, json=self._categories_json)
t = Taxonomy(self.tamr, self._taxonomy_json)
c = list(t.categories())
cats = [
Category(self.tamr, self._categories_json[0]),
Category(self.tamr, self._categories_json[1]),
]
self.assertEqual(repr(c), repr(cats))
@responses.activate
def test_by_id(self):
cat_url = (
"http://localhost:9100/api/versioned/v1/projects/1/taxonomy/categories/1"
)
responses.add(responses.GET, cat_url, json=self._categories_json[0])
c = CategoryCollection(self.tamr, "projects/1/taxonomy/categories")
r = c.by_relative_id("projects/1/taxonomy/categories/1")
self.assertEqual(r._data, self._categories_json[0])
r = c.by_resource_id("1")
self.assertEqual(r._data, self._categories_json[0])
self.assertRaises(NotImplementedError, c.by_external_id, "1")
@responses.activate
def test_create(self):
post_url = (
"http://localhost:9100/api/versioned/v1/projects/1/taxonomy/categories"
)
responses.add(responses.POST, post_url, json=self._categories_json[0])
alias = "projects/1/taxonomy/categories"
coll = CategoryCollection(self.tamr, alias)
creation_spec = {
"name": self._categories_json[0]["name"],
"path": self._categories_json[0]["path"],
}
c = coll.create(creation_spec)
self.assertEqual(alias + "/1", c.relative_id)
@responses.activate
def test_create_from_spec(self):
def create_callback(request, snoop):
snoop["payload"] = json.loads(request.body)
return 201, {}, json.dumps(self._categories_json[0])
post_url = (
"http://localhost:9100/api/versioned/v1/projects/1/taxonomy/categories"
)
snoop_dict = {}
responses.add_callback(
responses.POST, post_url, partial(create_callback, snoop=snoop_dict)
)
alias = "projects/1/taxonomy/categories"
coll = CategoryCollection(self.tamr, alias)
json_spec = {
"name": self._categories_json[0]["name"],
"path": self._categories_json[0]["path"],
}
spec = (
CategorySpec.new()
.with_name(self._categories_json[0]["name"])
.with_path(self._categories_json[0]["path"])
)
coll.create(spec.to_dict())
self.assertEqual(snoop_dict["payload"], json_spec)
@responses.activate
def test_bulk_create(self):
def create_callback(request, snoop):
snoop["payload"] = request.body
return 200, {}, json.dumps(self._bulk_json)
post_url = (
"http://localhost:9100/api/versioned/v1/projects/1/taxonomy/categories:bulk"
)
snoop_dict = {}
responses.add_callback(
responses.POST, post_url, partial(create_callback, snoop=snoop_dict)
)
alias = "projects/1/taxonomy/categories"
coll = CategoryCollection(self.tamr, alias)
creation_specs = [
{
"name": self._categories_json[0]["name"],
"path": self._categories_json[0]["path"],
},
{
"name": self._categories_json[1]["name"],
"path": self._categories_json[1]["path"],
},
]
j = coll.bulk_create(creation_specs)
self.assertEqual(j, self._bulk_json)
sent = []
for line in snoop_dict["payload"].split(b"\n"):
sent.append(json.loads(line))
self.assertEqual(sent, creation_specs)
@responses.activate
def test_delete(self):
url = "http://localhost:9100/api/versioned/v1/projects/1/taxonomy"
responses.add(responses.GET, url, json=self._taxonomy_json)
responses.add(responses.DELETE, url, status=204)
responses.add(responses.GET, url, status=404)
project = Project(
self.tamr, {"type": "CATEGORIZATION"}, "projects/1"
).as_categorization()
taxonomy = project.taxonomy()
self.assertEqual(taxonomy._data, self._taxonomy_json)
response = taxonomy.delete()
self.assertEqual(response.status_code, 204)
self.assertRaises(HTTPError, project.taxonomy)
@responses.activate
def test_delete_category(self):
url = "http://localhost:9100/api/versioned/v1/projects/1/taxonomy/categories/1"
responses.add(responses.GET, url, json=self._categories_json[0])
responses.add(responses.DELETE, url, status=204)
responses.add(responses.GET, url, status=404)
categories = CategoryCollection(self.tamr, "projects/1/taxonomy/categories")
category = categories.by_resource_id("1")
self.assertEqual(category._data, self._categories_json[0])
response = category.delete()
self.assertEqual(response.status_code, 204)
self.assertRaises(HTTPError, lambda: categories.by_resource_id("1"))
_taxonomy_json = {
"id": "unify://unified-data/v1/projects/1/taxonomy",
"name": "Test Taxonomy",
"created": {
"username": "admin",
"time": "2019-07-12T13:09:14.981Z",
"version": "405",
},
"lastModified": {
"username": "admin",
"time": "2019-07-12T13:09:14.981Z",
"version": "405",
},
"relativeId": "projects/1/taxonomy",
}
_categories_json = [
{
"id": "unify://unified-data/v1/projects/1/taxonomy/categories/1",
"name": "t1",
"description": "",
"parent": "",
"path": ["t1"],
"created": {
"username": "admin",
"time": "2019-07-12T13:10:52.988Z",
"version": "414",
},
"lastModified": {
"username": "admin",
"time": "2019-07-12T13:10:52.988Z",
"version": "414",
},
"relativeId": "projects/1/taxonomy/categories/1",
},
{
"id": "unify://unified-data/v1/projects/1/taxonomy/categories/2",
"name": "t2",
"description": "",
"parent": "unify://unified-data/v1/projects/1/taxonomy/categories/1",
"path": ["t1", "t2"],
"created": {
"username": "admin",
"time": "2019-07-12T13:51:20.600Z",
"version": "419",
},
"lastModified": {
"username": "admin",
"time": "2019-07-12T13:51:20.600Z",
"version": "419",
},
"relativeId": "projects/1/taxonomy/categories/2",
},
]
_bulk_json = {
"numCommandsProcessed": 2,
"allCommandsSucceeded": True,
"validationErrors": [],
}
| 4,982
| 2,282
| 23
|
7369f037084a3962ddf0eee3df3221a505a6fe8d
| 1,619
|
py
|
Python
|
Wrapping/Generators/Python/Tests/FlatStructuringElement.py
|
ltmakela/ITK
|
21f48c6d98e21ecece09be16a747221d7094d8a9
|
[
"Apache-2.0"
] | 4
|
2015-05-22T03:47:43.000Z
|
2016-06-16T20:57:21.000Z
|
Wrapping/Generators/Python/Tests/FlatStructuringElement.py
|
GEHC-Surgery/ITK
|
f5df62749e56c9036e5888cfed904032ba5fdfb7
|
[
"Apache-2.0"
] | null | null | null |
Wrapping/Generators/Python/Tests/FlatStructuringElement.py
|
GEHC-Surgery/ITK
|
f5df62749e56c9036e5888cfed904032ba5fdfb7
|
[
"Apache-2.0"
] | 9
|
2016-06-23T16:03:12.000Z
|
2022-03-31T09:25:08.000Z
|
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
import itk
from sys import argv, exit
itk.auto_progress(2)
if argv[2] == "Ball":
print "Ball"
strel = itk.FlatStructuringElement[2].Ball( int( argv[3] ) )
elif argv[2] == "Box":
print "Box"
strel = itk.FlatStructuringElement[2].Box( int( argv[3] ) )
elif argv[2] == "FromImage":
print "FromImage"
reader = itk.ImageFileReader.IUC2.New( FileName=argv[3] )
strel = itk.FlatStructuringElement[2].FromImageUC( reader.GetOutput() )
else:
print "invalid arguement: " + argv[2]
exit(1)
img = strel.GetImageUC()
size = itk.size( img )
for y in range(0, size.GetElement(1)):
for x in range(0, size.GetElement(0)):
if img.GetPixel( [x, y] ):
print "X",
else:
print " ",
print "\n",
itk.write( img, argv[1] )
# writer = itk.ImageFileWriter.IUC2.New(FileName=argv[1], Input=img )
# itk.echo(writer)
# writer.Update()
| 31.134615
| 77
| 0.622607
|
#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
import itk
from sys import argv, exit
itk.auto_progress(2)
if argv[2] == "Ball":
print "Ball"
strel = itk.FlatStructuringElement[2].Ball( int( argv[3] ) )
elif argv[2] == "Box":
print "Box"
strel = itk.FlatStructuringElement[2].Box( int( argv[3] ) )
elif argv[2] == "FromImage":
print "FromImage"
reader = itk.ImageFileReader.IUC2.New( FileName=argv[3] )
strel = itk.FlatStructuringElement[2].FromImageUC( reader.GetOutput() )
else:
print "invalid arguement: " + argv[2]
exit(1)
img = strel.GetImageUC()
size = itk.size( img )
for y in range(0, size.GetElement(1)):
for x in range(0, size.GetElement(0)):
if img.GetPixel( [x, y] ):
print "X",
else:
print " ",
print "\n",
itk.write( img, argv[1] )
# writer = itk.ImageFileWriter.IUC2.New(FileName=argv[1], Input=img )
# itk.echo(writer)
# writer.Update()
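# --- Editor's note: example invocations (a sketch; argv[1] is the output image) ---
# python FlatStructuringElement.py out.png Ball 5
# python FlatStructuringElement.py out.png Box 3
# python FlatStructuringElement.py out.png FromImage kernel.png
# (Run under Python 2, given the print statements above.)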
| 0
| 0
| 0
|
e5f027cbb302d153a7c3344375fc208328785c24
| 145
|
py
|
Python
|
1071.py
|
barroslipe/urionlinejudge
|
a20d8199d9a92b30ea394a6c949967d2fc51aa34
|
[
"MIT"
] | null | null | null |
1071.py
|
barroslipe/urionlinejudge
|
a20d8199d9a92b30ea394a6c949967d2fc51aa34
|
[
"MIT"
] | null | null | null |
1071.py
|
barroslipe/urionlinejudge
|
a20d8199d9a92b30ea394a6c949967d2fc51aa34
|
[
"MIT"
] | null | null | null |
x = int(input())
y = int(input())
if x > y:
x, y = y, x
soma = 0
for i in range(x+1, y, 1):
if i%2 != 0:
soma += i
print(soma)
| 12.083333
| 26
| 0.448276
|
x = int(input())
y = int(input())
if x > y:
x, y = y, x
soma = 0
for i in range(x+1, y, 1):
if i%2 != 0:
soma += i
print(soma)
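# Editor's note, a worked example: with inputs 6 and 12 the loop visits
# 7..11 and sums the odd values 7 + 9 + 11 = 27.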
| 0
| 0
| 0
|
a632fc8a11dc7f1808f06cdcb2bf2ece87cfe09f
| 2,554
|
py
|
Python
|
gethouse/models.py
|
Alvin-21/patanyumba
|
4084d8b25a1685fa3a1ce8a57f782fa01bc794cb
|
[
"MIT"
] | null | null | null |
gethouse/models.py
|
Alvin-21/patanyumba
|
4084d8b25a1685fa3a1ce8a57f782fa01bc794cb
|
[
"MIT"
] | null | null | null |
gethouse/models.py
|
Alvin-21/patanyumba
|
4084d8b25a1685fa3a1ce8a57f782fa01bc794cb
|
[
"MIT"
] | null | null | null |
from django.db import models
from cloudinary.models import CloudinaryField
from django.contrib.auth.models import User
# Create your models here.
| 29.697674
| 89
| 0.667189
|
from django.db import models
from cloudinary.models import CloudinaryField
from django.contrib.auth.models import User
# Create your models here.
class Amenities(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class Accomodation(models.Model):
PROPERTY_TYPE_VALUES = (
('House', 'House'),
('Apartment', 'Apartment'),
)
LEN_OF_STAY_VALUES = (
('1 week', '1 week'),
('2 weeks', '2 weeks'),
('1 month', '1 month'),
('2 months', '2 months'),
('3 months', '3 months'),
('4 months', '4 months'),
('6 months', '6 months'),
('9 months', '9 months'),
('12 months +', '12 months +'),
)
user = models.ForeignKey(User, on_delete=models.CASCADE)
image = CloudinaryField('image', null=True)
title = models.CharField(max_length=50)
description = models.CharField(max_length=2000)
address = models.CharField(max_length=150)
type_of_property = models.CharField(choices=PROPERTY_TYPE_VALUES, max_length=100)
rent = models.IntegerField()
bedrooms = models.PositiveIntegerField()
bathrooms = models.PositiveIntegerField()
amenities = models.ManyToManyField(Amenities)
number_of_residents = models.PositiveIntegerField()
date_available = models.DateField()
minimum_length_of_stay = models.CharField(choices=LEN_OF_STAY_VALUES, max_length=100)
def __str__(self):
return self.title
def save_accom(self):
self.save()
def delete_accom(self):
self.delete()
@classmethod
def search_by_address(cls, search_term):
accomodation = cls.objects.filter(address__icontains=search_term)
return accomodation
@classmethod
def get_accom_by_id(cls, accom_id):
accom = cls.objects.get(id=accom_id)
return accom
class Profile(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
image = CloudinaryField('image', null=True)
bio = models.CharField(max_length=200)
email = models.EmailField()
number = models.CharField(max_length=10, blank=True)
def __str__(self):
return self.first_name + " " + self.last_name
def save_profile(self):
self.save()
def delete_profile(self):
self.delete()
class SubscriptionRecipients(models.Model):
name = models.CharField(max_length=30)
email = models.EmailField()
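# --- Editor's usage sketch; assumes migrations are applied and rows exist ---
def _demo_search():
    hits = Accomodation.search_by_address('nairobi')  # case-insensitive substring match
    for accom in hits:
        print(accom.title, accom.rent)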
| 389
| 1,920
| 92
|
85709feb51b43f2818c30daf6bbb587cb3a6b7ce
| 295
|
py
|
Python
|
file-response/main.py
|
Nivratti/fastapi
|
d3f8c750ae15201b50be80d998cfafe6e8d155e9
|
[
"Apache-2.0"
] | null | null | null |
file-response/main.py
|
Nivratti/fastapi
|
d3f8c750ae15201b50be80d998cfafe6e8d155e9
|
[
"Apache-2.0"
] | null | null | null |
file-response/main.py
|
Nivratti/fastapi
|
d3f8c750ae15201b50be80d998cfafe6e8d155e9
|
[
"Apache-2.0"
] | null | null | null |
from fastapi import FastAPI
from fastapi.responses import FileResponse
app = FastAPI()
@app.get("/")
| 24.583333
| 57
| 0.728814
|
from fastapi import FastAPI
from fastapi.responses import FileResponse
app = FastAPI()
@app.get("/")
async def main():
    # sending it as an attachment
# specify filename to send it as attachment
some_file_path = "dog_bgr.png"
return FileResponse(some_file_path, filename="f.png")
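# --- Editor's usage sketch; assumes this file is saved as main.py and that
# dog_bgr.png exists next to it ---
# Run:  uvicorn main:app --reload
# Then, from a separate script or shell:
#     import requests
#     r = requests.get('http://127.0.0.1:8000/')
#     print(r.headers.get('content-disposition'))  # attachment; filename="f.png"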
| 170
| 0
| 22
|
344ecc1b03332848b6d7b6ff0acbdf7d2641f9c7
| 1,283
|
py
|
Python
|
backend/tradersplatform/article_comment/serializers.py
|
ybedirhanpak/bounswe2019group1
|
9572fd307345b3f842c2c2ff4426857086484ed5
|
[
"MIT"
] | 10
|
2019-02-14T14:53:49.000Z
|
2019-10-23T08:03:39.000Z
|
backend/tradersplatform/article_comment/serializers.py
|
ybedirhanpak/bounswe2019group1
|
9572fd307345b3f842c2c2ff4426857086484ed5
|
[
"MIT"
] | 364
|
2019-02-14T14:50:12.000Z
|
2022-02-10T13:43:09.000Z
|
backend/tradersplatform/article_comment/serializers.py
|
bounswe/bounswe2019group1
|
9572fd307345b3f842c2c2ff4426857086484ed5
|
[
"MIT"
] | 8
|
2019-05-05T20:04:31.000Z
|
2020-12-24T16:44:54.000Z
|
from rest_framework.serializers import ModelSerializer
from article_comment.models import ArticleComment
from myuser.serializers import TempUserListSerializer
| 24.207547
| 54
| 0.466095
|
from rest_framework.serializers import ModelSerializer
from article_comment.models import ArticleComment
from myuser.serializers import TempUserListSerializer
class ArticleCommentCreateSerializer(ModelSerializer):
class Meta:
model = ArticleComment
fields = [
'id',
'text',
'user',
'article',
'created_date',
]
class ArticleCommentUpdateSerializer(ModelSerializer):
class Meta:
model = ArticleComment
fields = [
'id',
'text',
'user',
'article',
'created_date',
]
extra_kwargs = {"text":
{"required": False},
"user":
{"required": False},
"article":
{"required": False},
"created_date":
{"required": False},
}
class ArticleCommentListSerializer(ModelSerializer):
user = TempUserListSerializer()
class Meta:
model = ArticleComment
fields = [
'id',
'text',
'user',
'article',
'created_date',
]
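# --- Editor's usage sketch; the field values are placeholders and assume
# valid user/article primary keys in the database ---
def _demo_round_trip():
    payload = {'text': 'Nice analysis', 'user': 1, 'article': 1,
               'created_date': '2019-11-01T12:00:00Z'}
    ser = ArticleCommentCreateSerializer(data=payload)
    if ser.is_valid():
        comment = ser.save()
        # the list serializer nests the author via TempUserListSerializer
        return ArticleCommentListSerializer(comment).data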
| 0
| 1,051
| 69
|
ac43f58b8c8a982a75d5c10502ff59e958530ecc
| 6,556
|
py
|
Python
|
promt_tr/promt_tr.py
|
ffreemt/promt-tr-free
|
ff20b0f176f9611fa5a834af5aeaa9ef6ca3a3ee
|
[
"MIT"
] | null | null | null |
promt_tr/promt_tr.py
|
ffreemt/promt-tr-free
|
ff20b0f176f9611fa5a834af5aeaa9ef6ca3a3ee
|
[
"MIT"
] | null | null | null |
promt_tr/promt_tr.py
|
ffreemt/promt-tr-free
|
ff20b0f176f9611fa5a834af5aeaa9ef6ca3a3ee
|
[
"MIT"
] | null | null | null |
'''
promt translate for free as in beer
'''
from typing import Any, Callable, Dict, Tuple
import sys
import logging
import json
from time import time
from random import randint
import pytest # type: ignore
# import mock
import urllib3
from ratelimit import limits, sleep_and_retry # type: ignore
import requests
from fuzzywuzzy import fuzz, process # type: ignore
import coloredlogs # type: ignore
from jmespath import search # type: ignore
urllib3.disable_warnings()
# logging.captureWarnings(True)
# logging.getLogger('requests.packages.urllib3.connectionpool').level = 30
LOGGER = logging.getLogger(__name__)
FMT = '%(filename)-14s[%(lineno)-3d] %(message)s [%(funcName)s]'
coloredlogs.install(level=20, logger=LOGGER, fmt=FMT)
# en-ar en-zhcn
LANG_CODES = (
"ar,ca,zhcn,nl,fi,fr,de,el,he,hi,it,ja,kk,ko,pt,ru,es,tr,uk"
).split(',') + ['auto']
URL = (
'https://www.online-translator.com/'
'services/soap.asmx/GetTranslation'
)
UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17' # noqa
# HEADERS = {"User-Agent": UA}
HEADERS = {
'Host': 'www.online-translator.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Content-Type': 'application/json; charset=utf-8',
'X-Requested-With': 'XMLHttpRequest',
'Origin': 'https://www.online-translator.com',
# 'DNT': '1',
'Referer': 'https://www.online-translator.com/',
}
SESS = requests.Session()
SESS.get('https://www.online-translator.com/', verify=0)
def with_func_attrs(**attrs: Any) -> Callable:
    ''' with_func_attrs '''
    def with_attrs(fct: Callable) -> Callable:
        for key, val in attrs.items():
            setattr(fct, key, val)
        return fct
    return with_attrs
@with_func_attrs(text='')
def _promt_tr(
text: str,
from_lang: str = 'auto',
to_lang: str = 'zh',
timeout: Tuple[float, float] = (55, 66),
) -> Dict[str, str]:
''' promt_tr
text = 'test one two three'
from_lang = 'auto'
to_lang = 'zh'
timeout = (55, 66)
'''
try:
from_lang = from_lang.lower()
except Exception as exc: # pragma: no cover
LOGGER.warning("%s", exc)
from_lang = 'auto'
try:
to_lang = to_lang.lower()
except Exception as exc: # pragma: no cover
LOGGER.warning("%s", exc)
to_lang = 'zh'
if from_lang in ['zh', 'chinese', 'zhongwen']:
from_lang = 'zhcn'
if to_lang in ['zh', 'chinese', 'zhongwen']:
to_lang = 'zhcn'
try:
from_lang = process.extractOne(from_lang, LANG_CODES, scorer=fuzz.UWRatio)[0] # noqa
except Exception as exc: # pragma: no cover
LOGGER.warning("%s", exc)
from_lang = 'en'
try:
to_lang = process.extractOne(to_lang, LANG_CODES, scorer=fuzz.UWRatio)[0] # noqa
except Exception as exc: # pragma: no cover
LOGGER.warning("%s", exc)
to_lang = 'en'
if from_lang == 'auto':
from_lang = 'au'
if to_lang == 'auto': # pragma: no cover
to_lang = 'au'
dir_code = f'{from_lang}-{to_lang}'
data = {
'dirCode': dir_code,
# 'dirCode': 'de-en',
'template': 'General',
# 'text': 'Das sind drei Teste.',
'text': text,
'lang': 'en',
'limit': '3000',
'useAutoDetect': True,
'key': '123',
'ts': 'MainSite',
'tid': '',
'IsMobile': False
}
try:
resp = SESS.post( # type: ignore # data # expected "Union[None, bytes, MutableMapping[str, str], IO[Any]] # noqa
URL,
# data=data2,
data=json.dumps(data),
headers=HEADERS,
timeout=timeout,
)
resp.raise_for_status()
except Exception as exc: # pragma: no cover
LOGGER.error('%s', exc)
resp = requests.models.Response()
resp._content = f'{{"errorCode": "{exc}"}}'.encode()
resp.status_code = 499
try:
jdata = resp.json()
except Exception as exc: # pragma: no cover
LOGGER.error('%s', exc)
jdata = {'error': str(exc)}
promt_tr.text = resp.text
try:
# res = search('[0].translations[0].text', jdata)
res = search('d.result', jdata)
except Exception as exc: # pragma: no cover
LOGGER.error('%s', exc)
res = {'error': str(exc)}
return res
@sleep_and_retry
@limits(calls=30, period=20, raise_on_limit=True) # raise_on_limit probably superfluous
def _rl_promt_tr(*args, **kwargs):
''' be nice and throttle'''
    LOGGER.info(' rate limiting 30 calls/20 secs... ')
return _promt_tr(*args, **kwargs)
@with_func_attrs(calls=0, call_tick=-1)
def promt_tr(*args, **kwargs):
''' exempt first 200 calls from rate limiting '''
    # increase calls up to 210
if promt_tr.calls < 210:
promt_tr.calls += 1
# reset rate limit if the last call was 2 minutes ago
tick = time()
if tick - promt_tr.call_tick > 120:
promt_tr.calls = 1
promt_tr.call_tick = tick
if promt_tr.calls < 200:
return _promt_tr(*args, **kwargs)
return _rl_promt_tr(*args, **kwargs)
@pytest.mark.parametrize(
# 'to_lang', LANG_CODES
'to_lang', ['zh', 'de', 'fr', 'it', 'ko', 'ja', 'ru']
)
def test_sanity(to_lang):
'sanity test'
numb = str(randint(1, 10000))
text = 'test ' + numb
assert numb in promt_tr(text, to_lang=to_lang)
def test_calls():
''' test calls '''
_ = promt_tr('test ')
calls = promt_tr.calls
_ = promt_tr('test ')
assert promt_tr.calls == calls + 1
def main(): # pragma: no cover
''' main '''
text = sys.argv[1:]
text1 = ''
if not text:
print(' Provide something to translate, testing with some random text\n')
text = 'test tihs and that' + str(randint(1, 1000))
text1 = 'test tihs and that' + str(randint(1, 1000))
print(f'{text} translated to:')
for to_lang in ['zh', 'de', 'fr', ]:
print(f'{to_lang}: {promt_tr(text, to_lang=to_lang)}')
        if text1:
print(f'{to_lang}: {promt_tr(text1, to_lang=to_lang)}')
def init():
''' attempted to pytest __name__ == '__main__' '''
LOGGER.debug('__name__: %s', __name__)
if __name__ == '__main__':
sys.exit(main())
init()
# test_init()
| 27.779661
| 133
| 0.596858
|
'''
promt translate for free as in beer
'''
from typing import Any, Callable, Dict, Tuple
import sys
import logging
import json
from time import time
from random import randint
import pytest # type: ignore
# import mock
import urllib3
from ratelimit import limits, sleep_and_retry # type: ignore
import requests
from fuzzywuzzy import fuzz, process # type: ignore
import coloredlogs # type: ignore
from jmespath import search # type: ignore
urllib3.disable_warnings()
# logging.captureWarnings(True)
# logging.getLogger('requests.packages.urllib3.connectionpool').level = 30
LOGGER = logging.getLogger(__name__)
FMT = '%(filename)-14s[%(lineno)-3d] %(message)s [%(funcName)s]'
coloredlogs.install(level=20, logger=LOGGER, fmt=FMT)
# en-ar en-zhcn
LANG_CODES = (
"ar,ca,zhcn,nl,fi,fr,de,el,he,hi,it,ja,kk,ko,pt,ru,es,tr,uk"
).split(',') + ['auto']
URL = (
'https://www.online-translator.com/'
'services/soap.asmx/GetTranslation'
)
UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17' # noqa
# HEADERS = {"User-Agent": UA}
HEADERS = {
'Host': 'www.online-translator.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:70.0) Gecko/20100101 Firefox/70.0',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'Content-Type': 'application/json; charset=utf-8',
'X-Requested-With': 'XMLHttpRequest',
'Origin': 'https://www.online-translator.com',
# 'DNT': '1',
'Referer': 'https://www.online-translator.com/',
}
SESS = requests.Session()
SESS.get('https://www.online-translator.com/', verify=0)
def with_func_attrs(**attrs: Any) -> Callable:
''' with_func_attrs '''
def with_attrs(fct: Callable) -> Callable:
for key, val in attrs.items():
setattr(fct, key, val)
return fct
return with_attrs
@with_func_attrs(text='')
def _promt_tr(
text: str,
from_lang: str = 'auto',
to_lang: str = 'zh',
timeout: Tuple[float, float] = (55, 66),
) -> Dict[str, str]:
''' promt_tr
text = 'test one two three'
from_lang = 'auto'
to_lang = 'zh'
timeout = (55, 66)
'''
try:
from_lang = from_lang.lower()
except Exception as exc: # pragma: no cover
LOGGER.warning("%s", exc)
from_lang = 'auto'
try:
to_lang = to_lang.lower()
except Exception as exc: # pragma: no cover
LOGGER.warning("%s", exc)
to_lang = 'zh'
if from_lang in ['zh', 'chinese', 'zhongwen']:
from_lang = 'zhcn'
if to_lang in ['zh', 'chinese', 'zhongwen']:
to_lang = 'zhcn'
try:
from_lang = process.extractOne(from_lang, LANG_CODES, scorer=fuzz.UWRatio)[0] # noqa
except Exception as exc: # pragma: no cover
LOGGER.warning("%s", exc)
from_lang = 'en'
try:
to_lang = process.extractOne(to_lang, LANG_CODES, scorer=fuzz.UWRatio)[0] # noqa
except Exception as exc: # pragma: no cover
LOGGER.warning("%s", exc)
to_lang = 'en'
if from_lang == 'auto':
from_lang = 'au'
if to_lang == 'auto': # pragma: no cover
to_lang = 'au'
dir_code = f'{from_lang}-{to_lang}'
data = {
'dirCode': dir_code,
# 'dirCode': 'de-en',
'template': 'General',
# 'text': 'Das sind drei Teste.',
'text': text,
'lang': 'en',
'limit': '3000',
'useAutoDetect': True,
'key': '123',
'ts': 'MainSite',
'tid': '',
'IsMobile': False
}
try:
resp = SESS.post( # type: ignore # data # expected "Union[None, bytes, MutableMapping[str, str], IO[Any]] # noqa
URL,
# data=data2,
data=json.dumps(data),
headers=HEADERS,
timeout=timeout,
)
resp.raise_for_status()
except Exception as exc: # pragma: no cover
LOGGER.error('%s', exc)
resp = requests.models.Response()
resp._content = f'{{"errorCode": "{exc}"}}'.encode()
resp.status_code = 499
try:
jdata = resp.json()
except Exception as exc: # pragma: no cover
LOGGER.error('%s', exc)
jdata = {'error': str(exc)}
promt_tr.text = resp.text
try:
# res = search('[0].translations[0].text', jdata)
res = search('d.result', jdata)
except Exception as exc: # pragma: no cover
LOGGER.error('%s', exc)
res = {'error': str(exc)}
return res
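# --- Illustrative note (hypothetical data, not from the original module):
# jmespath's search() walks nested keys, so a successful response shaped like
# {'d': {'result': ...}} reduces to the translated string:
assert search('d.result', {'d': {'result': 'hallo'}}) == 'hallo'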
@sleep_and_retry
@limits(calls=30, period=20, raise_on_limit=True) # raise_on_limit probably superfluous
def _rl_promt_tr(*args, **kwargs):
''' be nice and throttle'''
    LOGGER.info(' rate limiting 30 calls/20 secs... ')
return _promt_tr(*args, **kwargs)
@with_func_attrs(calls=0, call_tick=-1)
def promt_tr(*args, **kwargs):
''' exempt first 200 calls from rate limiting '''
    # increase calls up to 210
if promt_tr.calls < 210:
promt_tr.calls += 1
# reset rate limit if the last call was 2 minutes ago
tick = time()
if tick - promt_tr.call_tick > 120:
promt_tr.calls = 1
promt_tr.call_tick = tick
if promt_tr.calls < 200:
return _promt_tr(*args, **kwargs)
return _rl_promt_tr(*args, **kwargs)
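# --- Illustrative note (no network call; not part of the original module):
# the counters live on the function object itself, courtesy of
# with_func_attrs, and hold their defaults until the first request is made.
assert promt_tr.calls == 0 and promt_tr.call_tick == -1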
@pytest.mark.parametrize(
# 'to_lang', LANG_CODES
'to_lang', ['zh', 'de', 'fr', 'it', 'ko', 'ja', 'ru']
)
def test_sanity(to_lang):
'sanity test'
numb = str(randint(1, 10000))
text = 'test ' + numb
assert numb in promt_tr(text, to_lang=to_lang)
def test_calls():
''' test calls '''
_ = promt_tr('test ')
calls = promt_tr.calls
_ = promt_tr('test ')
assert promt_tr.calls == calls + 1
def main(): # pragma: no cover
''' main '''
    text = ' '.join(sys.argv[1:])
text1 = ''
if not text:
print(' Provide something to translate, testing with some random text\n')
text = 'test tihs and that' + str(randint(1, 1000))
text1 = 'test tihs and that' + str(randint(1, 1000))
print(f'{text} translated to:')
for to_lang in ['zh', 'de', 'fr', ]:
print(f'{to_lang}: {promt_tr(text, to_lang=to_lang)}')
        if text1:
print(f'{to_lang}: {promt_tr(text1, to_lang=to_lang)}')
def init():
''' attempted to pytest __name__ == '__main__' '''
LOGGER.debug('__name__: %s', __name__)
if __name__ == '__main__':
sys.exit(main())
init()
# test_init()
| 114
| 0
| 26
|
ec66d8df79c61b3d2ed3ec367d9cc7908a22013a
| 878
|
py
|
Python
|
kite-go/navigation/offline/experiments/quip-issues/relevant.py
|
kiteco/kiteco-public
|
74aaf5b9b0592153b92f7ed982d65e15eea885e3
|
[
"BSD-3-Clause"
] | 17
|
2022-01-10T11:01:50.000Z
|
2022-03-25T03:21:08.000Z
|
kite-go/navigation/offline/experiments/quip-issues/relevant.py
|
kiteco/kiteco-public
|
74aaf5b9b0592153b92f7ed982d65e15eea885e3
|
[
"BSD-3-Clause"
] | 1
|
2022-01-13T14:28:47.000Z
|
2022-01-13T14:28:47.000Z
|
kite-go/navigation/offline/experiments/quip-issues/relevant.py
|
kiteco/kiteco-public
|
74aaf5b9b0592153b92f7ed982d65e15eea885e3
|
[
"BSD-3-Clause"
] | 7
|
2022-01-07T03:58:10.000Z
|
2022-03-24T07:38:20.000Z
|
import argparse
import json
from collections import defaultdict
from typing import Dict, List
if __name__ == "__main__":
main()
| 25.085714
| 74
| 0.65262
|
import argparse
import json
from collections import defaultdict
from typing import Dict, List
def main() -> None:
args = parse_args()
with open(args.links, "r") as fp:
links = json.load(fp)
quip_issues = make_quip_issues(links)
with open(args.relevant_issues, "w") as f:
json.dump(quip_issues, f, indent=2)
def make_quip_issues(links: Dict[str, List[str]]) -> Dict[str, List[str]]:
quip_issues: Dict[str, List[str]] = defaultdict(list)
for x, ys in links.items():
for y in set(ys):
quip_issues[y.split("/")[-1]].append(x.split("/")[-1])
return quip_issues
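# --- Illustrative sketch (hypothetical link map, not from the original file):
# make_quip_issues inverts the mapping and keeps only the trailing URL segment
# of every key and value, deduplicating repeated targets via set().
_example = make_quip_issues({"q/docA": ["i/1", "i/2", "i/1"], "q/docB": ["i/1"]})
assert sorted(_example["1"]) == ["docA", "docB"] and _example["2"] == ["docA"]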
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("--links", type=str)
parser.add_argument("--relevant_issues", type=str)
return parser.parse_args()
if __name__ == "__main__":
main()
| 673
| 0
| 69
|
33190b249bfea8e389858313a9b36fc7c3e017ce
| 1,605
|
py
|
Python
|
ggtools/gg/static_models.py
|
richannan/GGTOOLS
|
7909da988d90de50c82532d97121a3fbcfc0263a
|
[
"MIT"
] | 22
|
2019-12-16T01:30:29.000Z
|
2022-03-01T08:57:07.000Z
|
ggtools/gg/static_models.py
|
richannan/GGTOOLS
|
7909da988d90de50c82532d97121a3fbcfc0263a
|
[
"MIT"
] | 3
|
2019-12-23T14:09:30.000Z
|
2022-03-29T01:52:53.000Z
|
ggtools/gg/static_models.py
|
richannan/GGTOOLS
|
7909da988d90de50c82532d97121a3fbcfc0263a
|
[
"MIT"
] | 13
|
2019-12-19T07:01:19.000Z
|
2022-03-14T11:26:36.000Z
|
from os import path,makedirs
from urllib.request import urlretrieve
def static_download(model):
'''
    Download a static gravity model from icgem.gfz-potsdam.de; if the file to be downloaded is already present in the download directory, the download is automatically skipped.
Usage:
static_download('GGM05C')
static_download('EIGEN-6C4')
Inputs:
    model -> [str] Available options are 'GGM05C' and 'EIGEN-6C4'.
Outputs: downloaded static gravity model
Examples:
>>> static_download('GGM05C')
Downloading the static gravity model GGM05C ... Finished
'static_models/GGM05C.gfc'
>>> static_download('EIGEN-6C4')
Downloading the static gravity model EIGEN-6C4 ... Finished
'static_models/EIGEN-6C4.gfc'
'''
direc = 'static_models/'
if not path.exists(direc): makedirs(direc)
if model == 'GGM05C':
gravity_file = direc + 'GGM05C.gfc'
url = 'http://icgem.gfz-potsdam.de/getmodel/gfc/778a683780a5b0ad3163f4772b97b9075a0a13c389d2bd8ea3f891b64cfa383d/GGM05C.gfc'
elif model == 'EIGEN-6C4':
gravity_file = direc + 'EIGEN-6C4.gfc'
url = 'http://icgem.gfz-potsdam.de/getmodel/gfc/7fd8fe44aa1518cd79ca84300aef4b41ddb2364aef9e82b7cdaabdb60a9053f1/EIGEN-6C4.gfc'
else:
raise Exception('Currently, available static gravity models are GGM05C and EIGEN-6C4.')
if not path.exists(gravity_file):
print('Downloading the static gravity model '+ model,end=' ... ')
urlretrieve(url, gravity_file)
print('Finished')
return gravity_file
| 38.214286
| 175
| 0.688474
|
from os import path,makedirs
from urllib.request import urlretrieve
def static_download(model):
'''
    Download a static gravity model from icgem.gfz-potsdam.de; if the file to be downloaded is already present in the download directory, the download is automatically skipped.
Usage:
static_download('GGM05C')
static_download('EIGEN-6C4')
Inputs:
    model -> [str] Available options are 'GGM05C' and 'EIGEN-6C4'.
Outputs: downloaded static gravity model
Examples:
>>> static_download('GGM05C')
Downloading the static gravity model GGM05C ... Finished
'static_models/GGM05C.gfc'
>>> static_download('EIGEN-6C4')
Downloading the static gravity model EIGEN-6C4 ... Finished
'static_models/EIGEN-6C4.gfc'
'''
direc = 'static_models/'
if not path.exists(direc): makedirs(direc)
if model == 'GGM05C':
gravity_file = direc + 'GGM05C.gfc'
url = 'http://icgem.gfz-potsdam.de/getmodel/gfc/778a683780a5b0ad3163f4772b97b9075a0a13c389d2bd8ea3f891b64cfa383d/GGM05C.gfc'
elif model == 'EIGEN-6C4':
gravity_file = direc + 'EIGEN-6C4.gfc'
url = 'http://icgem.gfz-potsdam.de/getmodel/gfc/7fd8fe44aa1518cd79ca84300aef4b41ddb2364aef9e82b7cdaabdb60a9053f1/EIGEN-6C4.gfc'
else:
raise Exception('Currently, available static gravity models are GGM05C and EIGEN-6C4.')
if not path.exists(gravity_file):
print('Downloading the static gravity model '+ model,end=' ... ')
urlretrieve(url, gravity_file)
print('Finished')
return gravity_file
| 0
| 0
| 0
|
5fd7c795a966a620873f6dbbef744b63cf0773db
| 427
|
py
|
Python
|
projects/migrations/0020_auto_20170131_0419.py
|
18F/acquisitions.18f.gov
|
7ef7091fd65b4b6797ddeb1c1f56def29522c43b
|
[
"CC0-1.0"
] | 3
|
2016-11-27T05:02:52.000Z
|
2017-01-31T17:36:36.000Z
|
projects/migrations/0020_auto_20170131_0419.py
|
18F/acquisitions.18f.gov
|
7ef7091fd65b4b6797ddeb1c1f56def29522c43b
|
[
"CC0-1.0"
] | 61
|
2016-11-05T00:27:34.000Z
|
2017-09-15T23:37:58.000Z
|
projects/migrations/0020_auto_20170131_0419.py
|
18F/acquisitions.18f.gov
|
7ef7091fd65b4b6797ddeb1c1f56def29522c43b
|
[
"CC0-1.0"
] | 2
|
2017-07-14T06:21:26.000Z
|
2021-02-14T11:53:05.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-31 04:19
from __future__ import unicode_literals
from django.db import migrations
| 20.333333
| 48
| 0.604215
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-31 04:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0019_auto_20170131_0412'),
]
operations = [
migrations.RenameField(
model_name='buy',
old_name='dollars',
new_name='budget',
),
]
| 0
| 256
| 23
|
df77b7f033135104bc001ca98a8adb61a14c3d27
| 31
|
py
|
Python
|
resources/sound/__init__.py
|
Keshav-cs/Genetic-Algorithm-on-Super-Mario-Bros
|
1a115c6b4ac3345875c8530d8a6ea044c33c403e
|
[
"MIT"
] | 418
|
2015-01-05T19:31:18.000Z
|
2022-03-27T03:05:33.000Z
|
resources/sound/__init__.py
|
Keshav-cs/Genetic-Algorithm-on-Super-Mario-Bros
|
1a115c6b4ac3345875c8530d8a6ea044c33c403e
|
[
"MIT"
] | 3
|
2021-04-29T19:58:05.000Z
|
2021-05-01T05:15:02.000Z
|
resources/sound/__init__.py
|
Keshav-cs/Genetic-Algorithm-on-Super-Mario-Bros
|
1a115c6b4ac3345875c8530d8a6ea044c33c403e
|
[
"MIT"
] | 157
|
2015-01-05T19:06:29.000Z
|
2022-01-16T22:55:37.000Z
|
__author__ = 'justinarmstrong'
| 15.5
| 30
| 0.806452
|
__author__ = 'justinarmstrong'
| 0
| 0
| 0
|
76d764d3ec7df55c3465bdeb774b3494cc0fb43a
| 321
|
py
|
Python
|
chapter3/fixed_input_agent.py
|
yuishihara/probabilistic_robotics_implementations
|
91115260cb95697f89b1413d49dd45ebe3014a53
|
[
"MIT"
] | null | null | null |
chapter3/fixed_input_agent.py
|
yuishihara/probabilistic_robotics_implementations
|
91115260cb95697f89b1413d49dd45ebe3014a53
|
[
"MIT"
] | null | null | null |
chapter3/fixed_input_agent.py
|
yuishihara/probabilistic_robotics_implementations
|
91115260cb95697f89b1413d49dd45ebe3014a53
|
[
"MIT"
] | null | null | null |
from agent import Agent
| 24.692308
| 52
| 0.635514
|
from agent import Agent
class FixedInputAgent(Agent):
def __init__(self, robot, vel=0.2, omega=0.0):
super(FixedInputAgent, self).__init__(robot)
self._vel = vel
self._omega = omega
def act(self, delta_t):
ut = (self._vel, self._omega)
self._robot.one_step(ut, delta_t)
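# --- Illustrative usage sketch (hypothetical robot stub, not part of the
# original file): every act() call forwards the same fixed (vel, omega)
# input to the robot:
#
#     class StubRobot:
#         def one_step(self, ut, delta_t):
#             print(ut, delta_t)            # prints (0.2, 0.1) 0.1
#
#     FixedInputAgent(StubRobot(), vel=0.2, omega=0.1).act(0.1)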
| 212
| 8
| 76
|
ef57af46c6b2e9ab1c578fab32e7696057a50f5d
| 1,011
|
py
|
Python
|
python/tests/testpickle.py
|
seisman/mspass
|
11bd292a778a2a0d8470734239a7347fe4a1c0a7
|
[
"BSD-3-Clause"
] | 1
|
2021-10-18T10:02:13.000Z
|
2021-10-18T10:02:13.000Z
|
python/tests/testpickle.py
|
seisman/mspass
|
11bd292a778a2a0d8470734239a7347fe4a1c0a7
|
[
"BSD-3-Clause"
] | null | null | null |
python/tests/testpickle.py
|
seisman/mspass
|
11bd292a778a2a0d8470734239a7347fe4a1c0a7
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
sys.path.append('/home/pavlis/src/mspass/python')
from mspasspy.ccore import CoreSeismogram
d=CoreSeismogram(200)
d.put_double('delta',1.0)
d.put_double('dt',1.0)
d.put('npts',200)
d.ns=200
d.t0=100.0
d.live=True
from mspasspy.ccore import Seismogram
d2=Seismogram(d,'invalid')
import pickle
x=pickle.dumps(d2)
print("pickle succeeded")
print("trying to restore")
d3=pickle.loads(x)
print("pickle loads completed")
print("data npts=",d.get_int('npts'))
print('same stored in struct of BasicTimeSeries=',d.ns)
print('data t0=',d.t0)
print('Now testing pickle for TimeSeries data')
from mspasspy.ccore import TimeSeries
from mspasspy.ccore import CoreTimeSeries
d0=CoreTimeSeries(500)
d0.live=True
d0.dt=1.0
d0.ns=500
d=TimeSeries(d0,'invalid')
s=pickle.dumps(d)
print('Pickle dumps succeeded')
print('size of string returned by pickle=',len(s))
print('Trying loads')
dr=pickle.loads(s)
print('Finished - BasicTimeSeries attributes in output')
print('ns=',dr.ns)
print('dt=',dr.dt)
print('t0=',dr.t0)
| 25.275
| 56
| 0.753709
|
import sys
sys.path.append('/home/pavlis/src/mspass/python')
from mspasspy.ccore import CoreSeismogram
d=CoreSeismogram(200)
d.put_double('delta',1.0)
d.put_double('dt',1.0)
d.put('npts',200)
d.ns=200
d.t0=100.0
d.live=True
from mspasspy.ccore import Seismogram
d2=Seismogram(d,'invalid')
import pickle
x=pickle.dumps(d2)
print("pickle succeeded")
print("trying to restore")
d3=pickle.loads(x)
print("pickle loads completed")
print("data npts=",d.get_int('npts'))
print('same stored in struct of BasicTimeSeries=',d.ns)
print('data t0=',d.t0)
print('Now testing pickle for TimeSeries data')
from mspasspy.ccore import TimeSeries
from mspasspy.ccore import CoreTimeSeries
d0=CoreTimeSeries(500)
d0.live=True
d0.dt=1.0
d0.ns=500
d=TimeSeries(d0,'invalid')
s=pickle.dumps(d)
print('Pickle dumps succeeded')
print('size of string returned by pickle=',len(s))
print('Trying loads')
dr=pickle.loads(s)
print('Finished - BasicTimeSeries attributes in output')
print('ns=',dr.ns)
print('dt=',dr.dt)
print('t0=',dr.t0)
| 0
| 0
| 0
|
c8ef1749db9bd82ca80ff83f68f94b743fdbc0d8
| 28,838
|
py
|
Python
|
pipeline/slices.py
|
PartumSomnia/bns_ppr_tools
|
b02bab870bb54171bc0d0cd7e07bfb50e978e7dd
|
[
"MIT"
] | null | null | null |
pipeline/slices.py
|
PartumSomnia/bns_ppr_tools
|
b02bab870bb54171bc0d0cd7e07bfb50e978e7dd
|
[
"MIT"
] | 4
|
2019-12-01T18:42:45.000Z
|
2019-12-07T10:59:37.000Z
|
pipeline/slices.py
|
PartumSomnia/bns_ppr_tools
|
b02bab870bb54171bc0d0cd7e07bfb50e978e7dd
|
[
"MIT"
] | null | null | null |
from __future__ import division
#from sys import path
#path.append('modules/')
import os.path
import click
import h5py
from argparse import ArgumentParser
from math import pi, log10
import sys
from scidata.utils import locate
import scidata.carpet.hdf5 as h5
from scidata.carpet.interp import Interpolator
import numpy as np
from glob import glob
import sys
sys.path.append("..")
from plotting.plotting_methods import PLOT_MANY_TASKS
from uutils import Printcolor, REFLEVEL_LIMITS
import config as Paths
from module_slices.slices_methods import COMPUTE_STORE
from module_slices.add_q_r_t_to_prof_xyxz import add_q_r_t_to_prof_xyxz
from module_slices.slices_dens_modes import compute_density_modes
__movie__ = "ffmpeg -framerate 10 -pattern_type glob -i '{}*.png' -s:v 1280x720 " \
"-c:v libx264 -module_profile:v high -crf 20 -pix_fmt yuv420p {}"
__tasklist__ = ["plot", "movie", "addm0", "dm"]
__reflevels__ = [0, 1, 2, 3, 4, 5, 6]
__outdirname__ = "module_slices"
__planes__ = ["xy", "xz"]
if __name__ == '__main__':
parser = ArgumentParser(description="postprocessing pipeline")
parser.add_argument("-s", dest="sim", required=True, help="name of the simulation dir")
parser.add_argument("-t", dest="tasklist", nargs='+', required=False, default=[], help="tasks to perform")
#
parser.add_argument("--v_n", dest="v_ns", nargs='+', required=False, default=[], help="variable names to compute")
parser.add_argument("--time", dest="times", nargs='+', required=False, default=[], help="times to iterate over [ms]")
parser.add_argument("--it", dest="it", nargs='+', required=False, default=[],
help="iterations to use ")
parser.add_argument("--rl", dest="reflevels", nargs='+', required=False, default=[], help="reflevels to use")
parser.add_argument('--plane', dest="plane", required=False, nargs='+', default=[], help='Plane: xy,xz,yz for slice analysis')
#
parser.add_argument("-o", dest="outdir", required=False, default=None, help="path for output dir")
parser.add_argument("-i", dest="indir", required=False, default=None, help="path to simulation dir")
parser.add_argument("-p", dest="path_to_profs", required=False, default=None, help="path to 3D profiles")
parser.add_argument("--overwrite", dest="overwrite", required=False, default="no", help="overwrite if exists")
#
args = parser.parse_args()
glob_sim = args.sim
glob_indir = args.indir
glob_outdir = args.outdir
glob_tasklist = args.tasklist
glob_overwrite = args.overwrite
glob_v_ns = args.v_ns
glob_times =args.times
glob_it = args.it
glob_reflevels = args.reflevels
glob_planes = args.plane
#
glob_profxyxz_path = args.path_to_profs#Paths.ppr_sims+glob_sim+'/profiles/'
#
if glob_indir is None:
glob_indir = Paths.default_data_dir + glob_sim + '/'
if not os.path.isdir(glob_indir):
raise IOError("Default path to simulation data is not valid: {}".format(glob_indir))
if not os.path.isdir(glob_indir):
raise IOError("Path to simulation data is not valid: {}".format(glob_indir))
if glob_outdir is None:
glob_outdir = Paths.default_ppr_dir + glob_sim + '/'
        if not os.path.isdir(glob_outdir):
            raise IOError("Default path to postprocessed data is not valid: {}".format(glob_outdir))
    if not os.path.isdir(glob_outdir):
        raise IOError("Path to postprocessed data is not valid: {}".format(glob_outdir))
if len(glob_tasklist) == 0:
raise NameError("tasklist is empty. Set what tasks to perform with '-t' option")
else:
for task in glob_tasklist:
if task not in __tasklist__:
raise NameError("task: {} is not among available ones: {}"
.format(task, __tasklist__))
if glob_overwrite == "no":
glob_overwrite = False
elif glob_overwrite == "yes":
glob_overwrite = True
else:
raise NameError("for '--overwrite' option use 'yes' or 'no'. Given: {}"
.format(glob_overwrite))
# glob_outdir_sim = Paths.ppr_sims + glob_sim
# if not os.path.isdir(glob_outdir_sim):
# os.mkdir(glob_outdir_sim)
# check plane
if len(glob_planes) == 0:
raise IOError("Option --plane unfilled")
elif len(glob_planes) == 1 and "all" in glob_planes:
glob_planes = __planes__
elif len(glob_planes) > 1:
for plane in glob_planes:
if not plane in __planes__:
raise NameError("plane:{} is not in the list of the __d3slicesplanes__:{}"
.format(plane, __planes__))
# set globals
# Paths.gw170817 = glob_simdir
# Paths.ppr_sims = glob_outdir
if len(glob_tasklist) == 1 and "all" in glob_tasklist:
        # do all tasks
pass
o_slice = COMPUTE_STORE(glob_sim, indir=glob_indir, pprdir=glob_outdir)
# deal with iterations and timesteps -- available as well as required by user
do_all_iterations = False
if len(glob_it) == 0 and len(glob_times) == 0:
raise IOError("please specify timesteps to use '--time' or iterations '--it' ")
elif len(glob_it) != 0 and len(glob_times) != 0:
raise IOError("please specify Either timesteps to use '--time' or iterations '--it' (not both)")
elif len(glob_times) == 0 and len(glob_it) == 1 and "all" in glob_it:
do_all_iterations = True
glob_times = o_slice.times
glob_it = o_slice.iterations
elif len(glob_it) == 0 and len(glob_times) == 1 and "all" in glob_times:
do_all_iterations = True
glob_times = o_slice.times
glob_it = o_slice.iterations
elif len(glob_it) > 0 and not "all" in glob_it and len(glob_times) == 0:
glob_it = np.array(glob_it, dtype=int) # array of iterations
glob_times = []
for it in glob_it:
glob_times.append(o_slice.get_time_for_it(it, "overall", "d2"))
glob_times = np.array(glob_times, dtype=float)
elif len(glob_times) > 0 and not "all" in glob_times and len(glob_it) == 0:
glob_times = np.array(glob_times, dtype=float) / 1e3 # back to seconds
else:
raise IOError("input times and iterations are not recognized: --time {} --it {}"
.format(glob_times, glob_it))
    # deal with reflevels -- available as well as required by user
do_all_reflevels = False
if len(glob_reflevels) == 1 and "all" in glob_reflevels:
glob_reflevels = __reflevels__
do_all_reflevels = True
else:
glob_reflevels = np.array(glob_reflevels, dtype=int)
# deal with variable names -- available as well as required by user
do_all_v_ns = False
if len(glob_v_ns) == 1 and "all" in glob_v_ns:
glob_v_ns=o_slice.list_v_ns
do_all_v_ns = True
else:
pass
    # summarize what is available and what is required
if do_all_v_ns or do_all_iterations or do_all_reflevels:
Printcolor.yellow("Selected all", comma=True)
if do_all_iterations:
Printcolor.print_colored_string(["timesteps", "({})".format(len(glob_times))],
["blue", "green"], comma=True)
if do_all_v_ns: Printcolor.print_colored_string(["v_ns", "({})".format(len(glob_v_ns))],
["blue", "green"], comma=True)
if do_all_reflevels: Printcolor.print_colored_string(["reflevels", "({})".format(len(glob_reflevels))],
["blue", "green"], comma=True)
Printcolor.yellow("this might take time.")
# if not click.confirm(text="Confirm?",default=True,show_default=True):
# exit(0)
# perform tasks
do_tasks(glob_v_ns)
| 42.59675
| 154
| 0.515396
|
from __future__ import division
#from sys import path
#path.append('modules/')
import os.path
import click
import h5py
from argparse import ArgumentParser
from math import pi, log10
import sys
from scidata.utils import locate
import scidata.carpet.hdf5 as h5
from scidata.carpet.interp import Interpolator
import numpy as np
from glob import glob
import sys
sys.path.append("..")
from plotting.plotting_methods import PLOT_MANY_TASKS
from uutils import Printcolor, REFLEVEL_LIMITS
import config as Paths
from module_slices.slices_methods import COMPUTE_STORE
from module_slices.add_q_r_t_to_prof_xyxz import add_q_r_t_to_prof_xyxz
from module_slices.slices_dens_modes import compute_density_modes
__movie__ = "ffmpeg -framerate 10 -pattern_type glob -i '{}*.png' -s:v 1280x720 " \
"-c:v libx264 -module_profile:v high -crf 20 -pix_fmt yuv420p {}"
def __plot_data_for_a_slice(o_slice, v_n, it, t, rl, outdir):
# ---
data_arr = o_slice.get_data_rl(it, "xz", rl, v_n)
x_arr = o_slice.get_grid_v_n_rl(it, "xz", rl, "x")
z_arr = o_slice.get_grid_v_n_rl(it, "xz", rl, "z")
def_dic_xz = {'task': 'colormesh',
'ptype': 'cartesian', 'aspect': 1.,
'xarr': x_arr, "yarr": z_arr, "zarr": data_arr,
'position': (1, 1), # 'title': '[{:.1f} ms]'.format(time_),
'cbar': {'location': 'right .03 -0.125', 'label': r'$\rho$ [geo]', # 'fmt': '%.1e',
'labelsize': 14,
'fontsize': 14},
'v_n_x': 'x', 'v_n_y': 'z', 'v_n': 'rho',
'xmin': None, 'xmax': None, 'ymin': None, 'ymax': None, 'vmin': 1e-10, 'vmax': 1e-4,
'fill_vmin': False, # fills the x < vmin with vmin
'xscale': None, 'yscale': None,
'mask': None, 'cmap': 'inferno_r', 'norm': "log", # 'inferno_r'
'fancyticks': True,
'title': {"text": r'${}$ [ms]'.format(0), 'fontsize': 14},
'sharex': True, # removes angular citkscitks
'fontsize': 14,
'labelsize': 14
}
data_arr = o_slice.get_data_rl(it, "xy", rl, v_n)
x_arr = o_slice.get_grid_v_n_rl(it, "xy", rl, "x")
y_arr = o_slice.get_grid_v_n_rl(it, "xy", rl, "y")
def_dic_xy = {'task': 'colormesh',
'ptype': 'cartesian', 'aspect': 1.,
'xarr': x_arr, "yarr": y_arr, "zarr": data_arr,
'position': (2, 1), # 'title': '[{:.1f} ms]'.format(time_),
'cbar': {},
'v_n_x': 'x', 'v_n_y': 'y', 'v_n': 'rho',
'xmin': None, 'xmax': None, 'ymin': None, 'ymax': None, 'vmin': 1e-10, 'vmax': 1e-4,
'fill_vmin': False, # fills the x < vmin with vmin
'xscale': None, 'yscale': None,
'mask': None, 'cmap': 'inferno_r', 'norm': "log",
'fancyticks': True,
'title': {},
'sharex': False, # removes angular citkscitks
'fontsize': 14,
'labelsize': 14
}
# setting scales and limits for data
if v_n == "rho":
def_dic_xz['v_n'] = 'rho'
def_dic_xz['vmin'] = 1e-10
def_dic_xz['vmax'] = 1e-4
def_dic_xz['cbar']['label'] = r'$\rho$ [geo]'
def_dic_xz['cmap'] = 'Greys_r'
def_dic_xy['v_n'] = 'rho'
def_dic_xy['vmin'] = 1e-10
def_dic_xy['vmax'] = 1e-4
def_dic_xy['cmap'] = 'Greys_r'
elif v_n == "dens_unbnd":
def_dic_xz['v_n'] = 'rho'
def_dic_xz['vmin'] = 1e-13
def_dic_xz['vmax'] = 1e-6
def_dic_xz['cbar']['label'] = r'$D_{\rm{unb}}$ [geo]'
def_dic_xy['v_n'] = 'rho'
def_dic_xy['vmin'] = 1e-13
def_dic_xy['vmax'] = 1e-6
elif v_n == "Y_e":
def_dic_xz['v_n'] = 'Y_e'
def_dic_xz['vmin'] = 0.05
def_dic_xz['vmax'] = 0.5
def_dic_xz['cbar']['label'] = r'$Y_e$ [geo]'
def_dic_xz['norm'] = "linear"
def_dic_xz['cmap'] = 'inferno'
def_dic_xy['v_n'] = 'Y_e'
def_dic_xy['vmin'] = 0.05
def_dic_xy['vmax'] = 0.5
def_dic_xy['norm'] = "linear"
def_dic_xy['cmap'] = 'inferno'
elif v_n == "temp" or v_n == "temperature":
def_dic_xz['v_n'] = 'temperature'
def_dic_xz['vmin'] = 1e-2
def_dic_xz['vmax'] = 1e2
def_dic_xz['cbar']['label'] = r'$Temperature$ [geo]'
def_dic_xy['v_n'] = 'temperature'
def_dic_xy['vmin'] = 1e-2
def_dic_xy['vmax'] = 1e2
elif v_n == 'entropy' or v_n == "s_phi":
def_dic_xz['v_n'] = 'entropy'
def_dic_xz['vmin'] = 1e-1
def_dic_xz['vmax'] = 1e2
def_dic_xz['cbar']['label'] = r'$Entropy$ [geo]'
def_dic_xy['v_n'] = 'entropy'
def_dic_xy['vmin'] = 1e-1
def_dic_xy['vmax'] = 1e2
elif v_n == "Q_eff_nua":
def_dic_xz['v_n'] = 'Q_eff_nua'
def_dic_xz['vmin'] = 1e-18
def_dic_xz['vmax'] = 1e-14
def_dic_xz['cbar']['label'] = r'$Q_eff_nua$ [geo]'.replace('_', '\_')
def_dic_xy['v_n'] = 'Q_eff_nua'
def_dic_xy['vmin'] = 1e-18
def_dic_xy['vmax'] = 1e-14
elif v_n == "Q_eff_nue":
def_dic_xz['v_n'] = 'Q_eff_nue'
def_dic_xz['vmin'] = 1e-18
def_dic_xz['vmax'] = 1e-14
def_dic_xz['cbar']['label'] = r'$Q_eff_nue$ [geo]'.replace('_', '\_')
def_dic_xy['v_n'] = 'Q_eff_nue'
def_dic_xy['vmin'] = 1e-18
def_dic_xy['vmax'] = 1e-14
elif v_n == "Q_eff_nux":
def_dic_xz['v_n'] = 'Q_eff_nux'
def_dic_xz['vmin'] = 1e-18
def_dic_xz['vmax'] = 1e-14
def_dic_xz['cbar']['label'] = r'$Q_eff_nux$ [geo]'.replace('_', '\_')
def_dic_xy['v_n'] = 'Q_eff_nux'
def_dic_xy['vmin'] = 1e-18
def_dic_xy['vmax'] = 1e-14
elif v_n == "R_eff_nua":
def_dic_xz['v_n'] = 'R_eff_nua'
def_dic_xz['vmin'] = 1e-9
def_dic_xz['vmax'] = 1e-5
def_dic_xz['cbar']['label'] = r'$R_eff_nua$ [geo]'.replace('_', '\_')
def_dic_xy['v_n'] = 'R_eff_nue'
def_dic_xy['vmin'] = 1e-9
def_dic_xy['vmax'] = 1e-5
elif v_n == "R_eff_nue":
def_dic_xz['v_n'] = 'R_eff_nue'
def_dic_xz['vmin'] = 1e-9
def_dic_xz['vmax'] = 1e-5
def_dic_xz['cbar']['label'] = r'$R_eff_nue$ [geo]'.replace('_', '\_')
def_dic_xy['v_n'] = 'R_eff_nue'
def_dic_xy['vmin'] = 1e-9
def_dic_xy['vmax'] = 1e-5
elif v_n == "R_eff_nux":
def_dic_xz['v_n'] = 'R_eff_nux'
def_dic_xz['vmin'] = 1e-9
def_dic_xz['vmax'] = 1e-5
def_dic_xz['cbar']['label'] = r'$R_eff_nux$ [geo]'.replace('_', '\_')
def_dic_xy['v_n'] = 'R_eff_nux'
def_dic_xy['vmin'] = 1e-9
def_dic_xy['vmax'] = 1e-5
elif v_n == "optd_0_nua":
def_dic_xz['v_n'] = 'optd_0_nua'
def_dic_xz['vmin'] = 1e-5
def_dic_xz['vmax'] = 1e-2
def_dic_xz['cbar']['label'] = r'$optd_0_nua$ [geo]'.replace('_', '\_')
# def_dic_xz['norm'] = "linear"
def_dic_xz['cmap'] = 'inferno'
def_dic_xy['v_n'] = 'optd_0_nua'
def_dic_xy['vmin'] = 1e-5
def_dic_xy['vmax'] = 1e-1
# def_dic_xy['norm'] = "linear"
def_dic_xy['cmap'] = 'inferno'
elif v_n == "optd_0_nue":
def_dic_xz['v_n'] = 'optd_0_nue'
def_dic_xz['vmin'] = 1e-5
def_dic_xz['vmax'] = 1e-2
def_dic_xz['cbar']['label'] = r'$optd_0_nue$ [geo]'.replace('_', '\_')
# def_dic_xz['norm'] = "linear"
def_dic_xz['cmap'] = 'inferno'
def_dic_xy['v_n'] = 'optd_0_nue'
def_dic_xy['vmin'] = 1e-5
def_dic_xy['vmax'] = 1e-1
# def_dic_xy['norm'] = "linear"
def_dic_xy['cmap'] = 'inferno'
else: raise NameError("v_n:{} not recognized".format(v_n))
#
contour_dic_xy = {
'task': 'contour',
'ptype': 'cartesian', 'aspect': 1.,
'xarr': x_arr, "yarr": y_arr, "zarr": data_arr, 'levels': [1.e13 / 6.176e+17],
'position': (2, 1), # 'title': '[{:.1f} ms]'.format(time_),
'colors':['black'], 'lss':["-"], 'lws':[1.],
'v_n_x': 'x', 'v_n_y': 'y', 'v_n': 'rho',
'xscale': None, 'yscale': None,
'fancyticks': True,
'sharex': False, # removes angular citkscitks
'fontsize': 14,
'labelsize': 14}
# setting boundaries for plots
xmin, xmax, ymin, ymax, zmin, zmax = REFLEVEL_LIMITS.get(rl)
def_dic_xy['xmin'], def_dic_xy['xmax'] = xmin, xmax
def_dic_xy['ymin'], def_dic_xy['ymax'] = ymin, ymax
def_dic_xz['xmin'], def_dic_xz['xmax'] = xmin, xmax
def_dic_xz['ymin'], def_dic_xz['ymax'] = zmin, zmax
if not os.path.isdir(outdir):
raise IOError("Outdir does not exists".format(outdir))
# plotting
o_plot = PLOT_MANY_TASKS()
o_plot.gen_set["figdir"] = outdir
o_plot.gen_set["type"] = "cartesian"
o_plot.gen_set["dpi"] = 128
o_plot.gen_set["figsize"] = (4.2, 8.0) # <->, |] # to match hists with (8.5, 2.7)
o_plot.gen_set["figname"] = "{0:07d}.png".format(int(it))
o_plot.gen_set["sharex"] = False
o_plot.gen_set["sharey"] = False
o_plot.gen_set["subplots_adjust_h"] = -0.35
o_plot.gen_set["subplots_adjust_w"] = 0.2
o_plot.gen_set['style'] = 'dark_background'
o_plot.set_plot_dics = []
def_dic_xz["it"] = int(it)
def_dic_xz["title"]["text"] = r'$t:{:.1f}ms$'.format(float(t * 1e3))
o_plot.set_plot_dics.append(def_dic_xz)
def_dic_xy["it"] = int(it)
o_plot.set_plot_dics.append(def_dic_xy)
if v_n == "rho":
o_plot.set_plot_dics.append(contour_dic_xy)
# plot reflevel boundaries
for rl in range(o_slice.nlevels):
try:
x_arr = o_slice.get_grid_v_n_rl(it, "xy", rl, "x")
y_arr = o_slice.get_grid_v_n_rl(it, "xy", rl, "y")
x_b = [x_arr.min(), x_arr.max()]
y_b = [y_arr.min(), y_arr.max()]
#
for x_b_line, y_b_line in zip([[x_b[0], x_b[-1]], [x_b[0], x_b[0]], [x_b[0], x_b[-1]], [x_b[-1], x_b[-1]]],
[[y_b[0], y_b[0]], [y_b[0], y_b[-1]], [y_b[-1], y_b[-1]], [y_b[-1], y_b[0]]]):
#
contour_dic_xy = {
'task': 'line',
'ptype': 'cartesian', 'aspect': 1.,
'xarr': x_b_line, "yarr": y_b_line,
'position': (2, 1), # 'title': '[{:.1f} ms]'.format(time_),
'color': 'cyan', 'ls': "-", 'lw': 1., 'alpha': 1., 'ds': 'default',
'v_n_x': 'x', 'v_n_y': 'y', 'v_n': 'rho',
'xscale': None, 'yscale': None,
'fancyticks': True,
'sharex': False, # removes angular citkscitks
'fontsize': 14,
'labelsize': 14}
o_plot.set_plot_dics.append(contour_dic_xy)
#
x_arr = o_slice.get_grid_v_n_rl(it, "xz", rl, "x")
z_arr = o_slice.get_grid_v_n_rl(it, "xz", rl, "z")
x_b = [x_arr.min(), x_arr.max()]
z_b = [z_arr.min(), z_arr.max()]
#
for x_b_line, z_b_line in zip([[x_b[0], x_b[-1]], [x_b[0], x_b[0]], [x_b[0], x_b[-1]], [x_b[-1], x_b[-1]]],
[[z_b[0], z_b[0]], [z_b[0], z_b[-1]], [z_b[-1], z_b[-1]], [z_b[-1], z_b[0]]]):
#
contour_dic_xz = {
'task': 'line',
'ptype': 'cartesian', 'aspect': 1.,
'xarr': x_b_line, "yarr": z_b_line,
'position': (1, 1), # 'title': '[{:.1f} ms]'.format(time_),
'color': 'cyan', 'ls': "-", 'lw': 1., 'alpha': 1., 'ds': 'default',
'v_n_x': 'x', 'v_n_y': 'y', 'v_n': 'rho',
'xscale': None, 'yscale': None,
'fancyticks': True,
'sharex': False, # removes angular citkscitks
'fontsize': 14,
'labelsize': 14}
o_plot.set_plot_dics.append(contour_dic_xz)
except IndexError:
Printcolor.print_colored_string(["it:", str(it), "rl:", str(rl), "IndexError"],
["blue", "green", "blue", "green", "red"])
o_plot.main()
o_plot.set_plot_dics = []
# plotfpath = outdir + "{0:07d}.png".format(int(it))
# if True:
# if (os.path.isfile(plotfpath) and rewrite) or not os.path.isfile(plotfpath):
# if os.path.isfile(plotfpath): os.remove(plotfpath)
# Printcolor.print_colored_string(
# ["task:", "plot slice", "t:", "{:.1f} [ms] ({:d}/{:d})".format(t*1e3, i, len(list_times)),
# "rl:", "{}".format(rl), "v_n:", v_n, ':', "plotting"],
# ["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "green"]
# )
# # --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
#
# def_dic_xz["it"] = int(it)
# def_dic_xz["title"]["text"] = r'$t:{:.1f}ms$'.format(float(t*1e3))
# o_plot.set_plot_dics.append(def_dic_xz)
#
# def_dic_xy["it"] = int(it)
# o_plot.set_plot_dics.append(def_dic_xy)
#
# o_plot.main()
# o_plot.set_plot_dics = []
#
# # --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
# else:
# Printcolor.print_colored_string(
# ["task:", "plot slice", "t:", "{:.1f} [ms] ({:d}/{:d})".format(t * 1e3, i, len(list_times)), "rl:",
# "{}".format(rl), "v_n:", v_n, ':', "skipping"],
# ["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "blue"]
# )
#
# # except KeyboardInterrupt:
# # exit(1)
# else:
# Printcolor.print_colored_string(
# ["task:", "plot slice", "t:", "{:.1f} [ms] ({:d}/{:d})".format(t * 1e3, i, len(list_times)), "rl:",
# "{}".format(rl), "v_n:", v_n, ':', "failed"],
# ["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "red"]
# )
def plot_selected_data(o_slice, v_ns, times, rls, rootdir, rewrite=False):
_, d2it, d2t = o_slice.get_ittime("overall", d1d2d3prof="d2")
if len(d2it) == 0:
raise ValueError("No d2 data found in ittime.h5")
for t in times:
if t > d2t.max():
raise ValueError("given t:{} is above max time available:{}"
.format(t, d2t.max()))
if t < d2t.min():
raise ValueError("given t:{} is below min time available:{}"
.format(t, d2t.min()))
i = 1
for t in times:
nearest_time = o_slice.get_nearest_time(t, d1d2d3="d2")
it = o_slice.get_it_for_time(nearest_time, d1d2d3="d2")
for v_n in v_ns:
outdir_ = rootdir + v_n + '/'
if not os.path.isdir(outdir_):
os.mkdir(outdir_)
for rl in rls:
outdir__ = outdir_ + str("rl_{:d}".format(rl)) + '/'
if not os.path.isdir(outdir__):
os.mkdir(outdir__)
plotfpath = outdir__ + "{0:07d}.png".format(int(it))
if True:
if (os.path.isfile(plotfpath) and rewrite) or not os.path.isfile(plotfpath):
if os.path.isfile(plotfpath): os.remove(plotfpath)
Printcolor.print_colored_string(
["task:", "plot slice", "t:", "{:.1f} [ms] ({:d}/{:d})".format(t * 1e3, i, len(times)),
"rl:", "{}".format(rl), "v_n:", v_n, ':', "plotting"],
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "green"]
)
# --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
__plot_data_for_a_slice(o_slice, v_n, it, t, rl, outdir__)
# --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
else:
Printcolor.print_colored_string(
["task:", "plot slice", "t:", "{:.1f} [ms] ({:d}/{:d})".format(t * 1e3, i, len(times)),
"rl:",
"{}".format(rl), "v_n:", v_n, ':', "skipping"],
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "blue"]
)
# except KeyboardInterrupt:
# exit(1)
# except:
# Printcolor.print_colored_string(
# ["task:", "plot slice", "t:", "{:.1f} [ms] ({:d}/{:d})".format(t * 1e3, i, len(times)),
# "rl:",
# "{}".format(rl), "v_n:", v_n, ':', "failed"],
# ["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "red"]
# )
sys.stdout.flush()
i += 1
def make_movie(v_ns, rls, rootdir, rewrite=False):
rewrite = True
for v_n in v_ns:
outdir_ = rootdir + v_n + '/'
if not os.path.isdir(outdir_):
os.mkdir(outdir_)
for rl in rls:
outdir__ = outdir_ + str("rl_{:d}".format(rl)) + '/'
if not os.path.isdir(outdir__):
os.mkdir(outdir__)
fname = "{}_rl{}.mp4".format(v_n, rl)
moviefath = outdir__ + fname
            nfiles = len(glob(outdir__ + "*.png"))
if nfiles < 1:
Printcolor.red("No plots found to make a movie in: {}".format(outdir__))
break
try:
if (os.path.isfile(moviefath) and rewrite) or not os.path.isfile(moviefath):
if os.path.isfile(moviefath): os.remove(moviefath)
Printcolor.print_colored_string(
["task:", "movie slice", "N files", "{:d}".format(nfiles),
"rl:", "{}".format(rl), "v_n:", v_n, ':', "plotting"],
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "green"]
)
# --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
                    # ffmpeg -framerate 10 -pattern_type glob -i "*.png" -s:v 1280x720 -c:v libx264 -profile:v high -crf 20 -pix_fmt yuv420p dt.mp4
os.system(__movie__.format(outdir__, outdir__ + fname))
# --- --- --- --- --- --- --- --- --- --- --- --- --- --- --- ---
else:
Printcolor.print_colored_string(
["task:", "movie slice", "N files", "{:d}".format(nfiles),
"rl:",
"{}".format(rl), "v_n:", v_n, ':', "skipping"],
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "blue"]
)
except KeyboardInterrupt:
exit(1)
except:
Printcolor.print_colored_string(
["task:", "plot slice", "N files", "{:d}".format(nfiles),
"rl:",
"{}".format(rl), "v_n:", v_n, ':', "failed"],
["blue", "green", "blue", "green", "blue", "green", "blue", "green", "", "red"]
)
__tasklist__ = ["plot", "movie", "addm0", "dm"]
__reflevels__ = [0, 1, 2, 3, 4, 5, 6]
__outdirname__ = "module_slices"
__planes__ = ["xy", "xz"]
def do_tasks(glob_v_ns):
for task in glob_tasklist:
# do tasks one by one
if task == "plot":
assert len(glob_v_ns) > 0
assert len(glob_times) > 0
assert len(glob_reflevels) > 0
outdir = glob_outdir + __outdirname__ + '/'
if not os.path.isdir(outdir):
os.mkdir(outdir)
outdir += 'plots/'
if not os.path.isdir(outdir):
os.mkdir(outdir)
plot_selected_data(o_slice, glob_v_ns, glob_times, glob_reflevels, outdir, rewrite=glob_overwrite)
if task == "movie":
assert len(glob_v_ns) > 0
assert len(glob_times) > 0
assert len(glob_reflevels) > 0
outdir = glob_outdir + __outdirname__ + '/'
if not os.path.isdir(outdir):
os.mkdir(outdir)
outdir += 'movie/'
if not os.path.isdir(outdir):
os.mkdir(outdir)
plot_selected_data(o_slice, glob_v_ns, glob_times, glob_reflevels, outdir, rewrite=glob_overwrite)
assert len(glob_v_ns) > 0
assert len(glob_reflevels) > 0
outdir = glob_outdir + __outdirname__ + '/'
if not os.path.isdir(outdir):
os.mkdir(outdir)
outdir += 'movie/'
make_movie(glob_v_ns, glob_reflevels, outdir, rewrite=glob_overwrite)
if task == "addm0":
if len(glob_v_ns) == len(o_slice.list_v_ns):
glob_v_ns = o_slice.list_neut_v_ns
            print(glob_it)
add_q_r_t_to_prof_xyxz(
v_ns=glob_v_ns,
rls=glob_reflevels,
planes=glob_planes,
iterations=glob_it,
sim=glob_sim,
indir=glob_indir,
pprdir=glob_outdir,
path_to_sliced_profiles=glob_profxyxz_path,
overwrite=glob_overwrite
)
if task == "dm":
outdir = Paths.default_ppr_dir + glob_sim + '/' + __outdirname__ + '/'
compute_density_modes(o_slice, glob_reflevels, outdir, rewrite=glob_overwrite)
if __name__ == '__main__':
parser = ArgumentParser(description="postprocessing pipeline")
parser.add_argument("-s", dest="sim", required=True, help="name of the simulation dir")
parser.add_argument("-t", dest="tasklist", nargs='+', required=False, default=[], help="tasks to perform")
#
parser.add_argument("--v_n", dest="v_ns", nargs='+', required=False, default=[], help="variable names to compute")
parser.add_argument("--time", dest="times", nargs='+', required=False, default=[], help="times to iterate over [ms]")
parser.add_argument("--it", dest="it", nargs='+', required=False, default=[],
help="iterations to use ")
parser.add_argument("--rl", dest="reflevels", nargs='+', required=False, default=[], help="reflevels to use")
parser.add_argument('--plane', dest="plane", required=False, nargs='+', default=[], help='Plane: xy,xz,yz for slice analysis')
#
parser.add_argument("-o", dest="outdir", required=False, default=None, help="path for output dir")
parser.add_argument("-i", dest="indir", required=False, default=None, help="path to simulation dir")
parser.add_argument("-p", dest="path_to_profs", required=False, default=None, help="path to 3D profiles")
parser.add_argument("--overwrite", dest="overwrite", required=False, default="no", help="overwrite if exists")
#
args = parser.parse_args()
glob_sim = args.sim
glob_indir = args.indir
glob_outdir = args.outdir
glob_tasklist = args.tasklist
glob_overwrite = args.overwrite
glob_v_ns = args.v_ns
glob_times =args.times
glob_it = args.it
glob_reflevels = args.reflevels
glob_planes = args.plane
#
glob_profxyxz_path = args.path_to_profs#Paths.ppr_sims+glob_sim+'/profiles/'
#
if glob_indir is None:
glob_indir = Paths.default_data_dir + glob_sim + '/'
if not os.path.isdir(glob_indir):
raise IOError("Default path to simulation data is not valid: {}".format(glob_indir))
if not os.path.isdir(glob_indir):
raise IOError("Path to simulation data is not valid: {}".format(glob_indir))
if glob_outdir is None:
glob_outdir = Paths.default_ppr_dir + glob_sim + '/'
        if not os.path.isdir(glob_outdir):
            raise IOError("Default path to postprocessed data is not valid: {}".format(glob_outdir))
    if not os.path.isdir(glob_outdir):
        raise IOError("Path to postprocessed data is not valid: {}".format(glob_outdir))
if len(glob_tasklist) == 0:
raise NameError("tasklist is empty. Set what tasks to perform with '-t' option")
else:
for task in glob_tasklist:
if task not in __tasklist__:
raise NameError("task: {} is not among available ones: {}"
.format(task, __tasklist__))
if glob_overwrite == "no":
glob_overwrite = False
elif glob_overwrite == "yes":
glob_overwrite = True
else:
raise NameError("for '--overwrite' option use 'yes' or 'no'. Given: {}"
.format(glob_overwrite))
# glob_outdir_sim = Paths.ppr_sims + glob_sim
# if not os.path.isdir(glob_outdir_sim):
# os.mkdir(glob_outdir_sim)
# check plane
if len(glob_planes) == 0:
raise IOError("Option --plane unfilled")
elif len(glob_planes) == 1 and "all" in glob_planes:
glob_planes = __planes__
elif len(glob_planes) > 1:
for plane in glob_planes:
if not plane in __planes__:
raise NameError("plane:{} is not in the list of the __d3slicesplanes__:{}"
.format(plane, __planes__))
# set globals
# Paths.gw170817 = glob_simdir
# Paths.ppr_sims = glob_outdir
if len(glob_tasklist) == 1 and "all" in glob_tasklist:
        # do all tasks
pass
o_slice = COMPUTE_STORE(glob_sim, indir=glob_indir, pprdir=glob_outdir)
# deal with iterations and timesteps -- available as well as required by user
do_all_iterations = False
if len(glob_it) == 0 and len(glob_times) == 0:
raise IOError("please specify timesteps to use '--time' or iterations '--it' ")
elif len(glob_it) != 0 and len(glob_times) != 0:
raise IOError("please specify Either timesteps to use '--time' or iterations '--it' (not both)")
elif len(glob_times) == 0 and len(glob_it) == 1 and "all" in glob_it:
do_all_iterations = True
glob_times = o_slice.times
glob_it = o_slice.iterations
elif len(glob_it) == 0 and len(glob_times) == 1 and "all" in glob_times:
do_all_iterations = True
glob_times = o_slice.times
glob_it = o_slice.iterations
elif len(glob_it) > 0 and not "all" in glob_it and len(glob_times) == 0:
glob_it = np.array(glob_it, dtype=int) # array of iterations
glob_times = []
for it in glob_it:
glob_times.append(o_slice.get_time_for_it(it, "overall", "d2"))
glob_times = np.array(glob_times, dtype=float)
elif len(glob_times) > 0 and not "all" in glob_times and len(glob_it) == 0:
glob_times = np.array(glob_times, dtype=float) / 1e3 # back to seconds
else:
raise IOError("input times and iterations are not recognized: --time {} --it {}"
.format(glob_times, glob_it))
    # deal with reflevels -- available as well as required by user
do_all_reflevels = False
if len(glob_reflevels) == 1 and "all" in glob_reflevels:
glob_reflevels = __reflevels__
do_all_reflevels = True
else:
glob_reflevels = np.array(glob_reflevels, dtype=int)
# deal with variable names -- available as well as required by user
do_all_v_ns = False
if len(glob_v_ns) == 1 and "all" in glob_v_ns:
glob_v_ns=o_slice.list_v_ns
do_all_v_ns = True
else:
pass
    # summarize what is available and what is required
if do_all_v_ns or do_all_iterations or do_all_reflevels:
Printcolor.yellow("Selected all", comma=True)
if do_all_iterations:
Printcolor.print_colored_string(["timesteps", "({})".format(len(glob_times))],
["blue", "green"], comma=True)
if do_all_v_ns: Printcolor.print_colored_string(["v_ns", "({})".format(len(glob_v_ns))],
["blue", "green"], comma=True)
if do_all_reflevels: Printcolor.print_colored_string(["reflevels", "({})".format(len(glob_reflevels))],
["blue", "green"], comma=True)
Printcolor.yellow("this might take time.")
# if not click.confirm(text="Confirm?",default=True,show_default=True):
# exit(0)
# perform tasks
do_tasks(glob_v_ns)
| 20,890
| 0
| 92
|
e816797defd71414f450b8e5d91abae6b9cf9f15
| 7,532
|
py
|
Python
|
heartpredictions/LogisticRegression/Trainer.py
|
Dianevera/heart-prediction
|
c11e4ce92d501e1a398ee31b44d1552d8c6a29c5
|
[
"MIT"
] | null | null | null |
heartpredictions/LogisticRegression/Trainer.py
|
Dianevera/heart-prediction
|
c11e4ce92d501e1a398ee31b44d1552d8c6a29c5
|
[
"MIT"
] | 32
|
2021-09-27T17:32:19.000Z
|
2022-01-28T20:06:07.000Z
|
heartpredictions/LogisticRegression/Trainer.py
|
Dianevera/heart-prediction
|
c11e4ce92d501e1a398ee31b44d1552d8c6a29c5
|
[
"MIT"
] | 1
|
2021-11-03T13:29:44.000Z
|
2021-11-03T13:29:44.000Z
|
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from torch import nn
import torch
| 39.642105
| 149
| 0.525358
|
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from torch import nn
import torch
class Trainer:
def __init__(self, model, class_weights, save_directory, loss='dl', lr=0.5, label_name = ""):
"""
Create the Trainer object.
Parameters:
model (Model): The model we are going to train
class_weights ([float]): The class_weights
save_directory (string): The path where we will save the weights
                loss (string): The name of the loss function (see below for more details)
label_name (string): The label we are training on
"""
possible_loss = {'nllloss' : nn.NLLLoss(weight=class_weights, reduction='mean'),
'cross' : nn.CrossEntropyLoss(weight=class_weights), 'mse' : nn.MSELoss(reduction='mean'),
'BCEloss' : nn.BCELoss(), 'BCElogits' : nn.BCEWithLogitsLoss(weight=class_weights)}
self.model = model
self.criterion = possible_loss[loss]
self.optimizer = torch.optim.SGD(model.parameters(), lr=lr)
self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='max', factor=0.6, patience=2, cooldown=2)
self.history = {'lr': [], 'loss': [], 'acc':[], 'val_loss': [], 'val_acc':[]}
self.max_val_acc = float('-inf')
self.save_dir = save_directory
self.label_name = label_name
def fit(self, train_dataloader, val_dataloader, nb_epochs):
"""
The fit function.
Parameters:
train_dataloader (dataloader): The train data loader
val_dataloader (dataloader): The validation data loader
nb_epochs (int): Number of epochs we will train each trainer for
Returns:
trainers ([Trainer]): All the trainers we just trained
"""
print(f'==== Training {self.label_name} ====\n')
for epoch in range(nb_epochs):
print(f'Epoch {epoch + 1} / {nb_epochs}')
train_loss = val_loss = train_acc = val_acc = 0.0
self.model.train()
pbar = tf.keras.utils.Progbar(target=len(train_dataloader))
for i, batch in enumerate(train_dataloader):
inputs, labels = batch
# Clear gradients w.r.t. parameters
self.optimizer.zero_grad()
# Forward pass to get output/logits
output = self.model(inputs)
# Calculate Loss
loss = self.criterion(output, labels)
train_loss += loss
train_acc += 1 if np.argmax(labels.detach().numpy()[0]) == np.argmax(output.detach().numpy()[0]) else 0
# Getting gradients w.r.t. parameters
loss.backward()
pbar.update(i + 1, values=
[
("loss", train_loss.item()/(i + 1)),
("acc", train_acc/(i + 1)),
("lr", self.scheduler.optimizer.param_groups[0]['lr'])
])
# Updating parameters
self.optimizer.step()
print('Validation')
self.model.eval()
pbar = tf.keras.utils.Progbar(target=len(val_dataloader))
with torch.no_grad():
for i, batch in enumerate(val_dataloader):
inputs, labels = batch
output = self.model(inputs)
                    loss = self.criterion(output, labels)
                    val_loss += loss
val_acc += 1 if np.argmax(labels.detach().numpy()[0]) == np.argmax(output.detach().numpy()[0]) else 0
pbar.update(i + 1, values=
[
("loss", val_loss.item()/(i + 1)),
("acc", val_acc/(i + 1)),
("lr", self.scheduler.optimizer.param_groups[0]['lr'])
])
train_loss = train_loss / len(train_dataloader)
train_acc = train_acc / len(train_dataloader)
val_loss = val_loss / len(val_dataloader)
val_acc = val_acc / len(val_dataloader)
lr = self.scheduler.optimizer.param_groups[0]['lr']
self.scheduler.step(val_loss)
self.history['lr'].append(lr)
self.history['loss'].append(train_loss)
self.history['val_loss'].append(val_loss)
self.history['acc'].append(train_acc)
self.history['val_acc'].append(val_acc)
if val_acc > self.max_val_acc:
print(f'Model saved. Acc updated: {self.max_val_acc:.3f} -> {val_acc:.3f}')
self.max_val_acc = val_acc
torch.save(self.model.state_dict(), f'{self.save_dir}/logistic_regression_{self.label_name}.pt')
def evaluate(self, test_dataloader, display=True):
"""
The evaluation function. Test our model.
Parameters:
test_dataloader (dataloader): The test data loader
display (Bool): If true we display the Iteration, Loss, Accuracy and total loss as we go along the evaluation
Returns:
total_accuracy (float): The accuracy we got on the test datloader
"""
print(f'==== Evaluate {self.label_name} ====\n')
correct = total_loss = total = 0.0
self.model.eval()
with torch.no_grad():
for i, (inputs, labels) in enumerate(test_dataloader):
pred = self.model(inputs)
loss = self.criterion(pred, labels)
total_loss += loss
# Total correct predictions
correct += 1 if np.argmax(labels.detach().numpy()[0]) == np.argmax(pred.detach().numpy()[0]) else 0
total_accuracy = 100 * correct / len(test_dataloader)
if display:
print('Iteration: {}. Loss: {}. Accuracy: {}. total loss: {}.'.format(len(test_dataloader), loss.item(), total_accuracy, total_loss))
return total_accuracy
def display_history(self, accuracy=True, loss=False):
"""
Plot the evolution of the accuracy and loss.
Parameters:
accuracy (bool): If True we plot the accuracy
loss (Bool): If true we plot the loss
"""
if loss:
plt.figure(figsize=(6,6))
plt.plot(self.history['loss'], label="Loss")
plt.plot(self.history['val_loss'], label="Validation loss")
plt.ylabel('Loss', fontsize=10)
plt.xlabel('Epochs', fontsize=10)
plt.legend()
plt.show()
if accuracy:
plt.figure(figsize=(6,6))
plt.plot(self.history['acc'], label="Accuracy")
plt.plot(self.history['val_acc'], label="Validation accuracy")
plt.ylabel('Accuracy', fontsize=10)
plt.xlabel('Epochs', fontsize=10)
plt.legend()
plt.show()
def load_weights(self, path):
"""
load the weights.
Parameters:
path (string): The weights path
"""
self.model.load_state_dict(torch.load(path))
self.model.eval()
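# --- Illustrative usage sketch (hypothetical model and dataloaders, not part
# of the original file):
#
#     model = nn.Sequential(nn.Linear(13, 2), nn.LogSoftmax(dim=1))
#     trainer = Trainer(model, class_weights=torch.tensor([1.0, 2.0]),
#                       save_directory='checkpoints', loss='nllloss',
#                       label_name='target')
#     trainer.fit(train_dataloader, val_dataloader, nb_epochs=10)
#     accuracy = trainer.evaluate(test_dataloader)
#     trainer.display_history(accuracy=True, loss=True)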
| 0
| 7,399
| 23
|
cb4d80b54499c7f044d5d3bb7aa86ffb23658862
| 28
|
py
|
Python
|
exipicrename/__init__.py
|
unixhex/exipicrename2
|
b2a2f5af224c4a2c93f81e48c2622c7522d76489
|
[
"MIT"
] | 1
|
2020-02-14T13:41:28.000Z
|
2020-02-14T13:41:28.000Z
|
exipicrename/__init__.py
|
unixhex/exipicrename2
|
b2a2f5af224c4a2c93f81e48c2622c7522d76489
|
[
"MIT"
] | 3
|
2021-06-08T19:46:29.000Z
|
2022-03-11T23:44:57.000Z
|
exipicrename/__init__.py
|
unixhex/exipicrename2
|
b2a2f5af224c4a2c93f81e48c2622c7522d76489
|
[
"MIT"
] | null | null | null |
from .exipicrename import *
| 14
| 27
| 0.785714
|
from .exipicrename import *
| 0
| 0
| 0
|
7f8c1278840d863be1b8121c2143ec6ac691623e
| 2,392
|
py
|
Python
|
QuickSort.py
|
apotato369550/super-simple-sortr
|
0b85513a3fc6e426618719577a0d7ccd9385ff77
|
[
"MIT"
] | null | null | null |
QuickSort.py
|
apotato369550/super-simple-sortr
|
0b85513a3fc6e426618719577a0d7ccd9385ff77
|
[
"MIT"
] | null | null | null |
QuickSort.py
|
apotato369550/super-simple-sortr
|
0b85513a3fc6e426618719577a0d7ccd9385ff77
|
[
"MIT"
] | 1
|
2022-03-09T06:46:14.000Z
|
2022-03-09T06:46:14.000Z
|
from Algorithims import Algorithms
import time
import threading
| 31.064935
| 123
| 0.562291
|
from Algorithims import Algorithms
import time
import threading
class QuickSort(Algorithms):
def __init__(self, data, delay):
Algorithms.__init__(self)
self.data = data
self.delay = delay
sorting_thread = threading.Thread(target=self.sort, args=(self.data, 0, len(data) - 1, self.drawData, delay, True))
sorting_thread.daemon = True
sorting_thread.start()
self.mainloop()
def partition(self, data, head, tail, drawData, delay):
border = head
pivot = data[tail]
drawData(data, self.getColorArray(len(data), head, tail, border, border))
time.sleep(delay)
for i in range(head, tail):
if data[i] < pivot:
drawData(data, self.getColorArray(len(data), head, tail, border, i, True))
time.sleep(delay)
data[border], data[i] = data[i], data[border]
border += 1
drawData(data, self.getColorArray(len(data), head, tail, border, i))
time.sleep(delay)
drawData(data, self.getColorArray(len(data), head, tail, border, tail, True))
time.sleep(delay)
data[border], data[tail] = data[tail], data[border]
return border
def sort(self, data, head, tail, drawData, delay, main):
if head < tail:
partition_index = self.partition(data, head, tail, drawData, delay)
# Left partition
self.sort(data, head, partition_index - 1, drawData, delay, False)
# Right partition
self.sort(data, partition_index + 1, tail, drawData, delay, False)
if main:
drawData(data, ["green" for x in range(len(data))])
def getColorArray(self, data_length, head, tail, border, current_index, is_swapping=False):
color_array = []
for i in range(data_length):
if i >= head and i <= tail:
color_array.append("grey")
else:
color_array.append("white")
if i == tail:
color_array[i] = "blue"
elif i == border:
color_array[i] = "red"
elif i == current_index:
color_array[i] = "yellow"
if is_swapping:
if i == border or i == current_index:
color_array[i] = "green"
return color_array
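# --- Minimal sketch of the same Lomuto partition scheme with the GUI and
# threading plumbing stripped away (illustrative, not part of the original
# file):
def _plain_quicksort(data, head, tail):
    if head < tail:
        pivot, border = data[tail], head
        for i in range(head, tail):
            if data[i] < pivot:
                data[border], data[i] = data[i], data[border]
                border += 1
        data[border], data[tail] = data[tail], data[border]
        _plain_quicksort(data, head, border - 1)
        _plain_quicksort(data, border + 1, tail)

_demo = [5, 2, 9, 1, 5]
_plain_quicksort(_demo, 0, len(_demo) - 1)
assert _demo == [1, 2, 5, 5, 9]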
| 2,190
| 7
| 130
|
02be5c99a347950f27c83cbd18c1524887f1c17e
| 6,488
|
py
|
Python
|
src/extract_ml_features.py
|
amansinha09/HSDS
|
dd7cab75bd79a2cec1b9278215303b5e34e58e89
|
[
"MIT"
] | null | null | null |
src/extract_ml_features.py
|
amansinha09/HSDS
|
dd7cab75bd79a2cec1b9278215303b5e34e58e89
|
[
"MIT"
] | null | null | null |
src/extract_ml_features.py
|
amansinha09/HSDS
|
dd7cab75bd79a2cec1b9278215303b5e34e58e89
|
[
"MIT"
] | 1
|
2018-11-17T09:12:31.000Z
|
2018-11-17T09:12:31.000Z
|
#extract_ml_features.py
import emoji, re, os, time, sys
from gensim.models import LdaModel
from gensim.corpora import MmCorpus, Dictionary
from isc_tokenizer import Tokenizer
from isc_tagger import Tagger
from tqdm import tqdm
import vocab_helpers as helper
import utils
import pre_processing as dproc
from nltk import ngrams
#DONE
#DONE FOR WORD AND POS
#predict topic of unseen tweet using testing example based lda model built on train set
if __name__ == '__main__':
sample = 'मैं लगातार ट्विटर पर आर्सेनल के बारे में ट्वीट्स देखता हूं। दुनिया को अपडेट करने के लिए धन्यवाद @उपयोगकर्ता & @उपयोगकर्ता शॉनक्स। #'
tknzr = Tokenizer(lang='hin')
sys.stdout = open("toutput.txt", "a", encoding='utf-8')
tokens = tknzr.tokenize(sample)
tagger = Tagger(lang='hin')
tags = tagger.tag(tokens)
valid_tokens = []
for p in tags:
if p[1] != 'SYM' and p[0] !='#':
valid_tokens.append(p[0])
#for t in tokens:
#print("=>",tokens)
#ngram_list = [gram for gram in ngrams(tokens, 2)]
#print(get_ngrams(tokens, [1,2]))
print("Tokens ",tokens)
print("POS ", tags)
print("Filtered:", valid_tokens)
| 31.960591
| 143
| 0.719174
|
#extract_ml_features.py
import emoji, re, os, time, sys
from gensim.models import LdaModel
from gensim.corpora import MmCorpus, Dictionary
from isc_tokenizer import Tokenizer
from isc_tagger import Tagger
from tqdm import tqdm
import vocab_helpers as helper
import utils
import pre_processing as dproc
from nltk import ngrams
#DONE
def get_pragmatic_features(tweet_tokens):
user_specific = intensifiers = tweet_len_ch = 0
for t in tweet_tokens:
tweet_len_ch +=len(t)
#no uppercase
#count user mention
if t.startswith('@'):
user_specific +=1
#count of hashtag
if t.startswith('#'):
user_specific +=1
        # feature based on laughs
if t.startswith('हाहा') or re.match('ल(ॉ)ल+1$', t):
user_specific +=1
#count based feature
if t in helper.strong_negations:
intensifiers +=1
if t in helper.strong_affirmatives:
intensifiers +=1
if t in helper.interjections:
intensifiers +=1
if t in helper.intensifiers:
intensifiers +=1
if t in helper.punctuation:
user_specific +=1
if t in emoji.UNICODE_EMOJI:
user_specific +=1
tweet_len_tokens = len(tweet_tokens)
    # average token length = characters per token (the original divided the
    # token count by itself, which is always 1.0)
    average_token_length = float(tweet_len_ch) / max(1.0, float(tweet_len_tokens))
    feature_list = {'tw_len_ch': tweet_len_ch, 'tw_len_tok': tweet_len_tokens,
                    'avg_len': average_token_length, 'user_specific': user_specific,
                    'intensifiers': intensifiers}
return feature_list
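# --- Illustrative call (hypothetical tokens; exact counts depend on the word
# lists in vocab_helpers, so this is a sketch, not a guaranteed output):
#
#     get_pragmatic_features(['@user', '#tag', 'हाहा', 'बहुत'])
#     -> {'tw_len_ch': ..., 'tw_len_tok': 4, 'avg_len': ...,
#         'user_specific': 3, 'intensifiers': ...}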
#DONE FOR WORD AND POS
def get_ngrams(tokens, n, syntatic_data=False):
if len(n) < 1:
#print("Here!")
return {}
if not syntatic_data:
#print("Length of tokens", len(tokens))
filtered =[]
stopwords = dproc.get_stopwords_list()
for t in tokens:
if t not in stopwords:
filtered.append(t)
tokens = filtered
#print("Length of filtered tokens" , len(tokens))
ngram_tokens = []
for i in n:
for gram in ngrams(tokens, i):
string_token = str(i) + '-gram '
for j in range(i):
string_token += gram[j] + ' '
ngram_tokens.append(string_token)
ngram_features = {i : ngram_tokens.count(i) for i in set(ngram_tokens)}
return ngram_features
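# --- Illustrative call (syntatic_data=True skips the stop-word lookup, so no
# corpus resources are needed; hypothetical tokens, not from the original file):
assert get_ngrams(['a', 'b', 'a'], [1, 2], syntatic_data=True) == {
    '1-gram a ': 2, '1-gram b ': 1, '2-gram a b ': 1, '2-gram b a ': 1}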
def build_lda_model(tokens_tags, pos_tags, use_nouns = True, use_verbs = True, use_all = False, num_of_topics = 8, passes=25, verbose = True):
path = '\\'.join((os.getcwd()).split('\\')[:-1])
topics_filename = str(num_of_topics) + "topics"
if use_nouns:
topics_filename += "_nouns"
if use_verbs:
topics_filename += "_verbs"
if use_all:
topics_filename += "_all"
#set the LDA, DIctionary and Corpus filenames
lda_filename = path + "/models/topics/lda_"+ topics_filename + ".model"
dict_filename = path + "/res/topic_data/dict/dict_" + topics_filename + ".dict"
corpus_filename = path + "/res/topic_data/corpus/corpus_" + topics_filename + ".mm"
#build a topic model if wasn't created yet
if not os.path.exists(lda_filename):
# Extract lemmatize document
docs =[]
for index in range(len(tokens_tags)):
tokens = tokens_tags[index].split()
pos = pos_tags[index].split()
            docs.append(dproc.extract_lemmatized_tweets(tokens, pos, use_verbs, use_nouns, use_all))
#compute dictionary and save it
dictionary = Dictionary(docs)
dictionary.filter_extremes(keep_n = 40000)
dictionary.compactify()
Dictionary.save(dictionary, dict_filename)
corpus = [dictionary.doc2bow(d) for d in docs]
        MmCorpus.serialize(corpus_filename, corpus)
        if verbose:
            print("\nCleaned documents:", docs)
            print("\nDictionary:", dictionary)
            print("\nCorpus in BOW form:", corpus)
        # start training the LDA model
        start = time.time()
        print("\nBuilding LDA topics model....")
lda_model = LdaModel(corpus=corpus, num_topics = num_of_topics, passes = passes, id2word = dictionary)
lda_model.save(lda_filename)
end = time.time()
print("Completion time for building LDA model: %.3f s = %.3f min" % ((end- start), (end -start)/60.0))
if verbose:
print("\nList of words associated with each topics")
lda_topics_list = [[word for word, prob in topic] for topic_id, topic in lda_topics]
print([t for t in lda_topics_list])
#Load the previously saved dictionary
dictionary = Dictionary.load(dict_filename)
#Load the previously saved corpus
    mm_corpus = MmCorpus(corpus_filename)
#Load the provious saved LDA model
lda_model = LdaModel.load(lda_filename)
# print top 10 for each topic
if verbose:
        for topic_id in range(num_of_topics):
            print("\nTop 10 words for topic", topic_id)
print([dictionary[word_id] for (word_id, prob) in lda_model.get_topic_terms(topic_id, topn =10)])
index=0
if verbose:
for doc_topics, word_topics, word_phis in lda_model.get_document_topics(mm_corpus, per_word_topics =True):
print('Index', index)
print('Document topics', doc_topics)
print('Word topics:', word_topics)
print('Phi values:', word_phis)
print('--------------------------\n')
index +=1
return dictionary, mm_corpus, lda_model
#predict topic of unseen tweet using testing example based lda model built on train set
def get_topic_features_for_unseen_tweet(dictionary, lda_model, tokens_tags, pos_tags, use_nouns=True, use_verbs=True, use_all=False):
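    """Infer LDA topic probabilities for an unseen tweet and return them as features."""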
#extract the lemmatize documents
    docs = dproc.extract_lemmatized_tweets(tokens_tags, pos_tags, use_verbs, use_nouns, use_all)
tweet_bow = dictionary.doc2bow(docs)
topic_prediction = lda_model[tweet_bow]
topic_features = {}
if any(isinstance(topic_list, type([])) for topic_list in topic_prediction):
topic_prediction = topic_prediction[0]
for topic in topic_prediction:
topic_features[ 'topic '+str(topic[0])] = topic[1]
return topic_features
def get_topic_features(corpus, ldamodel, index):
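    """Look up the per-topic probabilities of training document `index` from the corpus."""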
topic_features = {}
doc_topics, word_topics, phi_values = ldamodel.get_document_topics(corpus, per_word_topics=True)[index]
for topic in doc_topics:
topic_features['topic '+ str(topic[0])] = topic[1]
return topic_features
if __name__ == '__main__':
sample = 'मैं लगातार ट्विटर पर आर्सेनल के बारे में ट्वीट्स देखता हूं। दुनिया को अपडेट करने के लिए धन्यवाद @उपयोगकर्ता & @उपयोगकर्ता शॉनक्स। #'
tknzr = Tokenizer(lang='hin')
sys.stdout = open("toutput.txt", "a", encoding='utf-8')
tokens = tknzr.tokenize(sample)
tagger = Tagger(lang='hin')
tags = tagger.tag(tokens)
valid_tokens = []
for p in tags:
if p[1] != 'SYM' and p[0] !='#':
valid_tokens.append(p[0])
#for t in tokens:
#print("=>",tokens)
#ngram_list = [gram for gram in ngrams(tokens, 2)]
#print(get_ngrams(tokens, [1,2]))
print("Tokens ",tokens)
print("POS ", tags)
print("Filtered:", valid_tokens)
| 5,279
| 0
| 112
|
70d83f669a2aceeb1487977c9a3a11d4d4f1f042
| 244
|
py
|
Python
|
src/__init__.py
|
MarcelFox/api-modelo
|
1ca862446893d0f0d079cde1b10931b8fd188c57
|
[
"CC0-1.0"
] | 1
|
2020-09-29T14:55:08.000Z
|
2020-09-29T14:55:08.000Z
|
src/__init__.py
|
MarcelFox/api-modelo
|
1ca862446893d0f0d079cde1b10931b8fd188c57
|
[
"CC0-1.0"
] | null | null | null |
src/__init__.py
|
MarcelFox/api-modelo
|
1ca862446893d0f0d079cde1b10931b8fd188c57
|
[
"CC0-1.0"
] | null | null | null |
from flask import Flask
| 20.333333
| 44
| 0.668033
|
from flask import Flask
def create_app():
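    """Application factory: create the Flask app and register the file-routing blueprint."""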
app = Flask(__name__)
# app.config.from_object('config')
with app.app_context():
from src.app.Router import file_urls
app.register_blueprint(file_urls)
return app
| 196
| 0
| 23
|
4b523a47b862a5ac14fd0e2ce940728a52b0da94
| 535
|
py
|
Python
|
source/chunk.py
|
Ryaangu/pyler
|
685955088454b01f649a5de95b4b3cf6c6078db3
|
[
"MIT"
] | 1
|
2020-11-05T23:36:31.000Z
|
2020-11-05T23:36:31.000Z
|
source/chunk.py
|
Ryaangu/pyler
|
685955088454b01f649a5de95b4b3cf6c6078db3
|
[
"MIT"
] | null | null | null |
source/chunk.py
|
Ryaangu/pyler
|
685955088454b01f649a5de95b4b3cf6c6078db3
|
[
"MIT"
] | 1
|
2020-11-06T12:44:21.000Z
|
2020-11-06T12:44:21.000Z
|
# Chunk
# Write to chunk
# Add constant to chunk
| 19.107143
| 43
| 0.583178
|
# Chunk
class Chunk():
    # per-instance state; class-level lists would be shared by every Chunk
    def __init__(self):
        self.count = 0
        self.constants_count = 0
        self.code = []
        self.lines = []
        self.columns = []
        self.constants = []
        self.local_variables = []
# Write to chunk
def chunk_write(chunk, byte, line, column):
chunk.code.append(byte)
chunk.lines.append(line)
chunk.columns.append(column)
chunk.count += 1
# Add constant to chunk
def add_constant(chunk, value):
chunk.constants.append(value)
chunk.constants_count += 1
return (chunk.constants_count - 1)
| 251
| 168
| 66
|
0a9e811c6d130935e11801209b3068eba9b73f6d
| 9,010
|
py
|
Python
|
elyra/tests/pipeline/test_pipeline_parser.py
|
el-aasi/elyra
|
bd06a22c97a5e6083d5a29d88303142e826e2eab
|
[
"Apache-2.0"
] | 1
|
2022-02-18T14:21:33.000Z
|
2022-02-18T14:21:33.000Z
|
elyra/tests/pipeline/test_pipeline_parser.py
|
el-aasi/elyra
|
bd06a22c97a5e6083d5a29d88303142e826e2eab
|
[
"Apache-2.0"
] | null | null | null |
elyra/tests/pipeline/test_pipeline_parser.py
|
el-aasi/elyra
|
bd06a22c97a5e6083d5a29d88303142e826e2eab
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2018-2022 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from elyra.pipeline.parser import PipelineParser
from elyra.pipeline.pipeline import GenericOperation
from elyra.tests.pipeline.util import _read_pipeline_resource
@pytest.fixture
| 39.004329
| 112
| 0.723085
|
#
# Copyright 2018-2022 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from elyra.pipeline.parser import PipelineParser
from elyra.pipeline.pipeline import GenericOperation
from elyra.tests.pipeline.util import _read_pipeline_resource
@pytest.fixture
def valid_operation():
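    """Fixture: the GenericOperation expected to be parsed from pipeline_valid.json."""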
component_parameters = {
'filename': '{{filename}}',
'runtime_image': '{{runtime_image}}',
'env_vars': ["var1=var1", "var2=var2"],
'dependencies': ["a.txt", "b.txt", "c.txt"],
'outputs': ["d.txt", "e.txt", "f.txt"]
}
return GenericOperation(id='{{uuid}}',
type='execution_node',
classifier='execute-notebook-node',
name='{{label}}',
component_params=component_parameters)
def test_valid_pipeline(valid_operation):
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/pipeline_valid.json')
pipeline = PipelineParser().parse(pipeline_json)
assert pipeline.name == '{{name}}'
assert pipeline.runtime == '{{runtime}}'
assert pipeline.runtime_config == '{{runtime-config}}'
assert len(pipeline.operations) == 1
assert pipeline.operations['{{uuid}}'] == valid_operation
def test_pipeline_with_dirty_list_values(valid_operation):
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/pipeline_with_invalid_list_values.json')
pipeline = PipelineParser().parse(pipeline_json)
assert pipeline.name == '{{name}}'
assert pipeline.runtime == '{{runtime}}'
assert pipeline.runtime_config == '{{runtime-config}}'
assert len(pipeline.operations) == 1
assert pipeline.operations['{{uuid}}'] == valid_operation
def test_multinode_pipeline():
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/pipeline_3_node_sample.json')
pipeline = PipelineParser().parse(pipeline_json)
assert len(pipeline.operations) == 3
def test_supernode_pipeline():
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/pipeline_with_supernode.json')
pipeline = PipelineParser().parse(pipeline_json)
assert len(pipeline.operations) == 4
# Confirm structure of pipeline:
# Two execution nodes feed their outputs to super-node with one execution_node.
# Super-node's execution node, then sends its output to external execution node.
# 4 nodes total. Super-node execution node should have two parent-operations
# pointing at first two nodes, and final node should have one parent pointing
# at execution node WITHIN supernode.
external_input_node_ids = ["db9f3f5b-b2e3-4824-aadd-c1c6bf652534", "f6584209-6f22-434f-9820-41327b6c749d"]
supernode_excution_node_id = "079c0e12-eb5f-4fcc-983b-09e011869fee"
external_node_id = "7628306d-2cc2-405c-94a1-fe42c95567a1"
for node_id in pipeline.operations:
# Validate operations list
if node_id in external_input_node_ids:
# These are input nodes, ensure parent_operation_ids are empty
assert len(pipeline.operations[node_id].parent_operation_ids) == 0
continue
if node_id == supernode_excution_node_id:
# Node within supernode, should have two parent_ops matching external_input_node_ids
assert len(pipeline.operations[node_id].parent_operation_ids) == 2
assert set(pipeline.operations[node_id].parent_operation_ids) == set(external_input_node_ids)
continue
if node_id == external_node_id:
# Final external node, should have super_node embedded node as parent op.
assert len(pipeline.operations[node_id].parent_operation_ids) == 1
assert pipeline.operations[node_id].parent_operation_ids[0] == supernode_excution_node_id
continue
assert False, "Invalid node_id encountered in pipeline operations!"
def test_multiple_pipeline_definition():
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/'
'pipeline_multiple_pipeline_definitions.json')
with pytest.raises(ValueError):
PipelineParser().parse(pipeline_json)
def test_pipeline_operations_and_handle_artifact_file_details():
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/pipeline_3_node_sample.json')
pipeline = PipelineParser().parse(pipeline_json)
assert len(pipeline.operations) == 3
for op in pipeline.operations.values():
assert '.' not in op.name
def test_pipeline_with_dependencies():
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/'
'pipeline_3_node_sample_with_dependencies.json')
pipeline = PipelineParser().parse(pipeline_json)
assert len(pipeline.operations['acc4527d-7cc8-4c16-b520-5aa0f50a2e34'].parent_operation_ids) == 2
def test_pipeline_with_comments():
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/'
'pipeline_3_node_sample_with_comments.json')
pipeline = PipelineParser().parse(pipeline_json)
assert pipeline.operations['d52ddfb4-dd0e-47ac-abc7-fa30bb95d45c'].doc \
== "Generate community stats and then aggregate them on an overview dashboard"
def test_pipeline_global_attributes():
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/pipeline_valid.json')
pipeline = PipelineParser().parse(pipeline_json)
assert pipeline.name == '{{name}}'
assert pipeline.runtime == '{{runtime}}'
assert pipeline.runtime_config == '{{runtime-config}}'
def test_missing_pipeline_name_should_default_to_untitled():
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/pipeline_valid.json')
pipeline_json['pipelines'][0]['app_data']['properties'].pop('name')
pipeline = PipelineParser().parse(pipeline_json)
assert pipeline.name == 'untitled'
def test_missing_pipeline_runtime():
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/pipeline_valid.json')
pipeline_json['pipelines'][0]['app_data'].pop('runtime')
with pytest.raises(ValueError) as e:
PipelineParser().parse(pipeline_json)
assert "Invalid pipeline: Missing runtime." in str(e.value)
def test_missing_pipeline_runtime_configuration():
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/pipeline_valid.json')
pipeline_json['pipelines'][0]['app_data'].pop('runtime_config')
with pytest.raises(ValueError) as e:
PipelineParser().parse(pipeline_json)
assert "Invalid pipeline: Missing runtime configuration" in str(e.value)
def test_missing_operation_id():
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/pipeline_valid.json')
pipeline_json['pipelines'][0]['nodes'][0].pop('id')
with pytest.raises(ValueError) as e:
PipelineParser().parse(pipeline_json)
assert "Missing field 'operation id'" in str(e.value)
def test_missing_operation_type():
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/pipeline_valid.json')
pipeline_json['pipelines'][0]['nodes'][0].pop('type')
with pytest.raises(ValueError) as e:
PipelineParser().parse(pipeline_json)
assert "Node type 'None' is invalid!" in str(e.value)
def test_invalid_node_type():
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/pipeline_valid.json')
pipeline_json['pipelines'][0]['nodes'][0]['type'] = 'foo'
with pytest.raises(ValueError) as e:
PipelineParser().parse(pipeline_json)
assert "Node type 'foo' is invalid!" in str(e.value)
def test_missing_operation_filename():
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/pipeline_valid.json')
pipeline_json['pipelines'][0]['nodes'][0]['app_data']['component_parameters'].pop('filename')
with pytest.raises(ValueError) as e:
PipelineParser().parse(pipeline_json)
assert "Missing field 'operation filename" in str(e.value)
def test_missing_operation_image():
pipeline_json = _read_pipeline_resource('resources/sample_pipelines/pipeline_valid.json')
pipeline_json['pipelines'][0]['nodes'][0]['app_data']['component_parameters'].pop('runtime_image')
with pytest.raises(ValueError) as e:
PipelineParser().parse(pipeline_json)
assert "Missing field 'operation runtime image'" in str(e.value)
| 7,801
| 0
| 413
|
a512f5392dbace5bfc36af9bb0cb5f229444416d
| 6,379
|
py
|
Python
|
csv_to_coco.py
|
ZHUXUHAN/Tools
|
98a0776f460febc69af5523e2c69d7702ee04876
|
[
"MIT"
] | 1
|
2019-11-20T12:16:21.000Z
|
2019-11-20T12:16:21.000Z
|
csv_to_coco.py
|
ZHUXUHAN/Python-Tools
|
98a0776f460febc69af5523e2c69d7702ee04876
|
[
"MIT"
] | null | null | null |
csv_to_coco.py
|
ZHUXUHAN/Python-Tools
|
98a0776f460febc69af5523e2c69d7702ee04876
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd
from collections import OrderedDict
import cv2
import numpy as np
import json
gt_data_path = "/home/priv-lab1/workspace/zxh/end2/csv/anno_box_train.csv"
hoi_list_path = "/home/priv-lab1/workspace/zxh/end2/origin_lists/hico_list_hoi.txt"
img_path = '/home/priv-lab1/workspace/zxh/My_Database/hico_20160224_det/images/train2015/' # image folder path
save_json_path = 'train_hico.json' # name for save json
_classes = ('person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic_light',
'fire_hydrant', 'stop_sign', 'parking_meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports_ball',
'kite', 'baseball_bat', 'baseball_glove', 'skateboard',
'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot_dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted_plant', 'bed',
'dining_table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell_phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy_bear', 'hair_drier', 'toothbrush')
df_gt_data = pd.read_csv(gt_data_path, index_col=0)
# str to list
df_gt_data['human_bbox'] = df_gt_data['human_bbox'].apply(lambda x: list(map(int, x.strip('[]').split(','))))
df_gt_data['obj_bbox'] = df_gt_data['obj_bbox'].apply(lambda x: list(map(int, x.strip('[]').split(','))))
df_gt_data['img_size_w_h'] = df_gt_data['img_size_w_h'].apply(lambda x: list(map(int, x.strip('[]').split(','))))
human_bbox_dict=OrderedDict()
object_bbox_dict=OrderedDict()
filenames=[]
action_dict=OrderedDict()
list_action=[]
for index,row in df_gt_data.iterrows():
filenames.append(row['name'])
if row['name'] in human_bbox_dict:
human_bbox_dict[row['name']].append(row['human_bbox'])
object_bbox_dict[row['name']].append(row['obj_bbox'])
action_dict[row['name']].append(row['action_no'])
else:
human_bbox_dict[row['name']]=[row['human_bbox']]
object_bbox_dict[row['name']] = [row['obj_bbox']]
action_dict[row['name']] = [row['action_no']]
filenames=set(filenames)
with open(hoi_list_path,'r') as f :
lines=f.readlines()
for line in lines[2:]:
list_action.append(line.split())
print("data set done")
if __name__ == '__main__':
Convert_csv_to_coco('train')
| 39.621118
| 113
| 0.551811
|
import os
import pandas as pd
from collections import OrderedDict
import cv2
import numpy as np
import json
gt_data_path = "/home/priv-lab1/workspace/zxh/end2/csv/anno_box_train.csv"
hoi_list_path = "/home/priv-lab1/workspace/zxh/end2/origin_lists/hico_list_hoi.txt"
img_path = '/home/priv-lab1/workspace/zxh/My_Database/hico_20160224_det/images/train2015/' # image folder path
save_json_path = 'train_hico.json' # name for save json
_classes = ('person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic_light',
'fire_hydrant', 'stop_sign', 'parking_meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports_ball',
'kite', 'baseball_bat', 'baseball_glove', 'skateboard',
'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot_dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted_plant', 'bed',
'dining_table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell_phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy_bear', 'hair_drier', 'toothbrush')
df_gt_data = pd.read_csv(gt_data_path, index_col=0)
# str to list
df_gt_data['human_bbox'] = df_gt_data['human_bbox'].apply(lambda x: list(map(int, x.strip('[]').split(','))))
df_gt_data['obj_bbox'] = df_gt_data['obj_bbox'].apply(lambda x: list(map(int, x.strip('[]').split(','))))
df_gt_data['img_size_w_h'] = df_gt_data['img_size_w_h'].apply(lambda x: list(map(int, x.strip('[]').split(','))))
human_bbox_dict=OrderedDict()
object_bbox_dict=OrderedDict()
filenames=[]
action_dict=OrderedDict()
list_action=[]
for index,row in df_gt_data.iterrows():
filenames.append(row['name'])
if row['name'] in human_bbox_dict:
human_bbox_dict[row['name']].append(row['human_bbox'])
object_bbox_dict[row['name']].append(row['obj_bbox'])
action_dict[row['name']].append(row['action_no'])
else:
human_bbox_dict[row['name']]=[row['human_bbox']]
object_bbox_dict[row['name']] = [row['obj_bbox']]
action_dict[row['name']] = [row['action_no']]
filenames=set(filenames)
with open(hoi_list_path,'r') as f :
lines=f.readlines()
for line in lines[2:]:
list_action.append(line.split())
print("data set done")
class Convert_csv_to_coco(object):
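    """Convert HICO-DET CSV annotations into a COCO-style JSON file: one image
    record per file, one annotation per human box and per object box."""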
def __init__(self,mode):
# self.img_hoi = img_hoi_train
# self.row_num=img_hoi_train.shape[0]#行数
self.save_json_path = save_json_path
self.mode=mode
self.images = []
self.categories = []
self.annotations = []
self.label_map = {}
for i in range(len(_classes)):
self.label_map[_classes[i]] = i
self.annID = 1
self.transfer_process()
self.save_json()
def transfer_process(self):
# categories
for i in range(0, len(_classes)):
categories = {'supercategory': _classes[i], 'id': i,
'name': _classes[i]}
self.categories.append(categories)
for i,file in enumerate(filenames):
if i% 100 == 0 or i==len(filenames)-1:
print('CSV transfer process {}'.format(str(i + 1)))
data_name = file
if os.path.exists(img_path + data_name):
img_p = cv2.imread(img_path + data_name )
filename = data_name
width = img_p.shape[1]
height = img_p.shape[0]
            else:
                # log the missing image and skip it; otherwise filename/width/height
                # below would be undefined
                with open("./save.txt", 'a') as f:
                    f.write(img_path + data_name + '\n')
                print(img_path + data_name)
                continue
def processing_ann( bbox):
x1 = np.maximum(0.0, float(bbox[0]))
y1 = np.maximum(0.0, float(bbox[2]))
x2 = np.minimum(width - 1.0, float(bbox[1]))
y2 = np.minimum(height - 1.0, float(bbox[3]))
# rectangle = [x1, y1, x2, y2]
bbox = [x1, y1, x2 - x1 + 1, y2 - y1 + 1] # [x,y,w,h]
area = (x2 - x1 + 1) * (y2 - y1 + 1)
return bbox, area
# images
image = {'height': height, 'width': width, 'id': i, 'file_name': filename}
self.images.append(image)
obboxs = object_bbox_dict[file]
actions = action_dict[file]
for ii,hbbox in enumerate(human_bbox_dict[file]):
h_x1 = hbbox[0]
h_x2 = hbbox[1]
h_y1 = hbbox[2]
h_y2 = hbbox[3]
human_bbox=[h_x1,h_x2,h_y1,h_y2]
o_x1 = obboxs[ii][0]
o_x2 = obboxs[ii][1]
o_y1 = obboxs[ii][2]
o_y2 = obboxs[ii][3]
obj_bbox=[o_x1,o_x2,o_y1,o_y2]
#
label = list_action[actions[ii]-1][1]
human_bbox,human_bbox_area=processing_ann(human_bbox)
human_annotation = {'segmentation': [], 'iscrowd': 0, 'area': human_bbox_area, 'image_id': i,
'bbox': human_bbox, 'difficult': 0,
'category_id': self.label_map['person'], 'id': self.annID}
self.annotations.append(human_annotation)
self.annID += 1
obj_bbox, obj_bbox_area = processing_ann(obj_bbox)
obj_annotation = {'segmentation': [], 'iscrowd': 0, 'area': obj_bbox_area, 'image_id': i,
'bbox': obj_bbox, 'difficult': 0,
'category_id': self.label_map[label], 'id': self.annID}
self.annotations.append(obj_annotation)
self.annID += 1
def save_json(self):
data_coco = {'images': self.images, 'categories': self.categories, 'annotations': self.annotations}
json.dump(data_coco, open(self.save_json_path, 'w'), indent=4)
if __name__ == '__main__':
Convert_csv_to_coco('train')
| 3,530
| 13
| 102
|
5889664d73c5e5d85739f1d451909c87f007011e
| 3,513
|
py
|
Python
|
dirutility/multiprocess.py
|
mrstephenneal/dirutility
|
c51b4c3bd543da8bb69e496d0c3ec8333897042c
|
[
"MIT"
] | 2
|
2018-07-27T18:34:10.000Z
|
2018-10-09T21:40:34.000Z
|
dirutility/multiprocess.py
|
mrstephenneal/dirutility
|
c51b4c3bd543da8bb69e496d0c3ec8333897042c
|
[
"MIT"
] | 7
|
2018-07-27T17:29:36.000Z
|
2018-10-01T13:29:52.000Z
|
dirutility/multiprocess.py
|
mrstephenneal/dirutility
|
c51b4c3bd543da8bb69e496d0c3ec8333897042c
|
[
"MIT"
] | 1
|
2019-09-26T13:04:04.000Z
|
2019-09-26T13:04:04.000Z
|
from multiprocessing import cpu_count
from multiprocessing.pool import Pool
from tqdm import tqdm
def pool_process(func, iterable, cpus=cpu_count(), return_vals=False, cpu_reduction=0, progress_bar=False):
"""
Multiprocessing helper function for performing looped operation using multiple processors.
:param func: Function to call
:param iterable: Iterable object to perform each function on
:param cpus: Number of cpu cores, defaults to system's cpu count
:param return_vals: Bool, returns output values when True
    :param cpu_reduction: Number of CPU cores to leave unused
:param progress_bar: Display text based progress bar
:return:
"""
with Pool(cpus - abs(cpu_reduction)) as pool:
# Return values returned by 'func'
if return_vals:
# Show progress bar
if progress_bar:
vals = [v for v in tqdm(pool.imap_unordered(func, iterable), total=len(iterable))]
# No progress bar
else:
vals = pool.map(func, iterable)
# Close pool and return values
pool.close()
# pool.join()
return vals
# Don't capture values returned by 'func'
else:
pool.map(func, iterable)
pool.close()
return True
| 36.978947
| 107
| 0.63877
|
from multiprocessing import cpu_count
from multiprocessing.pool import Pool
from tqdm import tqdm
def pool_process(func, iterable, cpus=cpu_count(), return_vals=False, cpu_reduction=0, progress_bar=False):
"""
Multiprocessing helper function for performing looped operation using multiple processors.
:param func: Function to call
:param iterable: Iterable object to perform each function on
:param cpus: Number of cpu cores, defaults to system's cpu count
:param return_vals: Bool, returns output values when True
    :param cpu_reduction: Number of CPU cores to leave unused
:param progress_bar: Display text based progress bar
:return:
"""
with Pool(cpus - abs(cpu_reduction)) as pool:
# Return values returned by 'func'
if return_vals:
# Show progress bar
if progress_bar:
vals = [v for v in tqdm(pool.imap_unordered(func, iterable), total=len(iterable))]
# No progress bar
else:
vals = pool.map(func, iterable)
# Close pool and return values
pool.close()
# pool.join()
return vals
# Don't capture values returned by 'func'
else:
pool.map(func, iterable)
pool.close()
return True
class PoolProcess:
_func = None
_iterable = None
def __init__(self, func, iterable, cpus=cpu_count(), cpu_reduction=0, filter_nulls=False):
"""
Multiprocessing helper function for performing looped operation using multiple processors.
:param func: Function to call
:param iterable: Iterable object to perform each function on
:param cpus: Number of cpu cores, defaults to system's cpu count
        :param cpu_reduction: Number of CPU cores to leave unused
:param filter_nulls: Bool, when true None values are removed from the result list before return
"""
self._func = func
self._iterable = iterable
self.cpu_count = cpus - abs(cpu_reduction)
self.filter_nulls = filter_nulls
self._result = None
@property
def result(self):
"""Return the results returned by map_return or map_tqdm methods."""
# Remove None values from self._result if filter_nulls is enabled
return [i for i in self._result if i is not None] if self.filter_nulls else self._result
def map(self):
"""Perform a function on every item in an iterable."""
with Pool(self.cpu_count) as pool:
pool.map(self._func, self._iterable)
pool.close()
return True
def map_return(self):
"""Perform a function on every item and return a list of yield values."""
with Pool(self.cpu_count) as pool:
self._result = pool.map(self._func, self._iterable)
pool.close()
return self.result
def map_tqdm(self, desc=None, unit='it'):
"""
Perform a function on every item while displaying a progress bar.
:param desc: Optional, progress bar description
:param unit: Optional, progress bar units (default is 'it' for 'iteration')
:return: A list of yielded values
"""
tqdm_args = dict(total=len(self._iterable), desc=desc, unit=unit)
with Pool(self.cpu_count) as pool:
self._result = [v for v in tqdm(pool.imap_unordered(self._func, self._iterable), **tqdm_args)]
pool.close()
return self.result
| 0
| 2,167
| 23
|
f5483164f422c3135aabb74cd6db1a01d89851f4
| 617
|
py
|
Python
|
census_dp/noisy_max.py
|
candrsn/census-dp
|
a98b4bc4e03dab3c5d77723806daf387a8cbee8b
|
[
"MIT"
] | 13
|
2019-08-30T15:05:21.000Z
|
2022-03-11T14:17:01.000Z
|
census_dp/noisy_max.py
|
chrishwiggins/census-dp
|
a98b4bc4e03dab3c5d77723806daf387a8cbee8b
|
[
"MIT"
] | 1
|
2019-08-01T16:20:58.000Z
|
2019-08-01T16:20:58.000Z
|
census_dp/noisy_max.py
|
chrishwiggins/census-dp
|
a98b4bc4e03dab3c5d77723806daf387a8cbee8b
|
[
"MIT"
] | 4
|
2019-09-23T19:29:34.000Z
|
2021-02-13T18:09:43.000Z
|
import numpy as np
from laplace import laplace_mech
def noisy_max(answers: np.ndarray, epsilon: float, sensitivity: float):
""" Implementation of the noisy max mechanism with gap using Laplace noise
Given a set of queries, this mechanism will return the **index**, not the
value, of the query that is probably largest.
Args:
answers (float or numpy array): the set of queries
epsilon (float): the privacy budget
sensitivity (float): the global sensitivity of the query
"""
noisy_answers = laplace_mech(answers, epsilon/2.0, sensitivity)
return noisy_answers.argmax()
| 34.277778
| 78
| 0.721232
|
import numpy as np
from laplace import laplace_mech
def noisy_max(answers: np.ndarray, epsilon: float, sensitivity: float):
""" Implementation of the noisy max mechanism with gap using Laplace noise
Given a set of queries, this mechanism will return the **index**, not the
value, of the query that is probably largest.
Args:
answers (float or numpy array): the set of queries
epsilon (float): the privacy budget
sensitivity (float): the global sensitivity of the query
"""
noisy_answers = laplace_mech(answers, epsilon/2.0, sensitivity)
return noisy_answers.argmax()
| 0
| 0
| 0
|
ed609976d006987150b382827dca7d8b313dbdb3
| 782
|
py
|
Python
|
tests/test_metric_logger.py
|
zhtianxiao/DLA-Combined-IoUs
|
0b9db0e8e2b2927928bd57c6032497d3b87e7905
|
[
"BSD-2-Clause"
] | 2
|
2022-01-27T07:08:34.000Z
|
2022-03-22T03:14:11.000Z
|
tests/test_metric_logger.py
|
zhtianxiao/DLA-Combined-IoUs
|
0b9db0e8e2b2927928bd57c6032497d3b87e7905
|
[
"BSD-2-Clause"
] | 1
|
2022-02-04T05:38:04.000Z
|
2022-02-04T05:38:04.000Z
|
tests/test_metric_logger.py
|
zhtianxiao/DLA-Combined-IoUs
|
0b9db0e8e2b2927928bd57c6032497d3b87e7905
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import unittest
from dynamic_atss_core.utils.metric_logger import MetricLogger
if __name__ == "__main__":
unittest.main()
| 25.225806
| 71
| 0.63555
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import unittest
from dynamic_atss_core.utils.metric_logger import MetricLogger
class TestMetricLogger(unittest.TestCase):
def test_update(self):
meter = MetricLogger()
for i in range(10):
meter.update(metric=float(i))
m = meter.meters["metric"]
self.assertEqual(m.count, 10)
self.assertEqual(m.total, 45)
self.assertEqual(m.median, 4)
self.assertEqual(m.avg, 4.5)
def test_no_attr(self):
meter = MetricLogger()
_ = meter.meters
_ = meter.delimiter
def broken():
_ = meter.not_existent
self.assertRaises(AttributeError, broken)
if __name__ == "__main__":
unittest.main()
| 483
| 21
| 76
|
a125abeb0e27dc36ea6ff47b03ac4c93aff2e032
| 2,224
|
py
|
Python
|
python/run_emulator.py
|
LCClyde/NyraEmulationSystem
|
bedd164316abbe833b066e282e0de1c506d45f2b
|
[
"MIT"
] | null | null | null |
python/run_emulator.py
|
LCClyde/NyraEmulationSystem
|
bedd164316abbe833b066e282e0de1c506d45f2b
|
[
"MIT"
] | 1
|
2015-05-12T11:19:58.000Z
|
2015-05-12T11:19:58.000Z
|
python/run_emulator.py
|
LCClyde/NyraEmulationSystem
|
bedd164316abbe833b066e282e0de1c506d45f2b
|
[
"MIT"
] | null | null | null |
import argparse
import pygame
import sys
from nes import Controller
from screen import Screen
from fps import FPS
from tas import TAS
from emulator import Emulator
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description = 'Dumps NES header information')
parser.add_argument('pathname', help='specify the NES file to open')
parser.add_argument('--tas', dest='tas', help='specify a TAS file to run', default=None)
parser.add_argument('--scale', dest='scale', help='specify the screen scale', default = 1)
args = parser.parse_args()
emulator = Emulator(args.pathname)
screen = Screen(args.scale)
fps = FPS()
if args.tas:
target_fps = 1000.0
tas = TAS(args.tas)
else:
target_fps = 60.0
tas = None
keep_going = True
while keep_going:
try:
# Get button presses
if tas == None:
pressed = pygame.key.get_pressed()
controller = emulator.controllers[0]
if pressed[pygame.K_RETURN]:
controller.set_key(Controller.BUTTON_START)
if pressed[pygame.K_RSHIFT]:
controller.set_key(Controller.BUTTON_SELECT)
if pressed[pygame.K_a]:
controller.set_key(Controller.BUTTON_LEFT)
if pressed[pygame.K_d]:
controller.set_key(Controller.BUTTON_RIGHT)
if pressed[pygame.K_w]:
controller.set_key(Controller.BUTTON_UP)
if pressed[pygame.K_s]:
controller.set_key(Controller.BUTTON_DOWN)
if pressed[pygame.K_j]:
controller.set_key(Controller.BUTTON_B)
if pressed[pygame.K_k]:
controller.set_key(Controller.BUTTON_A)
if tas != None:
tas.update_controller(emulator.controllers[0])
emulator.tick(screen)
keep_going = screen.render()
fps.update(target_fps)
        except Exception as e:
            print('Exception occurred: ' + str(e))
keep_going = False
| 35.301587
| 94
| 0.571043
|
import argparse
import pygame
import sys
from nes import Controller
from screen import Screen
from fps import FPS
from tas import TAS
from emulator import Emulator
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description = 'Dumps NES header information')
parser.add_argument('pathname', help='specify the NES file to open')
parser.add_argument('--tas', dest='tas', help='specify a TAS file to run', default=None)
parser.add_argument('--scale', dest='scale', help='specify the screen scale', default = 1)
args = parser.parse_args()
emulator = Emulator(args.pathname)
screen = Screen(args.scale)
fps = FPS()
if args.tas:
target_fps = 1000.0
tas = TAS(args.tas)
else:
target_fps = 60.0
tas = None
keep_going = True
while keep_going:
try:
# Get button presses
if tas == None:
pressed = pygame.key.get_pressed()
controller = emulator.controllers[0]
if pressed[pygame.K_RETURN]:
controller.set_key(Controller.BUTTON_START)
if pressed[pygame.K_RSHIFT]:
controller.set_key(Controller.BUTTON_SELECT)
if pressed[pygame.K_a]:
controller.set_key(Controller.BUTTON_LEFT)
if pressed[pygame.K_d]:
controller.set_key(Controller.BUTTON_RIGHT)
if pressed[pygame.K_w]:
controller.set_key(Controller.BUTTON_UP)
if pressed[pygame.K_s]:
controller.set_key(Controller.BUTTON_DOWN)
if pressed[pygame.K_j]:
controller.set_key(Controller.BUTTON_B)
if pressed[pygame.K_k]:
controller.set_key(Controller.BUTTON_A)
if tas != None:
tas.update_controller(emulator.controllers[0])
emulator.tick(screen)
keep_going = screen.render()
fps.update(target_fps)
        except Exception as e:
            print('Exception occurred: ' + str(e))
keep_going = False
| 0
| 0
| 0
|
20454a0fbb1ea04507f12ffef3b7da1960cf9ea3
| 1,317
|
py
|
Python
|
lambda_cron/cli/command/aws_lambda.py
|
MediaMath/lambda-cron
|
2545e9fdeced7ebeaba2f98d02891cc6db7546e2
|
[
"Apache-2.0"
] | 22
|
2017-10-27T11:37:58.000Z
|
2021-11-09T09:35:37.000Z
|
lambda_cron/cli/command/aws_lambda.py
|
MediaMath/lambda-cron
|
2545e9fdeced7ebeaba2f98d02891cc6db7546e2
|
[
"Apache-2.0"
] | 1
|
2018-03-21T18:31:01.000Z
|
2018-03-21T18:31:01.000Z
|
lambda_cron/cli/command/aws_lambda.py
|
MediaMath/lambda-cron
|
2545e9fdeced7ebeaba2f98d02891cc6db7546e2
|
[
"Apache-2.0"
] | 3
|
2017-10-27T16:49:42.000Z
|
2018-11-03T04:14:10.000Z
|
# Copyright (C) 2016 MediaMath <http://www.mediamath.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import datetime
from command import AwsCommand
| 38.735294
| 145
| 0.675778
|
# Copyright (C) 2016 MediaMath <http://www.mediamath.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import datetime
from command import AwsCommand
class InvokeCommand(AwsCommand):
def payload(self):
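        # Body (sans outer braces) of a CloudWatch-style event for a manual invocation.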
return "\"source\": \"LambdaCron-cli-invoke\", \"time\": \"{time}\", \"resources\": [\"Manual:invoke/LambdaCron-{environment}\"]".format(
time=datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ'),
environment=self.config.environment
)
def run(self):
invoke_command = [
"aws", "lambda", "invoke", "--invocation-type", "Event", "--function-name", self.get_stack_name(),
"--payload", '{' + self.payload() + '}', os.path.join(self.get_tmp_directory(), 'invoke_output.txt')
]
self.exec_command(invoke_command)
| 570
| 11
| 77
|
f42c02c0c9b6c0a562b01ec9dc5934ca6fed0f1a
| 4,602
|
py
|
Python
|
geoportal/geoportailv3_geoportal/views/casipo.py
|
Geoportail-Luxembourg/geoportailv3
|
a3797a426e263683cdebe371753e655604789474
|
[
"MIT"
] | 17
|
2015-01-14T08:40:22.000Z
|
2021-05-08T04:39:50.000Z
|
geoportal/geoportailv3_geoportal/views/casipo.py
|
Geoportail-Luxembourg/geoportailv3
|
a3797a426e263683cdebe371753e655604789474
|
[
"MIT"
] | 1,477
|
2015-01-05T09:58:41.000Z
|
2022-03-18T11:07:09.000Z
|
geoportal/geoportailv3_geoportal/views/casipo.py
|
Geoportail-Luxembourg/geoportailv3
|
a3797a426e263683cdebe371753e655604789474
|
[
"MIT"
] | 14
|
2015-07-24T07:33:13.000Z
|
2021-03-02T13:51:48.000Z
|
# -*- coding: UTF-8 -*-
from pyramid.i18n import get_localizer, TranslationStringFactory
from pyramid.view import view_config
from pyramid.response import Response
from c2cgeoportal_commons.models import DBSessions, DBSession
import logging
import owncloud
import shutil
import os
import smtplib
import urllib.request
from email.mime.text import MIMEText
import time
import datetime
import sys
_ = TranslationStringFactory("geoportailv3_geoportal-server")
log = logging.getLogger(__name__)
| 37.112903
| 215
| 0.590613
|
# -*- coding: UTF-8 -*-
from pyramid.i18n import get_localizer, TranslationStringFactory
from pyramid.view import view_config
from pyramid.response import Response
from c2cgeoportal_commons.models import DBSessions, DBSession
import logging
import owncloud
import shutil
import os
import smtplib
import urllib.request
from email.mime.text import MIMEText
import time
import datetime
import sys
_ = TranslationStringFactory("geoportailv3_geoportal-server")
log = logging.getLogger(__name__)
class Casipo(object):
def __init__(self, request):
self.request = request
self.config = self.request.registry.settings
self.localizer = get_localizer(self.request)
def __download(self, num):
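        # Fetch the CASIPO PDF for the given ids from the FME endpoint
        # (staging or production) and save it under /tmp with a daily sequence number.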
if self.staging:
url = "%s?ids=%s&token=%s" % (
self.config["casipo"]["staging_url"],
num,
self.config["casipo"]["fme_token"])
else:
url = "%s?ids=%s&token=%s" % (
self.config["casipo"]["prod_url"],
num,
self.config["casipo"]["fme_token"])
db_ecadastre = DBSessions['ecadastre']
cnt = 0
try:
sql = "select nextval_daily ('casipo_seq')"
results = DBSession.execute(sql)
for res in results:
cnt = res[0]
except Exception as e:
log.exception(e)
try:
f = urllib.request.urlopen(url, None, 1800)
data = f
# YYYYMMJJ_Commune_Extrait_CASIPO_nn.pdf
commune = ""
sql = "select replace(commune_administrative , '/', '_') as commune_administrative FROM DIFFDATA.communes_adm_cad_sections WHERE code_commune = " + str(int(num[0:3])) + " GROUP BY commune_administrative"
results = db_ecadastre.execute(sql)
for res in results:
commune = res['commune_administrative']
self.filename = '/tmp/%s_%s_Extrait_CASIPO_%s.pdf' % (str(datetime.datetime.now().strftime("%Y%m%d")), commune, str(cnt))
with open(self.filename, 'wb') as fp:
shutil.copyfileobj(data, fp)
except Exception as e:
log.exception(e)
data = None
log.debug(url)
return
def __upload2owncloud(self):
oc = owncloud.Client(self.config["casipo"]["owncloud_internal_url"])
oc.login(self.config["casipo"]["owncloud_user"],
self.config["casipo"]["owncloud_password"])
oc.put_file(os.path.basename(self.filename), self.filename)
link_info = oc.share_file_with_link(os.path.basename(self.filename))
self.link = link_info.get_link().replace(
self.config["casipo"]["owncloud_internal_url"].replace('http://', 'https://'),
self.config["casipo"]["owncloud_external_url"])
self.link += "/download"
os.remove(self.filename)
return
def __send_mail(self, email):
if self.link == 'error':
mailtext = _("CASIPO Error during report generation")
else:
mailtext = _("CASIPO Mail the report link ${link}",
mapping={'link': self.link})
msg = MIMEText(self.localizer.translate(mailtext), 'html', 'utf-8')
me = 'support@geoportail.lu'
you = email
mails = [you]
if "bcc_address" in self.config["casipo"]:
bcc = self.config["casipo"]["bcc_address"]
msg['BCC'] = bcc
mails.append(bcc)
msg['Subject'] = 'Rapport CASIPO'
msg['From'] = me
msg['To'] = you
s = smtplib.SMTP(self.config["casipo"]["smtp_server"])
s.sendmail(me, mails, msg.as_string())
s.quit()
return
def __log_download_stats(self, objectids, download_link):
pass
@view_config(route_name='casipo_report')
def casipo_report(self):
oid = self.request.matchdict.get('oid', None)
email = self.request.params.get('email', None)
self.staging =\
self.request.params.get('staging', 'False').lower() == 'true'
resp = _("CASIPO webservice response ${email}",
                 mapping={'email': email})
try:
self.__download(oid)
self.__upload2owncloud()
except Exception as e:
log.exception(e)
self.link = 'error'
self.__log_download_stats(oid, self.link)
self.__send_mail(email)
headers = {"Content-Type": 'text/html'}
return Response(self.localizer.translate(resp), headers=headers)
| 3,879
| 207
| 23
|
67ebedd5c27133e804f63aa2724d413856cdf63d
| 431
|
py
|
Python
|
tests/test_url.py
|
keeranrichardson/python-seo-tool
|
0bc9c675be7c6757649abf06bde7d76d3f75fe81
|
[
"MIT"
] | 1
|
2022-02-20T17:23:41.000Z
|
2022-02-20T17:23:41.000Z
|
tests/test_url.py
|
keeranrichardson/python-seo-tool
|
0bc9c675be7c6757649abf06bde7d76d3f75fe81
|
[
"MIT"
] | null | null | null |
tests/test_url.py
|
keeranrichardson/python-seo-tool
|
0bc9c675be7c6757649abf06bde7d76d3f75fe81
|
[
"MIT"
] | 1
|
2022-03-05T15:41:33.000Z
|
2022-03-05T15:41:33.000Z
|
from urlScanner import UrlScanner
| 28.733333
| 58
| 0.654292
|
from urlScanner import UrlScanner
class TestUrl:
def testUrlExistsReturn200(self):
url = UrlScanner('https://keeranrichardson.com')
assert 200 == url.getStatus()
def testUrlNotExistsReturn404(self):
url = UrlScanner('https://keeranrichardson.com/6')
assert 404 == url.getStatus()
def testErrorReadingUrl(self):
url = UrlScanner('')
assert 'error'==url.getStatus()
| 297
| -7
| 108
|
cf51cf919c2dc41e8d41e4fd269be21fec666712
| 718
|
py
|
Python
|
agent/lm_agent/exceptions.py
|
omnivector-solutions/license-manager
|
9eb1e4569d692aef83a2388096e7413bc010be61
|
[
"MIT"
] | 2
|
2020-11-15T22:54:39.000Z
|
2022-02-15T07:58:55.000Z
|
agent/lm_agent/exceptions.py
|
omnivector-solutions/license-manager
|
9eb1e4569d692aef83a2388096e7413bc010be61
|
[
"MIT"
] | 2
|
2022-02-18T19:36:45.000Z
|
2022-03-16T23:07:44.000Z
|
agent/lm_agent/exceptions.py
|
omnivector-solutions/license-manager
|
9eb1e4569d692aef83a2388096e7413bc010be61
|
[
"MIT"
] | null | null | null |
"""
Custom exceptions for the License Manager Agent.
"""
from buzz import Buzz
class LicenseManagerAuthTokenError(Buzz):
"""Exception for backend connection issues."""
class LicenseManagerBackendConnectionError(Buzz):
"""Exception for backend connection issues."""
class LicenseManagerBackendVersionError(Buzz):
"""Exception for backend/agent version mismatches."""
class LicenseManagerEmptyReportError(Buzz):
"""Exception for empty report when no licenses added in backend."""
class LicenseManagerNonSupportedServerTypeError(Buzz):
"""Exception for entry with non supported server type."""
class LicenseManagerBadServerOutput(Buzz):
"""Exception for license server bad output."""
| 23.933333
| 71
| 0.764624
|
"""
Custom exceptions for the License Manager Agent.
"""
from buzz import Buzz
class LicenseManagerAuthTokenError(Buzz):
"""Exception for backend connection issues."""
class LicenseManagerBackendConnectionError(Buzz):
"""Exception for backend connection issues."""
class LicenseManagerBackendVersionError(Buzz):
"""Exception for backend/agent version mismatches."""
class LicenseManagerEmptyReportError(Buzz):
"""Exception for empty report when no licenses added in backend."""
class LicenseManagerNonSupportedServerTypeError(Buzz):
"""Exception for entry with non supported server type."""
class LicenseManagerBadServerOutput(Buzz):
"""Exception for license server bad output."""
| 0
| 0
| 0
|
3bfbe1a0536d97cc85cad428177e7d4db99e0f9d
| 6,292
|
py
|
Python
|
raptor/wrapper.py
|
jialuechen/raptor
|
bac516a45dfee9d21ac14221a2d9d5bef810cbd0
|
[
"MIT"
] | null | null | null |
raptor/wrapper.py
|
jialuechen/raptor
|
bac516a45dfee9d21ac14221a2d9d5bef810cbd0
|
[
"MIT"
] | null | null | null |
raptor/wrapper.py
|
jialuechen/raptor
|
bac516a45dfee9d21ac14221a2d9d5bef810cbd0
|
[
"MIT"
] | null | null | null |
import dask.dataframe as da
from raptor.momentum import (
KAMAIndicator,
ROCIndicator,
RSIIndicator,
StochasticOscillator,
StochRSIIndicator,
)
from raptor.trend import (
MACD,
ADXIndicator,
AroonIndicator,
EMAIndicator,
SMAIndicator,
TRIXIndicator,
)
from raptor.volatility import (
AverageTrueRange,
BollingerBands,
)
from raptor.volume import (
AccDistIndexIndicator,
ForceIndexIndicator,
VolumePriceTrendIndicator,
)
| 23.303704
| 85
| 0.635092
|
import dask.dataframe as da
from raptor.momentum import (
KAMAIndicator,
ROCIndicator,
RSIIndicator,
StochasticOscillator,
StochRSIIndicator,
)
from raptor.trend import (
MACD,
ADXIndicator,
AroonIndicator,
EMAIndicator,
SMAIndicator,
TRIXIndicator,
)
from raptor.volatility import (
AverageTrueRange,
BollingerBands,
)
from raptor.volume import (
AccDistIndexIndicator,
ForceIndexIndicator,
VolumePriceTrendIndicator,
)
def run_volume(
df: da.DataFrame,
high: str,
low: str,
close: str,
volume: str,
fillna: bool = False,
colprefix: str = "",
) -> da.DataFrame:
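    """Append volume-based indicator columns (ADI, Force Index, VPT) to `df`."""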
# Accumulation Distribution Index
df[f"{colprefix}volume_adi"] = AccDistIndexIndicator(
high=df[high], low=df[low], close=df[close], volume=df[volume], fillna=fillna
).acc_dist_index()
# Force Index
df[f"{colprefix}volume_fi"] = ForceIndexIndicator(
close=df[close], volume=df[volume], window=13, fillna=fillna
).force_index()
# Volume Price Trend
df[f"{colprefix}volume_vpt"] = VolumePriceTrendIndicator(
close=df[close], volume=df[volume], fillna=fillna
).volume_price_trend()
return df
def run_volatility(
df: da.DataFrame,
high: str,
low: str,
close: str,
fillna: bool = False,
colprefix: str = "",
) -> da.DataFrame:
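    """Append volatility indicator columns (ATR, Bollinger Bands) to `df`."""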
# Average True Range
df[f"{colprefix}volatility_atr"] = AverageTrueRange(
close=df[close], high=df[high], low=df[low], window=10, fillna=fillna
).average_true_range()
# Bollinger Bands
indicator_bb = BollingerBands(
close=df[close], window=20, window_dev=2, fillna=fillna
)
df[f"{colprefix}volatility_bbm"] = indicator_bb.bollinger_mavg()
df[f"{colprefix}volatility_bbh"] = indicator_bb.bollinger_hband()
df[f"{colprefix}volatility_bbl"] = indicator_bb.bollinger_lband()
df[f"{colprefix}volatility_bbw"] = indicator_bb.bollinger_wband()
df[f"{colprefix}volatility_bbp"] = indicator_bb.bollinger_pband()
df[f"{colprefix}volatility_bbhi"] = indicator_bb.bollinger_hband_indicator()
df[f"{colprefix}volatility_bbli"] = indicator_bb.bollinger_lband_indicator()
return df
def run_trend(
df: da.DataFrame,
high: str,
low: str,
close: str,
fillna: bool = False,
colprefix: str = "",
) -> da.DataFrame:
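    """Append trend indicator columns (MACD, SMA/EMA, ADX, TRIX, Aroon) to `df`."""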
# MACD
indicator_macd = MACD(
close=df[close], window_slow=26, window_fast=12, window_sign=9, fillna=fillna
)
df[f"{colprefix}trend_macd"] = indicator_macd.macd()
df[f"{colprefix}trend_macd_signal"] = indicator_macd.macd_signal()
df[f"{colprefix}trend_macd_diff"] = indicator_macd.macd_diff()
# SMAs
df[f"{colprefix}trend_sma_fast"] = SMAIndicator(
close=df[close], window=12, fillna=fillna
).sma_indicator()
df[f"{colprefix}trend_sma_slow"] = SMAIndicator(
close=df[close], window=26, fillna=fillna
).sma_indicator()
# EMAs
df[f"{colprefix}trend_ema_fast"] = EMAIndicator(
close=df[close], window=12, fillna=fillna
).ema_indicator()
df[f"{colprefix}trend_ema_slow"] = EMAIndicator(
close=df[close], window=26, fillna=fillna
).ema_indicator()
# Average Directional Movement Index (ADX)
indicator_adx = ADXIndicator(
high=df[high], low=df[low], close=df[close], window=14, fillna=fillna
)
df[f"{colprefix}trend_adx"] = indicator_adx.adx()
df[f"{colprefix}trend_adx_pos"] = indicator_adx.adx_pos()
df[f"{colprefix}trend_adx_neg"] = indicator_adx.adx_neg()
# TRIX Indicator
df[f"{colprefix}trend_trix"] = TRIXIndicator(
close=df[close], window=15, fillna=fillna
).trix()
# Aroon Indicator
indicator_aroon = AroonIndicator(close=df[close], window=25, fillna=fillna)
df[f"{colprefix}trend_aroon_up"] = indicator_aroon.aroon_up()
df[f"{colprefix}trend_aroon_down"] = indicator_aroon.aroon_down()
df[f"{colprefix}trend_aroon_ind"] = indicator_aroon.aroon_indicator()
return df
def run_momentum(
df: da.DataFrame,
high: str,
low: str,
close: str,
volume: str,
fillna: bool = False,
colprefix: str = "",
) -> da.DataFrame:
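    """Append momentum indicator columns (RSI, StochRSI, Stochastic, KAMA, ROC) to `df`."""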
# Relative Strength Index (RSI)
df[f"{colprefix}momentum_rsi"] = RSIIndicator(
close=df[close], window=14, fillna=fillna
).rsi()
# Stoch RSI (StochRSI)
indicator_srsi = StochRSIIndicator(
close=df[close], window=14, smooth1=3, smooth2=3, fillna=fillna
)
df[f"{colprefix}momentum_stoch_rsi"] = indicator_srsi.stochrsi()
df[f"{colprefix}momentum_stoch_rsi_k"] = indicator_srsi.stochrsi_k()
df[f"{colprefix}momentum_stoch_rsi_d"] = indicator_srsi.stochrsi_d()
# Stoch Indicator
indicator_so = StochasticOscillator(
high=df[high],
low=df[low],
close=df[close],
window=14,
smooth_window=3,
fillna=fillna,
)
df[f"{colprefix}momentum_stoch"] = indicator_so.stoch()
df[f"{colprefix}momentum_stoch_signal"] = indicator_so.stoch_signal()
# KAMA
df[f"{colprefix}momentum_kama"] = KAMAIndicator(
close=df[close], window=10, pow1=2, pow2=30, fillna=fillna
).kama()
# Rate Of Change
df[f"{colprefix}momentum_roc"] = ROCIndicator(
close=df[close], window=12, fillna=fillna
).roc()
return df
def run_all_da_features(
df: da.DataFrame,
open: str, # noqa
high: str,
low: str,
close: str,
volume: str,
fillna: bool = False,
colprefix: str = "",
) -> da.DataFrame:
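    """Run every indicator group (volume, volatility, trend, momentum) on `df`."""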
df = run_volume(
df=df,
high=high,
low=low,
close=close,
volume=volume,
fillna=fillna,
colprefix=colprefix,
)
df = run_volatility(
df=df, high=high, low=low, close=close, fillna=fillna, colprefix=colprefix
)
df = run_trend(
df=df, high=high, low=low, close=close, fillna=fillna, colprefix=colprefix
)
df = run_momentum(
df=df,
high=high,
low=low,
close=close,
volume=volume,
fillna=fillna,
colprefix=colprefix,
)
return df
| 5,634
| 0
| 115
|
322a0b56205aca331aa6de77b96b5abea4e23791
| 486
|
py
|
Python
|
codes_/0209_Minimum_Size_Subarray_Sum.py
|
SaitoTsutomu/leetcode
|
4656d66ab721a5c7bc59890db9a2331c6823b2bf
|
[
"MIT"
] | null | null | null |
codes_/0209_Minimum_Size_Subarray_Sum.py
|
SaitoTsutomu/leetcode
|
4656d66ab721a5c7bc59890db9a2331c6823b2bf
|
[
"MIT"
] | null | null | null |
codes_/0209_Minimum_Size_Subarray_Sum.py
|
SaitoTsutomu/leetcode
|
4656d66ab721a5c7bc59890db9a2331c6823b2bf
|
[
"MIT"
] | null | null | null |
# %% [209. Minimum Size Subarray Sum](https://leetcode.com/problems/minimum-size-subarray-sum/)
| 37.384615
| 95
| 0.442387
|
# %% [209. Minimum Size Subarray Sum](https://leetcode.com/problems/minimum-size-subarray-sum/)
from typing import List


class Solution:
def minSubArrayLen(self, s: int, nums: List[int]) -> int:
p1, p2, sm, mn = 0, -1, 0, -1
while sm >= s or p2 < len(nums) - 1:
if sm < s:
sm += nums[(p2 := p2 + 1)]
else:
if mn < 0 or p2 - p1 < mn:
mn = p2 - p1
sm -= nums[(p1 := p1 + 1) - 1]
return mn + 1
| 348
| -6
| 48
|
a984bcf3192606be4e00f7ce2c78e708bba3758c
| 8,858
|
py
|
Python
|
tools/model_test.py
|
EmiyaNing/OpenPCDet
|
41ff28209cb000b51626a0ed8593b0adbe3dd447
|
[
"Apache-2.0"
] | null | null | null |
tools/model_test.py
|
EmiyaNing/OpenPCDet
|
41ff28209cb000b51626a0ed8593b0adbe3dd447
|
[
"Apache-2.0"
] | null | null | null |
tools/model_test.py
|
EmiyaNing/OpenPCDet
|
41ff28209cb000b51626a0ed8593b0adbe3dd447
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import tqdm
import torch
import torch.distributed as dist
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
if __name__ == '__main__':
main()
| 38.017167
| 113
| 0.687175
|
import argparse
import datetime
import glob
import os
from pathlib import Path
from test import repeat_eval_ckpt
import tqdm
import torch
import torch.distributed as dist
import torch.nn as nn
from tensorboardX import SummaryWriter
from pcdet.config import cfg, cfg_from_list, cfg_from_yaml_file, log_config_to_file
from pcdet.datasets import build_dataloader
from pcdet.models import build_network, model_fn_decorator
from pcdet.utils import common_utils
from train_utils.optimization import build_optimizer, build_scheduler
def train_one_epoch(model, optimizer, train_loader, model_func, lr_scheduler, accumulated_iter, optim_cfg,
rank, tbar, total_it_each_epoch, dataloader_iter, tb_log=None, leave_pbar=False):
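    # Smoke test: pull one batch, step the LR scheduler, run a single forward
    # pass to print the loss and logging dicts, then break out of the loop.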
if total_it_each_epoch == len(train_loader):
dataloader_iter = iter(train_loader)
if rank == 0:
pbar = tqdm.tqdm(total=total_it_each_epoch, leave=leave_pbar, desc='train', dynamic_ncols=True)
for cur_it in range(total_it_each_epoch):
try:
batch = next(dataloader_iter)
except StopIteration:
dataloader_iter = iter(train_loader)
batch = next(dataloader_iter)
print('new iters')
lr_scheduler.step(accumulated_iter)
        try:
            cur_lr = float(optimizer.lr)
        except AttributeError:
            cur_lr = optimizer.param_groups[0]['lr']
if tb_log is not None:
tb_log.add_scalar('meta_data/learning_rate', cur_lr, accumulated_iter)
model.train()
optimizer.zero_grad()
loss, tb_dict, disp_dict = model_func(model, batch)
print(loss)
print(tb_dict)
print(disp_dict)
break
if rank == 0:
pbar.close()
return accumulated_iter
def train_model(model, optimizer, train_loader, model_func, lr_scheduler, optim_cfg,
start_epoch, total_epochs, start_iter, rank, tb_log, ckpt_save_dir, train_sampler=None,
lr_warmup_scheduler=None, ckpt_save_interval=1, max_ckpt_save_num=50,
merge_all_iters_to_one_epoch=False):
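    # Drive train_one_epoch over the epoch range, using the warm-up scheduler
    # for the first optim_cfg.WARMUP_EPOCH epochs.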
accumulated_iter = start_iter
with tqdm.trange(start_epoch, total_epochs, desc='epochs', dynamic_ncols=True, leave=(rank == 0)) as tbar:
total_it_each_epoch = len(train_loader)
if merge_all_iters_to_one_epoch:
assert hasattr(train_loader.dataset, 'merge_all_iters_to_one_epoch')
train_loader.dataset.merge_all_iters_to_one_epoch(merge=True, epochs=total_epochs)
total_it_each_epoch = len(train_loader) // max(total_epochs, 1)
dataloader_iter = iter(train_loader)
for cur_epoch in tbar:
if train_sampler is not None:
train_sampler.set_epoch(cur_epoch)
# train one epoch
if lr_warmup_scheduler is not None and cur_epoch < optim_cfg.WARMUP_EPOCH:
cur_scheduler = lr_warmup_scheduler
else:
cur_scheduler = lr_scheduler
accumulated_iter = train_one_epoch(
model, optimizer, train_loader, model_func,
lr_scheduler=cur_scheduler,
accumulated_iter=accumulated_iter, optim_cfg=optim_cfg,
rank=rank, tbar=tbar, tb_log=tb_log,
leave_pbar=(cur_epoch + 1 == total_epochs),
total_it_each_epoch=total_it_each_epoch,
dataloader_iter=dataloader_iter
)
def parse_config():
parser = argparse.ArgumentParser(description='arg parser')
parser.add_argument('--cfg_file', type=str, default=None, help='specify the config for training')
parser.add_argument('--batch_size', type=int, default=None, required=False, help='batch size for training')
parser.add_argument('--epochs', type=int, default=None, required=False, help='number of epochs to train for')
parser.add_argument('--workers', type=int, default=8, help='number of workers for dataloader')
parser.add_argument('--extra_tag', type=str, default='default', help='extra tag for this experiment')
parser.add_argument('--ckpt', type=str, default=None, help='checkpoint to start from')
parser.add_argument('--pretrained_model', type=str, default=None, help='pretrained_model')
parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none')
parser.add_argument('--tcp_port', type=int, default=18888, help='tcp port for distrbuted training')
parser.add_argument('--sync_bn', action='store_true', default=False, help='whether to use sync bn')
parser.add_argument('--fix_random_seed', action='store_true', default=False, help='')
parser.add_argument('--ckpt_save_interval', type=int, default=1, help='number of training epochs')
parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
parser.add_argument('--max_ckpt_save_num', type=int, default=30, help='max number of saved checkpoint')
parser.add_argument('--merge_all_iters_to_one_epoch', action='store_true', default=False, help='')
parser.add_argument('--set', dest='set_cfgs', default=None, nargs=argparse.REMAINDER,
help='set extra config keys if needed')
parser.add_argument('--max_waiting_mins', type=int, default=0, help='max waiting minutes')
parser.add_argument('--start_epoch', type=int, default=0, help='')
parser.add_argument('--save_to_file', action='store_true', default=False, help='')
args = parser.parse_args()
cfg_from_yaml_file(args.cfg_file, cfg)
cfg.TAG = Path(args.cfg_file).stem
cfg.EXP_GROUP_PATH = '/'.join(args.cfg_file.split('/')[1:-1]) # remove 'cfgs' and 'xxxx.yaml'
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs, cfg)
return args, cfg
def main():
args, cfg = parse_config()
if args.launcher == 'none':
dist_train = False
total_gpus = 1
else:
total_gpus, cfg.LOCAL_RANK = getattr(common_utils, 'init_dist_%s' % args.launcher)(
args.tcp_port, args.local_rank, backend='nccl'
)
dist_train = True
if args.batch_size is None:
args.batch_size = cfg.OPTIMIZATION.BATCH_SIZE_PER_GPU
else:
        assert args.batch_size % total_gpus == 0, 'Batch size should be divisible by the number of GPUs'
args.batch_size = args.batch_size // total_gpus
args.epochs = cfg.OPTIMIZATION.NUM_EPOCHS if args.epochs is None else args.epochs
if args.fix_random_seed:
common_utils.set_random_seed(666)
output_dir = cfg.ROOT_DIR / 'output' / cfg.EXP_GROUP_PATH / cfg.TAG / args.extra_tag
ckpt_dir = output_dir / 'ckpt'
output_dir.mkdir(parents=True, exist_ok=True)
ckpt_dir.mkdir(parents=True, exist_ok=True)
log_file = output_dir / ('log_train_%s.txt' % datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
logger = common_utils.create_logger(log_file, rank=cfg.LOCAL_RANK)
# -----------------------create dataloader & network & optimizer---------------------------
train_set, train_loader, train_sampler = build_dataloader(
dataset_cfg=cfg.DATA_CONFIG,
class_names=cfg.CLASS_NAMES,
batch_size=args.batch_size,
dist=dist_train, workers=args.workers,
logger=logger,
training=True,
merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch,
total_epochs=args.epochs
)
model = build_network(model_cfg=cfg.MODEL, num_class=len(cfg.CLASS_NAMES), dataset=train_set)
if args.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model.cuda()
optimizer = build_optimizer(model, cfg.OPTIMIZATION)
    # load checkpoint if possible
start_epoch = it = 0
last_epoch = -1
if args.pretrained_model is not None:
model.load_params_from_file(filename=args.pretrained_model, to_cpu=dist_train, logger=logger)
    model.train()  # call before wrapping in DistributedDataParallel so that frozen parameters are handled correctly
lr_scheduler, lr_warmup_scheduler = build_scheduler(
optimizer, total_iters_each_epoch=len(train_loader), total_epochs=args.epochs,
last_epoch=last_epoch, optim_cfg=cfg.OPTIMIZATION
)
train_model(
model,
optimizer,
train_loader,
model_func=model_fn_decorator(),
lr_scheduler=lr_scheduler,
optim_cfg=cfg.OPTIMIZATION,
start_epoch=start_epoch,
total_epochs=args.epochs,
start_iter=it,
rank=cfg.LOCAL_RANK,
tb_log=None,
ckpt_save_dir=ckpt_dir,
train_sampler=train_sampler,
lr_warmup_scheduler=lr_warmup_scheduler,
ckpt_save_interval=args.ckpt_save_interval,
max_ckpt_save_num=args.max_ckpt_save_num,
merge_all_iters_to_one_epoch=args.merge_all_iters_to_one_epoch
)
if __name__ == '__main__':
main()
| 8,181
| 0
| 92
|
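The train_model loop above accepts ckpt_save_dir, ckpt_save_interval, and max_ckpt_save_num but never writes a checkpoint in this copy. Below is a minimal sketch of the periodic save such a loop typically performs, assuming a plain torch model and optimizer; save_checkpoint_sketch, the file naming scheme, and the state-dict keys are illustrative assumptions, not the project's actual checkpoint format.

import glob
import os

import torch


def save_checkpoint_sketch(model, optimizer, cur_epoch, accumulated_iter,
                           ckpt_save_dir, ckpt_save_interval=1, max_ckpt_save_num=50):
    # Only save every ckpt_save_interval epochs.
    if (cur_epoch + 1) % ckpt_save_interval != 0:
        return
    # Prune the oldest checkpoints so at most max_ckpt_save_num remain on disk.
    ckpt_list = sorted(glob.glob(os.path.join(str(ckpt_save_dir), 'checkpoint_epoch_*.pth')),
                       key=os.path.getmtime)
    while len(ckpt_list) >= max_ckpt_save_num:
        os.remove(ckpt_list.pop(0))
    # Persist model/optimizer state plus the bookkeeping counters the loop tracks.
    torch.save({
        'epoch': cur_epoch,
        'it': accumulated_iter,
        'model_state': model.state_dict(),
        'optimizer_state': optimizer.state_dict(),
    }, os.path.join(str(ckpt_save_dir), 'checkpoint_epoch_%d.pth' % (cur_epoch + 1)))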
78f829f3996b17988d65cadef8296a5ba880a14e
| 18,198
|
py
|
Python
|
upstream/test/functional-tests-legacy/PfwTestCase/Types/tSTRING_128.py
|
TinkerEdgeR-Android/external_parameter-framework
|
108db75a59dbea562ac4bcaf8c6cc862c4919af0
|
[
"BSD-3-Clause"
] | null | null | null |
upstream/test/functional-tests-legacy/PfwTestCase/Types/tSTRING_128.py
|
TinkerEdgeR-Android/external_parameter-framework
|
108db75a59dbea562ac4bcaf8c6cc862c4919af0
|
[
"BSD-3-Clause"
] | null | null | null |
upstream/test/functional-tests-legacy/PfwTestCase/Types/tSTRING_128.py
|
TinkerEdgeR-Android/external_parameter-framework
|
108db75a59dbea562ac4bcaf8c6cc862c4919af0
|
[
"BSD-3-Clause"
] | null | null | null |
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import commands, string, random
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of type STRING_128 - max number of char = 128
| 60.66
| 138
| 0.477415
|
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import commands, string, random
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of type STRING_128 - max number of char = 128
class TestCases(PfwTestCase):
def setUp(self):
self.param_name = "/Test/Test/TEST_DIR/STR_CHAR128"
self.pfw.sendCmd("setTuningMode", "on")
self.size_max=128
def tearDown(self):
self.pfw.sendCmd("setTuningMode", "off")
def test_Digits_String_Case(self):
"""
|============================================================|
| Testing data types - String |
| max number of char = 128 |
|============================================================|
| File : tSTRING_128.py |
| Version : 01 |
| |
| Test cases : |
| - STR_CHAR128 parameter nominal value = string_Conf_0 |
| - STR_CHAR128 parameter empty value = '' |
| - STR_CHAR128 parameter full value = generate randomly 128 |
| letters characters |
| - STR_CHAR128 parameter space character value = test string|
| - STR_CHAR128 parameter full digits value = generate |
| randomly 128 digits char |
| - STR_CHAR128 parameter oversize value = generate randomly |
| 129 char |
| |
|============================================================|
| STR_CHAR128 parameter in digits case = 128 digits char |
|============================================================|
| Test Case description : |
| - STR_CHAR128 parameter in digit case = 128 digits char |
| Tested commands : |
| * setParameter |
| - getParameter |
| Expected result : |
| - STR_CHAR128 parameter set to the same 128 digits char |
| (blackboard and filesystem values checked) |
|============================================================|
"""
log.D(self.test_Digits_String_Case.__doc__)
log.I("STR_CHAR128 parameter initial state = string_Conf_0")
value = ""
for i in range(self.size_max-1):
value=value+str(random.choice(string.digits))
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s -> %s" % (self.param_name, err))
assert out == "Done", log.F(out)
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when getting parameter %s -> %s" % (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s" % (self.param_name, value, out))
#Check parameter value on filesystem
assert open(os.environ["PFW_RESULT"] + "/STR_CHAR128").read()[:-1] == value, log.F("FILESYSTEM : parameter update error")
def test_Empty_String_Case(self):
"""
|============================================================|
| STR_CHAR128 parameter empty string = \'\' |
|============================================================|
| Test Case description : |
| - STR_CHAR128 parameter in empty string case = \'\' |
| Tested commands : |
| * setParameter |
| - getParameter |
| Expected result : |
| - STR_CHAR128 parameter set empty |
| (blackboard and filesystem values checked) |
|============================================================|
"""
log.D(self.test_Empty_String_Case.__doc__)
log.I("STR_CHAR128 parameter empty string = \'\'")
value = ""
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s -> %s" % (self.param_name, err))
assert out == "Done", log.F(out)
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when getting parameter %s -> %s" % (self.param_name, err))
assert out == "", log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s" % (self.param_name, value, out))
#Check parameter value on filesystem
assert open(os.environ["PFW_RESULT"] + "/STR_CHAR128").read()[:-1] == "", log.F("FILESYSTEM : parameter update error")
def test_OverSize_String_Case(self):
"""
|============================================================|
| STR_CHAR128 parameter oversize |
|============================================================|
| Test Case description : |
| - STR_CHAR128 parameter in oversize case = 129 random char |
| Tested commands : |
| * setParameter |
| - getParameter |
| Expected result : |
| - error detected |
| - STR_CHAR128 parameter not updated |
|============================================================|
"""
log.D(self.test_OverSize_String_Case.__doc__)
log.I("STR_CHAR128 parameter size max=128 character")
value=""
for i in range(self.size_max+1):
value=value+str(random.choice(string.letters))
param_check = open(os.environ["PFW_RESULT"] + "/STR_CHAR128").read()[:-1]
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value, expectSuccess=False)
assert err == None, log.E("when setting parameter %s -> %s" % (self.param_name, err))
assert out != "Done", log.F("Error not detected when setting parameter %s over size" % (self.param_name))
#Check parameter value on filesystem
        assert open(os.environ["PFW_RESULT"] + "/STR_CHAR128").read()[:-1] == param_check, log.F("FILESYSTEM : Forbidden parameter change")
def test_Full_Letters_String_Case(self):
"""
|============================================================|
| STR_CHAR128 parameter full size test case |
|============================================================|
| Test Case description : |
| - STR_CHAR128 parameter in fullsize case = 128 random char |
| Tested commands : |
| * setParameter |
| - getParameter |
| Expected result : |
| - STR_CHAR128 parameter set to the same 128 letters char |
| (blackboard and filesystem values checked) |
|============================================================|
"""
log.D(self.test_Full_Letters_String_Case.__doc__)
log.I("STR_CHAR128 parameter initial state : string")
value = ""
for i in range(self.size_max-1):
value=value+str(random.choice(string.letters))
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s -> %s" % (self.param_name, err))
assert out == "Done", log.F("Expected : Done, result : %s" % (out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("When setting parameter %s : %s" % (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s" % (self.param_name, value, out))
#Check parameter value on filesystem
assert open(os.environ["PFW_RESULT"] + "/STR_CHAR128").read()[:-1] == value, log.F("FILESYSTEM : parameter update error")
def test_Nominal_String_Case(self):
"""
|============================================================|
| STR_CHAR128 parameter Nominal test case |
|============================================================|
| Test Case description : |
| - STR_CHAR128 parameter in nominal case = TestString |
| Tested commands : |
| * setParameter |
| - getParameter |
| Expected result : |
| - STR_CHAR128 parameter set to TestString |
| (blackboard and filesystem values checked) |
|============================================================|
"""
log.D(self.test_Nominal_String_Case.__doc__)
log.I("STR_CHAR128 parameter nominal string = TestString")
value = "TestString"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("When setting parameter %s -> %s" % (self.param_name, err))
assert out == "Done", log.F("Expected : Done, found : %s" % (out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("When setting parameter %s -> %s" % (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s" % (self.param_name, value, out))
#Check parameter value on filesystem
assert open(os.environ["PFW_RESULT"] + "/STR_CHAR128").read()[:-1] == value, log.F("FILESYSTEM : parameter update error")
def test_Punctuation_Empty_Parenthese_String_Case(self):
"""
|============================================================|
| STR_CHAR128 parameter empty Parenthese char test case |
|============================================================|
| Test Case description : |
| - STR_CHAR128 parameter = TestParenthese() |
| Tested commands : |
| * setParameter |
| - getParameter |
| Expected result : |
        |          - Not determined yet                              |
|============================================================|
"""
log.D(self.test_Punctuation_Empty_Parenthese_String_Case.__doc__)
value = "ParentheseTest()"
log.I("STR_CHAR128 parameter Parenthese Char = %s" % (value))
param_check = open(os.environ["PFW_RESULT"] + "/STR_CHAR128").read()[:-1]
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("When setting parameter %s : %s" % (self.param_name, err))
assert out == "Done", log.F("Expected : Done, found : %s" % (out))
#Get parameter value
out, err = self.pfw.sendCmd("getParameter", self.param_name)
assert err == None, log.E("When getting parameter %s : %s" % (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s" % (self.param_name, value, out))
#Check parameter value on filesystem
assert open(os.environ["PFW_RESULT"] + "/STR_CHAR128").read()[:-1] == value, log.F("FILESYSTEM : parameter update error")
def test_Punctuation_Full_Parenthese_String_Case(self):
"""
|============================================================|
| STR_CHAR128 parameter full Parenthese char test case |
|============================================================|
| Test Case description : |
| - STR_CHAR128 parameter = TestParenthese(test) |
| Tested commands : |
| * setParameter |
| - getParameter |
| Expected result : |
        |          - Not determined yet                              |
|============================================================|
"""
log.D(self.test_Punctuation_Full_Parenthese_String_Case.__doc__)
value = "ParentheseTest(test)"
log.I("STR_CHAR128 parameter Parenthese Char = %s" % (value))
param_check = open(os.environ["PFW_RESULT"] + "/STR_CHAR128").read()[:-1]
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("When setting parameter %s : %s" % (self.param_name, err))
assert out == "Done", log.F("Expected : Done, found : %s" % (out))
#Get parameter value
out, err = self.pfw.sendCmd("getParameter", self.param_name)
assert err == None, log.E("When getting parameter %s : %s" % (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s" % (self.param_name, value, out))
#Check parameter value on filesystem
assert open(os.environ["PFW_RESULT"] + "/STR_CHAR128").read()[:-1] == value, log.F("FILESYSTEM : parameter update error")
def test_SpaceChar_String_Case(self):
"""
|============================================================|
| STR_CHAR128 parameter space char test case |
|============================================================|
| Test Case description : |
| - STR_CHAR128 parameter = Test String |
| Tested commands : |
| * setParameter |
| - getParameter |
| Expected result : |
        |          - Not determined yet                              |
|============================================================|
"""
log.D(self.test_SpaceChar_String_Case.__doc__)
value = "Test String"
log.I("STR_CHAR128 parameter Parenthese Char = %s" % (value))
value_check = "Test String"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("When setting parameter %s : %s" % (self.param_name, err))
assert out == "Done", log.F("Expected : Done, found : %s" % (out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("When setting parameter %s : %s" % (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s" % (self.param_name, value_check, out))
#Check parameter value on filesystem
assert open(os.environ["PFW_RESULT"] + "/STR_CHAR128").read()[:-1] == value_check, log.F("FILESYSTEM : parameter update error")
| 176
| 16,253
| 22
|
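Every test case above repeats the same three checks: set the parameter, read it back from the blackboard, and compare the filesystem copy. A minimal sketch of a helper method that would consolidate the pattern, assuming the same self.pfw command interface and PFW_RESULT layout used in the tests; set_and_check is a hypothetical method name, not part of the PfwTestCase library.

    def set_and_check(self, value):
        # Set the parameter and verify the command succeeded.
        out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
        assert err == None, log.E("when setting parameter %s -> %s" % (self.param_name, err))
        assert out == "Done", log.F(out)
        # Verify the blackboard reflects the new value.
        out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
        assert err == None, log.E("when getting parameter %s -> %s" % (self.param_name, err))
        assert out == value, log.F("BLACKBOARD : expected %s, found %s" % (value, out))
        # Verify the filesystem copy (trailing newline stripped) matches too.
        assert open(os.environ["PFW_RESULT"] + "/STR_CHAR128").read()[:-1] == value, \
            log.F("FILESYSTEM : parameter update error")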
4a3369cabd1ff491008878d6ff102afb077304b4
| 5,180
|
py
|
Python
|
predict_gender.py
|
chenjianxiong/coding_contest_2018
|
69687b73d4448a6cadf6130f462e9bdca2f20bc9
|
[
"MIT"
] | null | null | null |
predict_gender.py
|
chenjianxiong/coding_contest_2018
|
69687b73d4448a6cadf6130f462e9bdca2f20bc9
|
[
"MIT"
] | null | null | null |
predict_gender.py
|
chenjianxiong/coding_contest_2018
|
69687b73d4448a6cadf6130f462e9bdca2f20bc9
|
[
"MIT"
] | null | null | null |
import sys
import math
import numpy as np
if __name__ == "__main__":
    # We're using 80% of the data for training
TRAIN_SPLIT = 0.8
GENDER_MALE = 1
GENDER_FEMALE = 0
alpha = 0.54
X_train = None
Y_train = None
X_validation = None
Y_validation = None
X_test = None
Y_test = None
Best_theta = None
fileName = "/var/www/html/training_dataset.txt"
g_feature_names = []
g_vocabulary = {}
names = np.genfromtxt(fileName, delimiter = ",", dtype = "U25",
autostrip = True)
np.random.shuffle(names)
features = np.vectorize(features)
X = features(names[:, 0])
Y = np.array([GENDER_MALE if x == "male" else GENDER_FEMALE for x in names[:, 1]] )
# creating testing and training set
X_train, X_validation = X[:int(TRAIN_SPLIT * len(X))], X[int(TRAIN_SPLIT * len(X)):]
Y_train, Y_validation = Y[:int(TRAIN_SPLIT * len(Y))], Y[int(TRAIN_SPLIT * len(Y)):]
(g_feature_names, g_vocabulary) = fit(X_train)
initial_theta = np.zeros((len(g_feature_names), 1), dtype=np.float64)
X_validation = transform(X_validation, g_vocabulary)
iterations = len(X_validation)
theta = logistic_regression_by_stochastic_gradient_descent(
transform(X_train, g_vocabulary), Y_train,
alpha,initial_theta)
do_test(g_vocabulary)
| 25.771144
| 93
| 0.547876
|
import sys
import math
import numpy as np
def features(name):
name = name.lower()
return {
'first-letter': name[0], # First letter
'first2-letters': name[0:2], # First 2 letters
'first3-letters': name[0:3], # First 3 letters
'last-letter': name[-1],
'last2-letters': name[-2:],
'last3-letters': name[-3:],
}
def fit(x_set):
feature_names = []
vocab = {}
for x in x_set:
for f, v in x.items():
f = "%s%s%s" % (f, '=', v)
if f not in vocab:
feature_names.append(f)
vocab[f] = len(vocab)
feature_names.sort()
vocab = dict((f, i) for i, f in enumerate(feature_names))
return (feature_names, vocab)
def transform( x_set, vocab):
xa = np.zeros((len(x_set), 6), dtype=int)
for i, x in enumerate(x_set):
for f, v in x.items():
f_v = "%s%s%s" % (f, "=", v)
try:
xa[i][features_index()[f]] = vocab[f_v]
except KeyError:
pass
return xa
def sigmoid(z):
    try:
        g_of_z = float(1.0 / float((1.0 + math.exp(-1.0*z))))
    except OverflowError:
        # math.exp(-1.0*z) overflows for large negative z, where sigmoid(z) -> 0
        g_of_z = 0.0
    return g_of_z
def hypothesis(theta, x):
z = 0
#print(type(theta), type(x))
for i in range(6):
pos = x[i]
#print("Hypothesis:", i)
z += theta[pos]
return sigmoid(z)
def cost_function(x_set,y_set,theta,m):
sum_of_errors = 0
for i in range(m):
xi = x_set[i]
hi = hypothesis(theta,xi)
if y_set[i] == 1:
error = y_set[i] * math.log(hi)
elif y_set[i] == 0:
            error = (1 - y_set[i]) * math.log(1 - hi)
sum_of_errors += error
const = -1/m
J = const * sum_of_errors
#print( 'cost is ', J )
return J
def logistic_regression_by_stochastic_gradient_descent(x_set,y_set,alpha, theta):
m = len(y_set)
n = len(features_index()) # here we have 6 features
best_theta = []
max_score = 0.0
for i in range(m):
for idx in range(n):# features
j = x_set[i][idx]
theta[j] = theta[j] - alpha * (hypothesis(theta, x_set[i]) - y_set[i]) #xij is 1
if i % 10 == 0:
score = calculate_score(theta)
if score > max_score:
max_score = score
best_theta = theta
return best_theta
def calculate_score(theta):
score = 0
length = len(X_validation)
for i in range(length):
h_value = hypothesis(theta, X_validation[i])
if h_value > 0.5:
prediction = 1
else:
prediction = 0
answer = Y_validation[i]
if prediction == answer:
score += 1
score = float(score) / float(length)
return score
def features_index():
features_index = {
'first-letter': 0,
'first2-letters': 1, # First 2 letters
'first3-letters': 2, # First 3 letters
'last-letter': 3,
'last2-letters': 4,
'last3-letters': 5,
}
return features_index
def get_gender(predict):
gender = "female"
if predict == GENDER_MALE:
gender = "male"
return gender
def do_test(vocabulary):
filename_test = sys.argv[1]
names_test = np.genfromtxt(filename_test, delimiter = ",", dtype = "U25",
autostrip = True)
x_test = features(names_test)
x_test = transform(x_test, vocabulary)
length = len(x_test)
predict = 0
for i in range(length):
h_value = hypothesis(theta, x_test[i])
if h_value > 0.5:
predict = 1
else:
predict = 0
print("{},{}".format(names_test[i], get_gender(predict)))
if __name__ == "__main__":
    # We're using 80% of the data for training
TRAIN_SPLIT = 0.8
GENDER_MALE = 1
GENDER_FEMALE = 0
alpha = 0.54
X_train = None
Y_train = None
X_validation = None
Y_validation = None
X_test = None
Y_test = None
Best_theta = None
fileName = "/var/www/html/training_dataset.txt"
g_feature_names = []
g_vocabulary = {}
names = np.genfromtxt(fileName, delimiter = ",", dtype = "U25",
autostrip = True)
np.random.shuffle(names)
features = np.vectorize(features)
X = features(names[:, 0])
Y = np.array([GENDER_MALE if x == "male" else GENDER_FEMALE for x in names[:, 1]] )
# creating testing and training set
X_train, X_validation = X[:int(TRAIN_SPLIT * len(X))], X[int(TRAIN_SPLIT * len(X)):]
Y_train, Y_validation = Y[:int(TRAIN_SPLIT * len(Y))], Y[int(TRAIN_SPLIT * len(Y)):]
(g_feature_names, g_vocabulary) = fit(X_train)
initial_theta = np.zeros((len(g_feature_names), 1), dtype=np.float64)
X_validation = transform(X_validation, g_vocabulary)
iterations = len(X_validation)
theta = logistic_regression_by_stochastic_gradient_descent(
transform(X_train, g_vocabulary), Y_train,
alpha,initial_theta)
do_test(g_vocabulary)
| 3,491
| 0
| 303
|
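The sigmoid in the script above wraps math.exp in a try/except because exp(-z) overflows for z below roughly -709 in double precision. A minimal sketch of a numerically stable alternative that needs no exception handling; stable_sigmoid is a hypothetical replacement name, not part of the original script.

import math


def stable_sigmoid(z):
    # For z >= 0, exp(-z) <= 1, so the classic form cannot overflow.
    if z >= 0:
        return 1.0 / (1.0 + math.exp(-z))
    # For z < 0, rewrite as exp(z) / (1 + exp(z)); here exp(z) <= 1.
    e = math.exp(z)
    return e / (1.0 + e)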
e4bee58bfdb19c78f18c0a8361e244a7c7f041cb
| 898
|
py
|
Python
|
app/migrations/0002_auto_20210215_2026.py
|
fossabot/stream_vod_indexer
|
58bff60cc4adb1b8e5966134d2e560e59464d196
|
[
"MIT"
] | null | null | null |
app/migrations/0002_auto_20210215_2026.py
|
fossabot/stream_vod_indexer
|
58bff60cc4adb1b8e5966134d2e560e59464d196
|
[
"MIT"
] | null | null | null |
app/migrations/0002_auto_20210215_2026.py
|
fossabot/stream_vod_indexer
|
58bff60cc4adb1b8e5966134d2e560e59464d196
|
[
"MIT"
] | 1
|
2021-02-18T14:25:39.000Z
|
2021-02-18T14:25:39.000Z
|
# Generated by Django 3.1.6 on 2021-02-15 14:56
from django.db import migrations, models
| 26.411765
| 63
| 0.570156
|
# Generated by Django 3.1.6 on 2021-02-15 14:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='gamestorage',
name='game_webpage_link',
field=models.URLField(blank=True, max_length=1000),
),
migrations.AddField(
model_name='streamstorage',
name='vod_link',
field=models.URLField(blank=True, max_length=1000),
),
migrations.AddField(
model_name='streamstorage',
name='vod_status',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='gamestorage',
name='game_slug',
field=models.SlugField(max_length=200),
),
]
| 0
| 784
| 23
|
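The migration above adds three fields and alters one. A plausible reconstruction of the relevant model definitions after it is applied, assuming the models live in app/models.py; any other fields on these models are unknown and omitted here.

from django.db import models


class GameStorage(models.Model):
    game_slug = models.SlugField(max_length=200)                      # altered by this migration
    game_webpage_link = models.URLField(blank=True, max_length=1000)  # added by this migration


class StreamStorage(models.Model):
    vod_link = models.URLField(blank=True, max_length=1000)  # added by this migration
    vod_status = models.BooleanField(default=False)          # added by this migration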
15bd8ecc1888b29e46ba8af98328ca4106bb49d4
| 781
|
py
|
Python
|
src/demo/app.py
|
GonnaFlyMethod/aiohttp_simple_template
|
7bd735c182ac6e45a8fe08485386a6a4465f192a
|
[
"MIT"
] | null | null | null |
src/demo/app.py
|
GonnaFlyMethod/aiohttp_simple_template
|
7bd735c182ac6e45a8fe08485386a6a4465f192a
|
[
"MIT"
] | null | null | null |
src/demo/app.py
|
GonnaFlyMethod/aiohttp_simple_template
|
7bd735c182ac6e45a8fe08485386a6a4465f192a
|
[
"MIT"
] | null | null | null |
import jinja2
import aiohttp_jinja2
import urllib.parse as up
import asyncpg
from aiohttp import web
from .routes import setup_routes
from .config import config_obj as config
| 25.193548
| 76
| 0.641485
|
import jinja2
import aiohttp_jinja2
import urllib.parse as up
import asyncpg
from aiohttp import web
from .routes import setup_routes
from .config import config_obj as config
async def on_start(app):
up.uses_netloc.append("postgres")
url = up.urlparse(config['database_url'])
connection = await asyncpg.connect(user=url.username,
password=url.password,
database=url.path[1:],
host=url.hostname,
port=url.port)
config['db'] = connection
print("Connected to db!")
async def create_app():
app = web.Application()
aiohttp_jinja2.setup(app, loader=jinja2.PackageLoader('demo', 'templates'))
setup_routes(app)
await on_start(app)
return app
| 555
| 0
| 46
|
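create_app above is a coroutine, and aiohttp 3.x's web.run_app accepts either an Application instance or a coroutine that returns one, so it can serve as an entry point directly. A minimal sketch, assuming the package is importable as demo; the port choice is illustrative.

from aiohttp import web

from demo.app import create_app

if __name__ == '__main__':
    # run_app drives the event loop and serves until interrupted.
    web.run_app(create_app(), port=8080)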
00e99788eef7360e5057d06d1769c244b69763ef
| 10,949
|
py
|
Python
|
lolesport_parser/dataclasses/game_details_v5.py
|
DrArtemi/riot-api
|
a68bf94061a3c63e511418669097499c3e2c055d
|
[
"MIT"
] | null | null | null |
lolesport_parser/dataclasses/game_details_v5.py
|
DrArtemi/riot-api
|
a68bf94061a3c63e511418669097499c3e2c055d
|
[
"MIT"
] | null | null | null |
lolesport_parser/dataclasses/game_details_v5.py
|
DrArtemi/riot-api
|
a68bf94061a3c63e511418669097499c3e2c055d
|
[
"MIT"
] | null | null | null |
"""
This file has been auto-generated by pydantic.
TODO: Maybe some fields should be renamed or the architecture should be changed a bit
"""
from __future__ import annotations
from typing import List, Optional
from pydantic import BaseModel, Field
| 32.489614
| 96
| 0.728286
|
"""
This file has been auto-generated by pydantic.
TODO: Maybe some fields should be renamed or the architecture should be changed a bit
"""
from __future__ import annotations
from typing import List, Optional
from pydantic import BaseModel, Field
class Challenges(BaseModel):
field_12AssistStreakCount: Optional[int] = Field(..., alias='12AssistStreakCount')
abilityUses: Optional[int] = None
acesBefore15Minutes: Optional[int] = None
alliedJungleMonsterKills: Optional[float] = None
baronBuffGoldAdvantageOverThreshold: Optional[int] = None
baronTakedowns: Optional[int] = None
blastConeOppositeOpponentCount: Optional[int] = None
bountyGold: Optional[int] = None
buffsStolen: Optional[int] = None
completeSupportQuestInTime: Optional[int] = None
controlWardTimeCoverageInRiverOrEnemyHalf: Optional[float] = None
controlWardsPlaced: Optional[int] = None
damagePerMinute: Optional[float] = None
damageTakenOnTeamPercentage: Optional[float] = None
dancedWithRiftHerald: Optional[int] = None
deathsByEnemyChamps: Optional[int] = None
dodgeSkillShotsSmallWindow: Optional[int] = None
doubleAces: Optional[int] = None
dragonTakedowns: Optional[int] = None
earliestBaron: Optional[float] = None
effectiveHealAndShielding: Optional[float] = None
elderDragonKillsWithOpposingSoul: Optional[int] = None
elderDragonMultikills: Optional[int] = None
enemyChampionImmobilizations: Optional[int] = None
enemyJungleMonsterKills: Optional[float] = None
epicMonsterKillsNearEnemyJungler: Optional[int] = None
epicMonsterKillsWithin30SecondsOfSpawn: Optional[int] = None
epicMonsterSteals: Optional[int] = None
epicMonsterStolenWithoutSmite: Optional[int] = None
firstTurretKilledTime: Optional[float] = None
flawlessAces: Optional[int] = None
fullTeamTakedown: Optional[int] = None
gameLength: Optional[float] = None
getTakedownsInAllLanesEarlyJungleAsLaner: Optional[int] = None
goldPerMinute: Optional[float] = None
hadAfkTeammate: Optional[int] = None
hadOpenNexus: Optional[int] = None
highestCrowdControlScore: Optional[int] = None
immobilizeAndKillWithAlly: Optional[int] = None
initialBuffCount: Optional[int] = None
initialCrabCount: Optional[int] = None
jungleCsBefore10Minutes: Optional[float] = None
junglerKillsEarlyJungle: Optional[int] = None
junglerTakedownsNearDamagedEpicMonster: Optional[int] = None
kTurretsDestroyedBeforePlatesFall: Optional[int] = None
kda: Optional[float] = None
killAfterHiddenWithAlly: Optional[int] = None
killParticipation: Optional[float] = None
killedChampTookFullTeamDamageSurvived: Optional[int] = None
killsNearEnemyTurret: Optional[int] = None
killsOnLanersEarlyJungleAsJungler: Optional[int] = None
killsOnOtherLanesEarlyJungleAsLaner: Optional[int] = None
killsOnRecentlyHealedByAramPack: Optional[int] = None
killsUnderOwnTurret: Optional[int] = None
killsWithHelpFromEpicMonster: Optional[int] = None
knockEnemyIntoTeamAndKill: Optional[int] = None
landSkillShotsEarlyGame: Optional[int] = None
laneMinionsFirst10Minutes: Optional[int] = None
legendaryCount: Optional[int] = None
lostAnInhibitor: Optional[int] = None
maxKillDeficit: Optional[int] = None
moreEnemyJungleThanOpponent: Optional[float] = None
multiKillOneSpell: Optional[int] = None
multiTurretRiftHeraldCount: Optional[int] = None
multikills: Optional[int] = None
multikillsAfterAggressiveFlash: Optional[int] = None
mythicItemUsed: Optional[int] = None
outerTurretExecutesBefore10Minutes: Optional[int] = None
outnumberedKills: Optional[int] = None
outnumberedNexusKill: Optional[int] = None
perfectDragonSoulsTaken: Optional[int] = None
perfectGame: Optional[int] = None
pickKillWithAlly: Optional[int] = None
poroExplosions: Optional[int] = None
quickCleanse: Optional[int] = None
quickFirstTurret: Optional[int] = None
quickSoloKills: Optional[int] = None
riftHeraldTakedowns: Optional[int] = None
saveAllyFromDeath: Optional[int] = None
scuttleCrabKills: Optional[int] = None
skillshotsDodged: Optional[int] = None
skillshotsHit: Optional[int] = None
snowballsHit: Optional[int] = None
soloBaronKills: Optional[int] = None
soloKills: Optional[int] = None
soloTurretsLategame: Optional[int] = None
stealthWardsPlaced: Optional[int] = None
survivedSingleDigitHpCount: Optional[int] = None
survivedThreeImmobilizesInFight: Optional[int] = None
takedownOnFirstTurret: Optional[int] = None
takedowns: Optional[int] = None
takedownsAfterGainingLevelAdvantage: Optional[int] = None
takedownsBeforeJungleMinionSpawn: Optional[int] = None
takedownsFirst25Minutes: Optional[int] = None
takedownsInAlcove: Optional[int] = None
takedownsInEnemyFountain: Optional[int] = None
teamBaronKills: Optional[int] = None
teamDamagePercentage: Optional[float] = None
teamElderDragonKills: Optional[int] = None
teamRiftHeraldKills: Optional[int] = None
threeWardsOneSweeperCount: Optional[int] = None
tookLargeDamageSurvived: Optional[int] = None
turretPlatesTaken: Optional[int] = None
turretTakedowns: Optional[int] = None
turretsTakenWithRiftHerald: Optional[int] = None
twentyMinionsIn3SecondsCount: Optional[int] = None
unseenRecalls: Optional[int] = None
visionScorePerMinute: Optional[float] = None
wardTakedowns: Optional[int] = None
wardTakedownsBefore20M: Optional[int] = None
wardsGuarded: Optional[int] = None
earliestDragonTakedown: Optional[float] = None
teleportTakedowns: Optional[int] = None
earlyLaningPhaseGoldExpAdvantage: Optional[float] = None
highestWardKills: Optional[int] = None
laningPhaseGoldExpAdvantage: Optional[float] = None
maxCsAdvantageOnLaneOpponent: Optional[float] = None
maxLevelLeadLaneOpponent: Optional[int] = None
visionScoreAdvantageLaneOpponent: Optional[float] = None
highestChampionDamage: Optional[int] = None
fasterSupportQuestCompletion: Optional[int] = None
class StatPerks(BaseModel):
defense: int
flex: int
offense: int
class Selection(BaseModel):
perk: int
var1: int
var2: int
var3: int
class Style(BaseModel):
description: str
selections: List[Selection]
style: int
class Perks(BaseModel):
statPerks: StatPerks
styles: List[Style]
class Participant(BaseModel):
assists: int
baronKills: Optional[int] = None
bountyLevel: Optional[int] = None
challenges: Optional[Challenges] = None
champExperience: Optional[int] = None
champLevel: int
championId: int
championName: Optional[str] = None
championTransform: Optional[int] = None
consumablesPurchased: Optional[int] = None
damageDealtToBuildings: Optional[int] = None
damageDealtToObjectives: int
damageDealtToTurrets: int
damageSelfMitigated: int
deaths: int
detectorWardsPlaced: Optional[int] = None
doubleKills: int
dragonKills: Optional[int] = None
firstBloodAssist: bool
firstBloodKill: bool
firstTowerAssist: bool
firstTowerKill: bool
gameEndedInEarlySurrender: Optional[bool] = None
gameEndedInSurrender: Optional[bool] = None
goldEarned: int
goldSpent: int
individualPosition: Optional[str] = None
inhibitorKills: int
inhibitorTakedowns: Optional[int] = None
inhibitorsLost: Optional[int] = None
item0: int
item1: int
item2: int
item3: int
item4: int
item5: int
item6: int
itemsPurchased: Optional[int] = None
killingSprees: int
kills: int
lane: str
largestCriticalStrike: int
largestKillingSpree: int
largestMultiKill: int
longestTimeSpentLiving: int
magicDamageDealt: int
magicDamageDealtToChampions: int
magicDamageTaken: int
neutralMinionsKilled: int
nexusKills: Optional[int] = None
nexusLost: Optional[int] = None
nexusTakedowns: Optional[int] = None
objectivesStolen: Optional[int] = None
objectivesStolenAssists: Optional[int] = None
participantId: int
pentaKills: int
perks: Optional[Perks] = None # FIXME: Optional because idk how to transform v4 to v5 perks
physicalDamageDealt: int
physicalDamageDealtToChampions: int
physicalDamageTaken: int
profileIcon: int
quadraKills: int
riotIdName: Optional[str] = None
riotIdTagline: Optional[str] = None
role: str
sightWardsBoughtInGame: int
spell1Casts: Optional[int] = None
spell1Id: int
spell2Casts: Optional[int] = None
spell2Id: int
spell3Casts: Optional[int] = None
spell4Casts: Optional[int] = None
summoner1Casts: Optional[int] = None
summoner2Casts: Optional[int] = None
summonerId: Optional[int] = None
summonerLevel: Optional[int] = None
summonerName: str
teamEarlySurrendered: Optional[bool] = None
teamId: int
teamPosition: Optional[str] = None
timeCCingOthers: int
timePlayed: Optional[int] = None
totalDamageDealt: int
totalDamageDealtToChampions: int
totalDamageShieldedOnTeammates: Optional[int] = None
totalDamageTaken: int
totalHeal: int
totalHealsOnTeammates: Optional[int] = None
totalMinionsKilled: int
totalTimeCCDealt: int
totalTimeSpentDead: Optional[int] = None
totalUnitsHealed: int
tripleKills: int
trueDamageDealt: int
trueDamageDealtToChampions: int
trueDamageTaken: int
turretKills: int
turretTakedowns: Optional[int] = None
turretsLost: Optional[int] = None
unrealKills: int
visionScore: int
visionWardsBoughtInGame: int
wardsKilled: int
wardsPlaced: int
win: bool
class Ban(BaseModel):
championId: int
pickTurn: int
class Baron(BaseModel):
first: bool
kills: int
class Champion(BaseModel):
first: bool
kills: int
class Dragon(BaseModel):
first: bool
kills: int
class Inhibitor(BaseModel):
first: bool
kills: int
class RiftHerald(BaseModel):
first: bool
kills: int
class Tower(BaseModel):
first: bool
kills: int
class Objectives(BaseModel):
baron: Baron
champion: Optional[Champion] = None
dragon: Dragon
inhibitor: Inhibitor
riftHerald: RiftHerald
tower: Tower
class Team(BaseModel):
bans: List[Ban]
objectives: Objectives
teamId: int
win: bool
class GameDetails(BaseModel):
gameCreation: int
gameDuration: int
gameEndTimestamp: Optional[int] = None
gameId: int
gameMode: str
gameName: Optional[str] = None
gameStartTimestamp: Optional[int] = None
gameType: str
gameVersion: str
mapId: int
participants: List[Participant]
platformId: str
queueId: int
seasonId: int
teams: List[Team]
tournamentCode: Optional[str] = None
| 0
| 10,314
| 368
|
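A minimal sketch of loading a raw match payload into the models above, assuming pydantic v1 (whose parse_obj classmethod these BaseModel definitions imply) and a local JSON file whose top level matches GameDetails; match.json is an illustrative file name, and the import path follows the repo listing above.

import json

from lolesport_parser.dataclasses.game_details_v5 import GameDetails

with open('match.json') as f:
    payload = json.load(f)

# In pydantic v1, unknown keys are ignored by default and missing Optional fields stay None.
details = GameDetails.parse_obj(payload)
print(details.gameMode, len(details.participants))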