| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | 1 to 10 items |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | 1 to 10 items |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | 1 to 10 items |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
36b4217be63fc502a7a8b608b61caf14733e4c6e
| 1,477
|
py
|
Python
|
carla_ros_bridge/src/carla_ros_bridge/coordinate_converter.py
|
OlafOrangi/ros-bridge
|
732d5f99e5e1f4d0ea7e4873ccc34f0a40f1203c
|
[
"MIT"
] | null | null | null |
carla_ros_bridge/src/carla_ros_bridge/coordinate_converter.py
|
OlafOrangi/ros-bridge
|
732d5f99e5e1f4d0ea7e4873ccc34f0a40f1203c
|
[
"MIT"
] | null | null | null |
carla_ros_bridge/src/carla_ros_bridge/coordinate_converter.py
|
OlafOrangi/ros-bridge
|
732d5f99e5e1f4d0ea7e4873ccc34f0a40f1203c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from geometry_msgs.msg import Pose, Point, Quaternion, Vector3
import numpy as np
import tf
def convert_pose(pose):
"""
convert pose between left and right-hand coordinate system
:param pose: pose to be converted
:return: converted pose
"""
data = Pose()
data.position = convert_vector3(pose.position)
data.orientation = convert_quaternion(pose.orientation)
return data
def convert_vector3(pt):
"""
convert vector3 between left and right-hand coordinate system
:param pt: point to be converted
:return: converted point
"""
return Vector3(pt.x, -pt.y, pt.z)
def convert_point(pt):
"""
convert point between left and right-hand coordinate system
:param pt: point to be converted
:return: converted point
"""
return Point(pt.x, -pt.y, pt.z)
def convert_quaternion(q):
"""
convert quaternion between left and right-hand coordinate system
:param q: quaternion to be converted
:return: converted quaternion
"""
euler = tf.transformations.euler_from_quaternion([q.x, q.y, q.z, q.w])
euler = (euler[0], euler[1], -euler[2])
return Quaternion(*tf.transformations.quaternion_from_euler(*euler))
def convert_euler(euler):
"""
convert euler angles between left and right-hand coordinate system
:param euler: euler angles to be converted
:return: converted euler angles
"""
return Vector3(euler.x, euler.y, -euler.z)
| 26.375
| 74
| 0.688558
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 765
| 0.517942
|
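The converter above flips the sign of the y component and of the yaw angle to move between CARLA's left-handed frame and ROS's right-handed frame. Below is a minimal self-contained sketch of the same idea using plain tuples instead of the geometry_msgs/tf types, so it runs without a ROS install; the helper names here are illustrative, not part of the original module.

```python
import math

def convert_point(pt):
    # flip the y axis to switch handedness; x and z are unchanged
    x, y, z = pt
    return (x, -y, z)

def convert_yaw(yaw):
    # rotations about the up axis change sign when the y axis is mirrored
    return -yaw

if __name__ == "__main__":
    p = (1.0, 2.0, 0.5)
    yaw = math.radians(30)
    # applying the conversion twice is the identity, just like convert_pose above
    assert convert_point(convert_point(p)) == p
    assert convert_yaw(convert_yaw(yaw)) == yaw
    print(convert_point(p), math.degrees(convert_yaw(yaw)))
```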
36b50824ddb6f2e96f0d94699793a7e9265c44f3
| 518
|
py
|
Python
|
models/IFR_generalized_SB.py
|
rileymcmorrow/C-SFRAT
|
c696942940118172dfb2c3b8cc27b8d2fd5a5a17
|
[
"MIT"
] | null | null | null |
models/IFR_generalized_SB.py
|
rileymcmorrow/C-SFRAT
|
c696942940118172dfb2c3b8cc27b8d2fd5a5a17
|
[
"MIT"
] | 3
|
2021-03-09T16:13:59.000Z
|
2021-09-20T16:50:07.000Z
|
models/IFR_generalized_SB.py
|
rileymcmorrow/C-SFRAT
|
c696942940118172dfb2c3b8cc27b8d2fd5a5a17
|
[
"MIT"
] | 4
|
2021-07-20T18:01:12.000Z
|
2021-11-22T10:13:35.000Z
|
from core.model import Model
class IFR_Generalized_SB(Model):
name = "IFR generalized Salvia & Bollinger"
shortName = "IFRGSB"
# initial parameter estimates
beta0 = 0.01
parameterEstimates = (0.1, 0.1)
def hazardSymbolic(self, i, args):
# args -> (c, alpha)
f = 1 - args[0] / ((i - 1) * args[1] + 1)
return f
def hazardNumerical(self, i, args):
# args -> (c, alpha)
f = 1 - args[0] / ((i - 1) * args[1] + 1)
return f
| 24.666667
| 50
| 0.525097
| 482
| 0.930502
| 0
| 0
| 0
| 0
| 0
| 0
| 116
| 0.223938
|
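Both hazard methods above evaluate h(i) = 1 - c / ((i - 1) * alpha + 1). A quick standalone check of that formula with made-up parameter values (c = 0.1, alpha = 0.1, the same numbers as the initial parameterEstimates tuple):

```python
def hazard(i, c, alpha):
    # hazard of the IFR generalized Salvia & Bollinger model at interval i
    return 1 - c / ((i - 1) * alpha + 1)

# the hazard rises toward 1 as the interval index grows
for i in (1, 2, 10, 100):
    print(i, round(hazard(i, c=0.1, alpha=0.1), 4))
# i=1 -> 0.9, i=2 -> 0.9091, i=10 -> 0.9474, i=100 -> 0.9908
```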
36b666e75f8d2123fc2f466527229d2f55e94174
| 1,263
|
py
|
Python
|
TrendTrading/ProbModel/CheckScripts/updated market indicator.py
|
benjabee10/WKUResearch
|
5cc384c0e0c1afc82c38a9e6eb63b80c85af7c97
|
[
"MIT"
] | null | null | null |
TrendTrading/ProbModel/CheckScripts/updated market indicator.py
|
benjabee10/WKUResearch
|
5cc384c0e0c1afc82c38a9e6eb63b80c85af7c97
|
[
"MIT"
] | null | null | null |
TrendTrading/ProbModel/CheckScripts/updated market indicator.py
|
benjabee10/WKUResearch
|
5cc384c0e0c1afc82c38a9e6eb63b80c85af7c97
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import talib

big = 200
small = 50
threshold = 0.02

# context.market encodes the market trend: 0 - negative, 1 - no trend, 2 - positive
def initialize(context):
    context.spy = sid(8554)
    schedule_function(check)

def check(context, data):
    spydata = data.history(context.spy, 'price', big + 5, '1d')
    shortAvg = talib.SMA(spydata, small)[-1]
    longAvg = talib.SMA(spydata, big)[-1]
    shortAvgY = talib.SMA(spydata, small)[-2]
    longAvgY = talib.SMA(spydata, big)[-2]
    # classify the trend from today's and yesterday's short/long moving averages
    trend = conditionCheck(shortAvg, longAvg, shortAvgY, longAvgY, threshold)
    context.market = trend + 1
    context.markettrack = context.market

def conditionCheck(small, large, smallY, largeY, var):
    # positive trend: the short average sits above the long average by more than var, today and yesterday
    if small > (1 + var) * large and smallY > (1 + var) * largeY:
        return 1
    # no trend: the short average lies inside the +/- var band around the long average
    elif (1 - var) * large < small < (1 + var) * large:
        return 0
    # negative trend otherwise
    else:
        return -1

def clearassets(context, data):
    for asset in context.portfolio.positions:
        position = context.portfolio.positions[asset].amount
        if position < 0:
            context.longsells.append(asset)
        elif position > 0:
            context.shortsells.append(asset)
        order_target_percent(asset, 0)
| 25.26
| 61
| 0.639747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 104
| 0.082344
|
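conditionCheck above maps the relationship between a short and a long moving average to 1 (up trend), 0 (no trend), or -1 (down trend) using a tolerance band of width var. A small standalone check with invented price levels, kept outside the Quantopian runtime so only the function itself is needed:

```python
def conditionCheck(small, large, smallY, largeY, var):
    # same classification rule as in the script above, repeated so this example is self-contained
    if small > (1 + var) * large and smallY > (1 + var) * largeY:
        return 1
    elif (1 - var) * large < small < (1 + var) * large:
        return 0
    else:
        return -1

# short average well above the long average on both days -> up trend
print(conditionCheck(105.0, 100.0, 104.0, 100.0, 0.02))  # 1
# short average inside the 2% band -> no trend
print(conditionCheck(101.0, 100.0, 100.5, 100.0, 0.02))  # 0
# short average below the band -> down trend
print(conditionCheck(95.0, 100.0, 96.0, 100.0, 0.02))    # -1
```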
36b8262c6d34969be77ba59f989410637bf778e2
| 6,097
|
py
|
Python
|
google_drive_online_decompression.py
|
xunyixiangchao/Google-Drive-Online-Decompression
|
02121e3c25ad0ef3ceb0652a4a4e16f803e8463a
|
[
"Apache-2.0"
] | null | null | null |
google_drive_online_decompression.py
|
xunyixiangchao/Google-Drive-Online-Decompression
|
02121e3c25ad0ef3ceb0652a4a4e16f803e8463a
|
[
"Apache-2.0"
] | null | null | null |
google_drive_online_decompression.py
|
xunyixiangchao/Google-Drive-Online-Decompression
|
02121e3c25ad0ef3ceb0652a4a4e16f803e8463a
|
[
"Apache-2.0"
] | 1
|
2021-06-04T16:08:35.000Z
|
2021-06-04T16:08:35.000Z
|
# -*- coding: utf-8 -*-
"""Google_Drive_Online_Decompression.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/16e0tv3LEkAFaYHmKH2H63Cg6rpCNWFky
# **Step 1: Mount Google Drive**
"""
#@markdown Click the button on the left to authorize and mount Google Drive
from google.colab import drive
drive.mount('/content/drive')
"""# **RAR**
# View the directory tree of a single RAR archive
"""
#@markdown Click the button on the left to view the directory structure inside a single RAR archive
#@markdown <font size="4" color=red><b>destination</b></font> path of the RAR archive to inspect (with the .rar suffix)
destination = "" #@param {type:"string"}
!unrar v "$destination"
"""# View the directory trees of all RAR archives in a directory"""
#@markdown Click the button on the left to view the directory structure of all RAR archives in a directory
#@markdown <font size="4" color=red><b>destination</b></font> path of the directory to inspect (without the .rar suffix)
destination = "" #@param {type:"string"}
!unrar v "$destination/*.rar"
"""## Extract a single RAR archive ****split volumes supported****"""
#@markdown Click the button on the left to extract a single RAR archive
#@markdown <font size="4" color=red><b>destination</b></font> path of the file to extract (with the .rar suffix)
destination = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>files</b></font> destination directory for the extracted files
files = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>password</b></font> extraction password (fill it in if there is one, otherwise leave it blank)
password = "" #@param {type:"string"}
print("If no password was set, just press Enter")
!unrar x -p"$password" -o+ "$destination" "$files"
"""## Extract RAR archives in batch"""
#@markdown Click the button on the left to extract all RAR archives in a directory
#@markdown <font size="4" color=red><b>destination</b></font> path of the files to extract (without the .rar suffix)
destination = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>files</b></font> destination directory for the extracted files
files = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>password</b></font> extraction password (fill it in if there is one; since this is a batch run the password must be the same for every archive, otherwise it will fail!)
password = "" #@param {type:"string"}
print("If no password was set, just press Enter")
!unrar x -p"$password" -o+ "$destination/*.rar" "$files"
"""# **ZIP**
# View the directory tree of a single ZIP archive
"""
#@markdown Click the button on the left to view the directory structure of a single ZIP archive
#@markdown <font size="4" color=red><b>destination</b></font> path of the file to inspect (with the .zip suffix)
destination = "" #@param {type:"string"}
!unzip -l "$destination"
"""# View the directory trees of multiple ZIP archives"""
#@markdown Click the button on the left to view the directory structure of all ZIP archives in a directory
#@markdown <font size="4" color=red><b>destination</b></font> path of the folder to inspect (without the .zip suffix)
destination = "" #@param {type:"string"}
!unzip -l "$destination/*.zip"
"""### Extract a single ZIP archive ****split volumes supported****"""
#@markdown Click the button on the left to extract a single ZIP archive
#@markdown <font size="4" color=red><b>destination</b></font> path of the file to extract (with the .zip suffix)
destination = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>files</b></font> destination directory for the extracted files
files = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>password</b></font> extraction password (fill it in if there is one, otherwise leave it blank)
password = "" #@param {type:"string"}
print("If no password was set, just press Enter")
!7z x -aoa "$destination" -P"$password" -o"$files"
"""## Extract ZIP archives in batch"""
#@markdown Click the button on the left to extract all ZIP archives in a directory
#@markdown <font size="4" color=red><b>destination</b></font> path of the files to extract (without the .zip suffix)
destination = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>files</b></font> destination directory for the extracted files
files = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>password</b></font> extraction password (fill it in if there is one; since this is a batch run the password must be the same for every archive, otherwise it will fail!)
password = "" #@param {type:"string"}
print("If no password was set, just press Enter")
!7z x -aoa "$destination/*.zip" -P"$password" -o"$files"
"""# **7Z**
# View the directory tree of a single 7Z archive
"""
#@markdown Click the button on the left to view the directory structure of a single 7Z archive
#@markdown <font size="4" color=red><b>destination</b></font> path of the archive to inspect (with the .7z suffix)
destination = "" #@param {type:"string"}
!7z l "$destination"
"""# View the directory trees of multiple 7Z archives"""
#@markdown Click the button on the left to view the directory structure of all 7Z archives in a directory
#@markdown <font size="4" color=red><b>destination</b></font> path of the directory to inspect (without the .7z suffix)
destination = "" #@param {type:"string"}
!7z l "$destination/*.7z.*"
"""## Extract a single 7Z archive ****split volumes supported****"""
#@markdown Click the button on the left to extract a single 7Z archive
#@markdown <font size="4" color=red><b>destination</b></font> path of the 7Z archive to extract (with the .7z suffix)
destination = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>files</b></font> directory to extract the files into (destination)
files = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>password</b></font> archive password (fill it in if there is one, otherwise leave it blank)
password = "" #@param {type:"string"}
print("If no password was set, just press Enter")
!7z x -aoa "$destination" -P"$password" -r -o"$files"
"""## Extract 7z archives in batch"""
#@markdown Click the button on the left to extract all 7Z archives in a directory
#@markdown <font size="4" color=red><b>destination</b></font> path of the directory of files to extract (without the .7z suffix)
destination = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>files</b></font> directory to extract the files into (destination)
files = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>password</b></font> archive password (fill it in if there is one; since this is a batch run the password must be the same for every archive, otherwise it will fail!)
password = "" #@param {type:"string"}
print("If no password was set, just press Enter")
!7z x -aoa "$destination/*.7z" -P"$password" -o"$files"
"""# <font color=red><b>**Generic formats**</b></font>
# View the directory tree of a single archive
"""
#@markdown Click the button on the left to view the directory structure of a single archive
#@markdown <font size="4" color=red><b>destination</b></font> path of the archive to inspect (with the .xxx suffix)
destination = "" #@param {type:"string"}
!7z l "$destination"
"""# View the directory trees of multiple archives"""
#@markdown Click the button on the left to view the directory structure of all archives in a directory
#@markdown <font size="4" color=red><b>destination</b></font> path of the directory to inspect (without the .xxx suffix)
destination = "" #@param {type:"string"}
!7z l "$destination/*.*"
"""## Extract a single archive ****split volumes supported****"""
#@markdown Click the button on the left to extract a single archive
#@markdown <font size="4" color=red><b>destination</b></font> path of the archive to extract (with the .xxx suffix)
destination = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>files</b></font> directory to extract the files into (destination)
files = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>password</b></font> archive password (fill it in if there is one, otherwise leave it blank)
password = "" #@param {type:"string"}
!7z x -aoa "$destination" -P"$password" -r -o"$files"
"""## Batch extraction"""
#@markdown Click the button on the left to extract all archives in a directory
#@markdown <font size="4" color=red><b>destination</b></font> path of the directory of files to extract (without the .xxx suffix)
destination = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>files</b></font> directory to extract the files into (destination)
files = "" #@param {type:"string"}
#@markdown <font size="4" color=red><b>password</b></font> archive password (fill it in if there is one; since this is a batch run the password must be the same for every archive, otherwise it will fail!)
password = "" #@param {type:"string"}
!7z x -aoa "$destination/*.*" -P"$password" -o"$files"
| 23.360153
| 102
| 0.657865
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,385
| 0.885598
|
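Outside Colab, the same unrar invocations can be driven from plain Python instead of `!` cell magics. A rough sketch using subprocess, assuming the unrar binary is on PATH; the paths and password below are placeholders, not values from the notebook:

```python
import subprocess

def extract_rar(archive, out_dir, password=""):
    # mirrors the notebook cell: unrar x -p<password> -o+ <archive> <out_dir>
    # -p- tells unrar not to prompt for a password when none is given
    cmd = ["unrar", "x", (f"-p{password}" if password else "-p-"), "-o+", archive, out_dir]
    subprocess.run(cmd, check=True)

# hypothetical paths for illustration only
# extract_rar("/content/drive/MyDrive/archive.rar", "/content/drive/MyDrive/out/")
```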
36b8b92109f8c9655104ce9dade2ed763cbf2735
| 678
|
py
|
Python
|
hackerearth/Algorithms/A plane journey/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | 4
|
2020-07-24T01:59:50.000Z
|
2021-07-24T15:14:08.000Z
|
hackerearth/Algorithms/A plane journey/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
hackerearth/Algorithms/A plane journey/solution.py
|
ATrain951/01.python-com_Qproject
|
c164dd093954d006538020bdf2e59e716b24d67c
|
[
"MIT"
] | null | null | null |
"""
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
"""
# Write your code here
n, m = map(int, input().strip().split())
a = sorted(map(int, input().strip().split()), reverse=True)
b = sorted(map(int, input().strip().split()), reverse=True)
if a[0] > b[0]:
print(-1)
else:
min_time = 1
i = j = 0
while i < len(a):
if j < len(b) and a[i] <= b[j]:
j += 1
elif a[i] <= b[j - 1]:
min_time += 2
i += 1
print(min_time)
| 26.076923
| 94
| 0.538348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 274
| 0.40413
|
36b8bfd65b80b877d57938c5b868d8f66abde496
| 65
|
py
|
Python
|
ml/av/io/__init__.py
|
necla-ml/ml
|
7ebd29382326e3958297607da7182c211865e7ff
|
[
"BSD-3-Clause"
] | 1
|
2022-02-21T21:06:29.000Z
|
2022-02-21T21:06:29.000Z
|
ml/av/io/__init__.py
|
necla-ml/ml
|
7ebd29382326e3958297607da7182c211865e7ff
|
[
"BSD-3-Clause"
] | null | null | null |
ml/av/io/__init__.py
|
necla-ml/ml
|
7ebd29382326e3958297607da7182c211865e7ff
|
[
"BSD-3-Clause"
] | null | null | null |
"""APIs from ml.vision.io and ml.audio.io
"""
from .api import *
| 16.25
| 41
| 0.661538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.692308
|
36b8ccb8c50334dfa92a74050719c2548bf9dec4
| 738
|
py
|
Python
|
addon.py
|
codingPF/plugin.video.newsApp
|
64f7c3e2e742cef5cd7c3303e2ffb3ec07771476
|
[
"MIT"
] | null | null | null |
addon.py
|
codingPF/plugin.video.newsApp
|
64f7c3e2e742cef5cd7c3303e2ffb3ec07771476
|
[
"MIT"
] | null | null | null |
addon.py
|
codingPF/plugin.video.newsApp
|
64f7c3e2e742cef5cd7c3303e2ffb3ec07771476
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
The main addon module
SPDX-License-Identifier: MIT
"""
# -- Imports ------------------------------------------------
import xbmcaddon
import resources.lib.appContext as appContext
import resources.lib.settings as Settings
import resources.lib.logger as Logger
import resources.lib.main as Main
# -- Main Code ----------------------------------------------
if __name__ == '__main__':
appContext.init()
appContext.initAddon(xbmcaddon.Addon())
appContext.initLogger(Logger.Logger(appContext.ADDONCLASS.getAddonInfo('id'), appContext.ADDONCLASS.getAddonInfo('version')))
appContext.initSettings(Settings.Settings(appContext.ADDONCLASS))
PLUGIN = Main.Main()
PLUGIN.run()
del PLUGIN
| 29.52
| 129
| 0.647696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 228
| 0.308943
|
36ba21d593e601f39648ce3de11ea90f9d215efd
| 6,226
|
py
|
Python
|
bfgame/components/equipment.py
|
ChrisLR/BasicDungeonRL
|
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
|
[
"MIT"
] | 3
|
2017-10-28T11:28:38.000Z
|
2018-09-12T09:47:00.000Z
|
bfgame/components/equipment.py
|
ChrisLR/BasicDungeonRL
|
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
|
[
"MIT"
] | null | null | null |
bfgame/components/equipment.py
|
ChrisLR/BasicDungeonRL
|
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
|
[
"MIT"
] | null | null | null |
from bflib import units
from core import contexts
from core.components import Component, listing
from core.messaging import StringBuilder, Actor, Target, Verb
@listing.register
class Equipment(Component):
NAME = "equipment"
__slots__ = ["armor_restrictions", "weapon_restrictions", "weapon_size_restrictions",
"wear_locations", "wield_locations", "empty_wield_locations"
"worn_items", "wielded_items"]
"""
This component attaches itself to anything with a body.
It represents equipment worn or wielded.
"""
def __init__(self):
super().__init__()
self.armor_restrictions = None
self.weapon_restrictions = None
self.weapon_size_restrictions = None
def on_register(self, host):
super().on_register(host)
host_restrictions = self.host.restrictions
if host_restrictions:
self.armor_restrictions = host_restrictions.armor
self.weapon_restrictions = host_restrictions.weapons
self.weapon_size_restrictions = host_restrictions.weapon_size
def copy(self):
return Equipment()
def remove(self, item):
found_slots = False
for item_slot in self.get_worn_item_slots():
if item_slot.item == item:
found_slots = True
item_slot.item = None
if found_slots:
return True
for item_slot in self.get_wielded_grasp_slots():
if item_slot.item == item:
item_slot.item = None
found_slots = True
if found_slots:
return True
return False
def wear(self, item):
if self.armor_restrictions and not self.armor_restrictions.can_wear(item.base):
return False
if not item.wearable:
return False
empty_item_slots = self.get_empty_item_slots()
for wear_location_set in item.wearable.wear_locations:
if hasattr(wear_location_set, '__iter__'):
# Multiple Location Slot
for slot in wear_location_set:
proper_slot = next((item_slot for item_slot in empty_item_slots
if item_slot.keyword == slot), None)
if proper_slot is not None:
proper_slot.item = item
else:
return False
context = contexts.Action(self.host, item)
message = StringBuilder(Actor, Verb("wear", Actor), Target, ".")
self.host.game.echo.see(self.host, message, context)
return True
else:
# Single Location Slot
proper_slot = next((item_slot for item_slot in empty_item_slots
if item_slot.keyword == wear_location_set), None)
if proper_slot is not None:
proper_slot.item = item
context = contexts.Action(self.host, item)
message = StringBuilder(Actor, Verb("wear", Actor), Target, ".")
self.host.game.echo.see(self.host, message, context)
return True
return False
def wield(self, item):
if self.weapon_restrictions and not self.weapon_restrictions.can_wield(item.base):
return False
hands = 1
if self.weapon_size_restrictions:
keyword = self.weapon_size_restrictions.can_wield(item.base)
if not keyword:
return False
else:
if keyword == self.weapon_size_restrictions.keywords.NeedsTwoHands:
hands = 2
empty_grasp_slots = self.get_empty_grasp_slots()
if len(empty_grasp_slots) >= hands:
while hands > 0:
item_slot = empty_grasp_slots.pop(0)
item_slot.item = item
hands -= 1
context = contexts.Action(self.host, item)
message = StringBuilder(Actor, Verb("wield", Actor), Target, ".")
self.host.game.echo.see(self.host, message, context)
return True
return False
def get_melee_total_armor_class(self):
all_items = self.get_all_items()
armor_ac = sum([item.armor.armor_class for item in all_items if item.armor])
shield_ac = sum([item.shield.armor_class_melee for item in all_items if item.shield])
return armor_ac + shield_ac
def get_ranged_total_armor_class(self):
all_items = self.get_all_items()
armor_ac = sum([item.armor.armor_class for item in all_items if item.armor])
shield_ac = sum([item.shield.armor_class_missile for item in all_items if item.shield])
return armor_ac + shield_ac
def get_all_items(self):
items = self.get_worn_items()
items.extend(self.get_wielded_items())
return items
def get_empty_item_slots(self):
body_parts = self.host.body.get_body_parts()
return [item_slot for body_part in body_parts for item_slot in body_part.item_slots if not item_slot.item]
def get_empty_grasp_slots(self):
body_parts = self.host.body.get_body_parts()
return [item_slot for body_part in body_parts for item_slot in body_part.grasp_slots if not item_slot.item]
def get_worn_items(self):
return [item_slot.item for item_slot in self.get_worn_item_slots()]
def get_worn_item_slots(self):
body_parts = self.host.body.get_body_parts()
return [item_slot for body_part in body_parts for item_slot in body_part.item_slots if item_slot.item]
def get_wielded_items(self):
return [item_slot.item for item_slot in self.get_wielded_grasp_slots()]
def get_wielded_grasp_slots(self):
body_parts = self.host.body.get_body_parts()
return [grasp_slot for body_part in body_parts for grasp_slot in body_part.grasp_slots if grasp_slot.item]
def get_load_of_worn_items(self):
worn_items = self.get_worn_items()
total_weight = units.Pound(0)
for item in worn_items:
total_weight += item.weight.score
return total_weight
| 37.506024
| 115
| 0.625442
| 6,046
| 0.971089
| 0
| 0
| 6,064
| 0.97398
| 0
| 0
| 362
| 0.058143
|
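wear() above finds the first empty item slot whose keyword matches one of the item's wear locations, using next() over a generator expression with a None default. The same lookup pattern in isolation, with a minimal stand-in slot class (all names in this sketch are invented for the example, not taken from bflib or core):

```python
class ItemSlot:
    def __init__(self, keyword, item=None):
        self.keyword = keyword
        self.item = item

empty_item_slots = [ItemSlot("head"), ItemSlot("torso"), ItemSlot("hands")]

wear_location = "torso"
# first empty slot whose keyword matches, or None if nothing fits
proper_slot = next((s for s in empty_item_slots if s.keyword == wear_location), None)
if proper_slot is not None:
    proper_slot.item = "leather armor"
print(proper_slot.keyword, proper_slot.item)  # torso leather armor
```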
36ba65041a866ce133db66a746c7905283d02484
| 544
|
py
|
Python
|
students/K33402/Shuginin_Yurii/LR2/homework_board/board_app/urls.py
|
emina13/ITMO_ICT_WebDevelopment_2021-2022
|
498a6138e352e7e0ca40d1eb301bc29416158f51
|
[
"MIT"
] | null | null | null |
students/K33402/Shuginin_Yurii/LR2/homework_board/board_app/urls.py
|
emina13/ITMO_ICT_WebDevelopment_2021-2022
|
498a6138e352e7e0ca40d1eb301bc29416158f51
|
[
"MIT"
] | null | null | null |
students/K33402/Shuginin_Yurii/LR2/homework_board/board_app/urls.py
|
emina13/ITMO_ICT_WebDevelopment_2021-2022
|
498a6138e352e7e0ca40d1eb301bc29416158f51
|
[
"MIT"
] | 1
|
2022-03-19T09:24:42.000Z
|
2022-03-19T09:24:42.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.StartPageView.as_view()),
path('accounts/created/', views.NotificationView.as_view()),
path('accounts/<int:pk>/update/', views.StudentUpdate.as_view()),
path('profile/', views.ProfilePageView.as_view()),
path('profile/all_tasks/', views.AllTasks.as_view()),
path('profile/all_tasks/answer', views.solution_create),
path('profile/class_marks/subject_select', views.subject_select),
path('profile/class_marks', views.class_marks),
]
| 38.857143
| 69
| 0.71875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 161
| 0.295956
|
36bbde81383cafa0b00f9d5defddc4acebc151af
| 4,478
|
py
|
Python
|
tests/enviroments_test/test_environments.py
|
DKE-Data/agrirouter-sdk-python
|
6d6b26606f7d424c62289af56da55acf412772fc
|
[
"Apache-2.0"
] | null | null | null |
tests/enviroments_test/test_environments.py
|
DKE-Data/agrirouter-sdk-python
|
6d6b26606f7d424c62289af56da55acf412772fc
|
[
"Apache-2.0"
] | null | null | null |
tests/enviroments_test/test_environments.py
|
DKE-Data/agrirouter-sdk-python
|
6d6b26606f7d424c62289af56da55acf412772fc
|
[
"Apache-2.0"
] | null | null | null |
"""Test agrirouter/environments/environments.py"""
from agrirouter.environments.environments import ProductionEnvironment as PE
from agrirouter.environments.environments import QAEnvironment as QAE
from tests.constants import application_id
class TestPE:
def test_get_base_url(self):
assert PE().get_base_url() == PE._ENV_BASE_URL
def test_get_api_prefix(self):
assert PE().get_api_prefix() == PE._API_PREFIX
def test_get_registration_service_url(self):
assert PE().get_registration_service_url() == PE._REGISTRATION_SERVICE_URL
def test_get_onboard_url(self):
onb_url = PE._REGISTRATION_SERVICE_URL + PE._API_PREFIX + "/registration/onboard"
assert PE().get_onboard_url() == onb_url
def test_get_secured_onboard_url(self):
onb_url = PE._REGISTRATION_SERVICE_URL + PE._API_PREFIX + "/registration/onboard/request"
assert PE().get_secured_onboard_url() == onb_url
def test_get_verify_onboard_request_url(self):
req_url = PE._REGISTRATION_SERVICE_URL + PE._API_PREFIX + "/registration/onboard/verify"
assert PE().get_verify_onboard_request_url() == req_url
def test_get_revoke_url(self):
rev_url = PE._REGISTRATION_SERVICE_URL + PE._API_PREFIX + "/registration/onboard/revoke"
assert PE().get_revoke_url() == rev_url
def test_get_agrirouter_login_url(self):
login_url = PE._ENV_BASE_URL + PE._AGRIROUTER_LOGIN_URL
assert PE().get_agrirouter_login_url() == login_url
def test_get_secured_onboarding_authorization_url(self):
redirect_uri = "www.my_redirect.com"
response_type = "response_type"
assert PE().get_secured_onboarding_authorization_url(
application_id, response_type, "state", redirect_uri
) == "https://goto.my-agrirouter.com/application/{application_id}/authorize?response_type={response_type}&state={state}".format( # noqa
application_id=application_id,
response_type=response_type,
state="state") + f"&redirect_uri={redirect_uri}"
def test_get_mqtt_server_url(self):
assert PE().get_mqtt_server_url(
"localhost", "5000"
) == PE._MQTT_URL_TEMPLATE.format(
host="localhost", port="5000"
)
def test_get_env_public_key(self):
assert PE().get_env_public_key() == PE.AR_PUBLIC_KEY
class TestQAE:
def test_get_base_url(self):
assert QAE().get_base_url() == QAE._ENV_BASE_URL
def test_get_api_prefix(self):
assert QAE().get_api_prefix() == QAE._API_PREFIX
def test_get_registration_service_url(self):
assert QAE().get_registration_service_url() == QAE._REGISTRATION_SERVICE_URL
def test_get_onboard_url(self):
onb_url = QAE._REGISTRATION_SERVICE_URL + QAE._API_PREFIX + "/registration/onboard"
assert QAE().get_onboard_url() == onb_url
def test_get_secured_onboard_url(self):
onb_url = QAE._REGISTRATION_SERVICE_URL + QAE._API_PREFIX + "/registration/onboard/request"
assert QAE().get_secured_onboard_url() == onb_url
def test_get_verify_onboard_request_url(self):
req_url = QAE._REGISTRATION_SERVICE_URL + QAE._API_PREFIX + "/registration/onboard/verify"
assert QAE().get_verify_onboard_request_url() == req_url
def test_get_revoke_url(self):
rev_url = QAE._REGISTRATION_SERVICE_URL + QAE._API_PREFIX + "/registration/onboard/revoke"
assert QAE().get_revoke_url() == rev_url
def test_get_agrirouter_login_url(self):
login_url = QAE._ENV_BASE_URL + QAE._AGRIROUTER_LOGIN_URL
assert QAE().get_agrirouter_login_url() == login_url
def test_get_secured_onboarding_authorization_url(self):
redirect_uri = "www.my_redirect.com"
response_type = "response_type"
assert QAE().get_secured_onboarding_authorization_url(
application_id, response_type, "state", redirect_uri
) == QAE._ENV_BASE_URL + QAE._SECURED_ONBOARDING_AUTHORIZATION_LINK_TEMPLATE.format(
application_id=application_id,
response_type=response_type,
state="state") + f"&redirect_uri={redirect_uri}"
def test_get_mqtt_server_url(self):
assert QAE().get_mqtt_server_url(
"localhost", "5000"
) == QAE._MQTT_URL_TEMPLATE.format(host="localhost", port="5000")
def test_get_env_public_key(self):
assert QAE().get_env_public_key() == QAE.AR_PUBLIC_KEY
| 42.245283
| 143
| 0.712818
| 4,230
| 0.944618
| 0
| 0
| 0
| 0
| 0
| 0
| 629
| 0.140464
|
36bbe5261935347fbb62f2ff569d538d41679679
| 556
|
py
|
Python
|
foursquare/tests/test_lang.py
|
milind-shakya-sp/foursquare
|
1df90777f9b86d8247e8d79e7fbe8e88c8cdd467
|
[
"MIT"
] | 1
|
2019-06-10T21:12:01.000Z
|
2019-06-10T21:12:01.000Z
|
foursquare/tests/test_lang.py
|
milind-shakya-sp/foursquare
|
1df90777f9b86d8247e8d79e7fbe8e88c8cdd467
|
[
"MIT"
] | null | null | null |
foursquare/tests/test_lang.py
|
milind-shakya-sp/foursquare
|
1df90777f9b86d8247e8d79e7fbe8e88c8cdd467
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# (c) 2016 Mike Lewis
import logging; log = logging.getLogger(__name__)
from . import MultilangEndpointTestCase
class MultiLangTestCase(MultilangEndpointTestCase):
"""
General
"""
def test_lang(self):
"""Test a wide swath of languages"""
for api in self.apis:
categories = api.venues.categories()
assert 'categories' in categories, u"'categories' not in response"
assert len(categories['categories']) > 1, u'Expected multiple categories'
| 27.8
| 85
| 0.652878
| 393
| 0.706835
| 0
| 0
| 0
| 0
| 0
| 0
| 210
| 0.377698
|
36bc7e0436f464b768c92e41f855171401f6f554
| 4,923
|
py
|
Python
|
src/tests/model_deployment_tests.py
|
vravisrpi/mlops-vertex
|
0944b22996a5405f64d7ae162bd2427ffd81884d
|
[
"Apache-2.0"
] | null | null | null |
src/tests/model_deployment_tests.py
|
vravisrpi/mlops-vertex
|
0944b22996a5405f64d7ae162bd2427ffd81884d
|
[
"Apache-2.0"
] | null | null | null |
src/tests/model_deployment_tests.py
|
vravisrpi/mlops-vertex
|
0944b22996a5405f64d7ae162bd2427ffd81884d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test an uploaded model to Vertex AI."""
import os
import logging
import tensorflow as tf
test_instance = {
"dropoff_grid": ["POINT(-87.6 41.9)"],
"euclidean": [2064.2696],
"loc_cross": [""],
"payment_type": ["Credit Card"],
"pickup_grid": ["POINT(-87.6 41.9)"],
"trip_miles": [1.37],
"trip_day": [12],
"trip_hour": [16],
"trip_month": [2],
"trip_day_of_week": [4],
"trip_seconds": [555],
}
SERVING_DEFAULT_SIGNATURE_NAME = "serving_default"
from google.cloud import aiplatform as vertex_ai
def test_model_artifact():
pass
'''
feature_types = {
"dropoff_grid": tf.dtypes.string,
"euclidean": tf.dtypes.float32,
"loc_cross": tf.dtypes.string,
"payment_type": tf.dtypes.string,
"pickup_grid": tf.dtypes.string,
"trip_miles": tf.dtypes.float32,
"trip_day": tf.dtypes.int64,
"trip_hour": tf.dtypes.int64,
"trip_month": tf.dtypes.int64,
"trip_day_of_week": tf.dtypes.int64,
"trip_seconds": tf.dtypes.int64,
}
new_test_instance = dict()
for key in test_instance:
new_test_instance[key] = tf.constant(
[test_instance[key]], dtype=feature_types[key]
)
print(new_test_instance)
project = os.getenv("PROJECT")
region = os.getenv("REGION")
model_display_name = os.getenv("MODEL_DISPLAY_NAME")
assert project, "Environment variable PROJECT is None!"
assert region, "Environment variable REGION is None!"
assert model_display_name, "Environment variable MODEL_DISPLAY_NAME is None!"
vertex_ai.init(project=project, location=region,)
models = vertex_ai.Model.list(
filter=f'display_name={model_display_name}',
order_by="update_time"
)
assert (
models
), f"No model with display name {model_display_name} exists!"
model = models[-1]
artifact_uri = model.gca_resource.artifact_uri
logging.info(f"Model artifact uri:{artifact_uri}")
assert tf.io.gfile.exists(
artifact_uri
), f"Model artifact uri {artifact_uri} does not exist!"
saved_model = tf.saved_model.load(artifact_uri)
logging.info("Model loaded successfully.")
assert (
SERVING_DEFAULT_SIGNATURE_NAME in saved_model.signatures
), f"{SERVING_DEFAULT_SIGNATURE_NAME} not in model signatures!"
prediction_fn = saved_model.signatures["serving_default"]
predictions = prediction_fn(**new_test_instance)
logging.info("Model produced predictions.")
keys = ["classes", "scores"]
for key in keys:
assert key in predictions, f"{key} in prediction outputs!"
assert predictions["classes"].shape == (
1,
2,
), f"Invalid output classes shape: {predictions['classes'].shape}!"
assert predictions["scores"].shape == (
1,
2,
), f"Invalid output scores shape: {predictions['scores'].shape}!"
logging.info(f"Prediction output: {predictions}")
'''
def test_model_endpoint():
pass
'''
project = os.getenv("PROJECT")
region = os.getenv("REGION")
model_display_name = os.getenv("MODEL_DISPLAY_NAME")
endpoint_display_name = os.getenv("ENDPOINT_DISPLAY_NAME")
assert project, "Environment variable PROJECT is None!"
assert region, "Environment variable REGION is None!"
assert model_display_name, "Environment variable MODEL_DISPLAY_NAME is None!"
assert endpoint_display_name, "Environment variable ENDPOINT_DISPLAY_NAME is None!"
endpoints = vertex_ai.Endpoint.list(
filter=f'display_name={endpoint_display_name}',
order_by="update_time"
)
assert (
endpoints
), f"Endpoint with display name {endpoint_display_name} does not exist! in region {region}"
endpoint = endpoints[-1]
logging.info(f"Calling endpoint: {endpoint}.")
prediction = endpoint.predict([test_instance]).predictions[0]
keys = ["classes", "scores"]
for key in keys:
assert key in prediction, f"{key} in prediction outputs!"
assert (
len(prediction["classes"]) == 2
), f"Invalid number of output classes: {len(prediction['classes'])}!"
assert (
len(prediction["scores"]) == 2
), f"Invalid number output scores: {len(prediction['scores'])}!"
logging.info(f"Prediction output: {prediction}")
'''
| 31.557692
| 95
| 0.672354
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,535
| 0.921186
|
36bd2fc4cc690280e24a0e546825f3792edd1b9b
| 266
|
py
|
Python
|
noxfile.py
|
aodag/asbool
|
1c5d74c9b2f641a3452c1e7118a4a83ffe665ab5
|
[
"MIT"
] | 8
|
2015-11-20T01:20:13.000Z
|
2021-02-20T04:24:08.000Z
|
noxfile.py
|
aodag/asbool
|
1c5d74c9b2f641a3452c1e7118a4a83ffe665ab5
|
[
"MIT"
] | 2
|
2020-12-08T05:16:48.000Z
|
2021-02-16T11:12:06.000Z
|
noxfile.py
|
aodag/asbool
|
1c5d74c9b2f641a3452c1e7118a4a83ffe665ab5
|
[
"MIT"
] | null | null | null |
import nox
nox.options.sessions = ["test"]
@nox.session
def test(session):
session.install("-e", ".[testing]")
session.run("pytest")
@nox.session
def pack(session):
session.install("build")
session.run("python", "-m", "build", ".")
| 17.733333
| 45
| 0.593985
| 0
| 0
| 0
| 0
| 209
| 0.785714
| 0
| 0
| 59
| 0.221805
|
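For reference, the default session here is test, so running nox with no arguments installs the package with its testing extras and runs pytest, while the pack session builds a distribution. A minimal sketch of invoking those sessions programmatically from Python (assuming nox is installed; the working-directory value is a placeholder):

```python
import subprocess

# equivalent to running "nox" (default session: test) and "nox -s pack" on the command line
subprocess.run(["nox"], check=True)
subprocess.run(["nox", "-s", "pack"], check=True)
```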
36bdb06f6f3497fa1d06a8cb17f94061f6766f18
| 9,085
|
py
|
Python
|
selectGoodFeatures.py
|
TimSC/PyFeatureTrack
|
11668181e56fb9472a0c8db291c88546e7fae0cf
|
[
"BSD-2-Clause"
] | 33
|
2015-02-24T18:23:11.000Z
|
2022-03-08T09:55:02.000Z
|
selectGoodFeatures.py
|
Nestart/PyFeatureTrack
|
11668181e56fb9472a0c8db291c88546e7fae0cf
|
[
"BSD-2-Clause"
] | 1
|
2017-03-08T21:07:33.000Z
|
2017-06-04T21:58:01.000Z
|
selectGoodFeatures.py
|
Nestart/PyFeatureTrack
|
11668181e56fb9472a0c8db291c88546e7fae0cf
|
[
"BSD-2-Clause"
] | 15
|
2016-06-29T08:55:58.000Z
|
2020-06-04T03:01:39.000Z
|
from __future__ import print_function
import math, numpy as np
from PIL import Image
from klt import *
from error import *
from convolve import *
from klt_util import *
import goodFeaturesUtils
class selectionMode:
SELECTING_ALL = 1
REPLACING_SOME = 2
KLT_verbose = 1
#*********************************************************************
def _fillFeaturemap(x, y, featuremap, mindist, ncols, nrows):
for iy in range(y - mindist,y + mindist + 1):
for ix in range(x - mindist, x + mindist + 1):
if ix >= 0 and ix < ncols and iy >= 0 and iy < nrows:
featuremap[iy*ncols+ix] = True
return featuremap
#*********************************************************************
#* _enforceMinimumDistance
#*
#* Removes features that are within close proximity to better features.
#*
#* INPUTS
#* featurelist: A list of features. The nFeatures property
#* is used.
#*
#* OUTPUTS
#* featurelist: Is overwritten. Nearby "redundant" features are removed.
#* Writes -1's into the remaining elements.
#*
#* RETURNS
#* The number of remaining features.
#*
def _enforceMinimumDistance(pointlist, featurelist, ncols, nrows, mindist, min_eigenvalue, overwriteAllFeatures):
#int indx; # Index into features
#int x, y, val; # Location and trackability of pixel under consideration
#uchar *featuremap; # Boolean array recording proximity of features
#int *ptr;
# Cannot add features with an eigenvalue less than one
if min_eigenvalue < 1: min_eigenvalue = 1
# Allocate memory for feature map and clear it
#featuremap = (uchar *) malloc(ncols * nrows * sizeof(uchar));
#memset(featuremap, 0, ncols*nrows);
featuremap = [False for i in range(ncols * nrows)]
# Necessary because code below works with (mindist-1)
mindist = mindist - 1
# If we are keeping all old good features, then add them to the featuremap
if not overwriteAllFeatures:
for indx, feat in enumerate(featurelist):
if featurelist[indx].val >= 0:
x = int(featurelist[indx].x)
y = int(featurelist[indx].y)
featuremap = _fillFeaturemap(x, y, featuremap, mindist, ncols, nrows)
# For each feature point, in descending order of importance, do ...
indx = 0
pointlistIndx = 0
while True:
# If we can't add all the points, then fill in the rest
# of the featurelist with -1's */
if pointlistIndx >= len(pointlist):
while indx < len(featurelist):
if overwriteAllFeatures and featurelist[indx].val < 0:
featurelist[indx].x = -1
featurelist[indx].y = -1
featurelist[indx].val = kltState.KLT_NOT_FOUND
featurelist[indx].aff_img = None
featurelist[indx].aff_img_gradx = None
featurelist[indx].aff_img_grady = None
featurelist[indx].aff_x = -1.0
featurelist[indx].aff_y = -1.0
featurelist[indx].aff_Axx = 1.0
featurelist[indx].aff_Ayx = 0.0
featurelist[indx].aff_Axy = 0.0
featurelist[indx].aff_Ayy = 1.0
indx = indx + 1
break
pointdata = pointlist[pointlistIndx]
x = pointdata[1]
y = pointdata[2]
val = pointdata[0]
pointlistIndx += 1
# Ensure that feature is in-bounds
assert x >= 0
assert x < ncols
assert y >= 0
assert y < nrows
while not overwriteAllFeatures and indx < len(featurelist) and featurelist[indx].val >= 0:
indx = indx + 1
if indx >= len(featurelist): break
# If no neighbor has been selected, and if the minimum
# eigenvalue is large enough, then add feature to the current list
if not featuremap[y*ncols+x] and val >= min_eigenvalue:
featurelist[indx].x = x
featurelist[indx].y = y
featurelist[indx].val = int(val)
featurelist[indx].aff_img = None
featurelist[indx].aff_img_gradx = None
featurelist[indx].aff_img_grady = None
featurelist[indx].aff_x = -1.0
featurelist[indx].aff_y = -1.0
featurelist[indx].aff_Axx = 1.0
featurelist[indx].aff_Ayx = 0.0
featurelist[indx].aff_Axy = 0.0
featurelist[indx].aff_Ayy = 1.0
indx = indx + 1
# Fill in surrounding region of feature map, but
# make sure that pixels are in-bounds */
featuremap = _fillFeaturemap(x, y, featuremap, mindist, ncols, nrows);
return featurelist
#*********************************************************************
def _KLTSelectGoodFeatures(tc,img,nFeatures,mode):
featurelist = [KLT_Feature() for i in range(nFeatures)]
#_KLT_FloatImage floatimg, gradx, grady;
#int window_hw, window_hh
#int *pointlist
overwriteAllFeatures = (mode == selectionMode.SELECTING_ALL)
floatimages_created = False
ncols, nrows = img.size
# Check window size (and correct if necessary)
if tc.window_width % 2 != 1:
tc.window_width = tc.window_width+1
KLTWarning("Tracking context's window width must be odd. Changing to {0}.\n".format(tc.window_width))
if tc.window_height % 2 != 1:
tc.window_height = tc.window_height+1
KLTWarning("Tracking context's window height must be odd. Changing to {0}.\n".format(tc.window_height))
if tc.window_width < 3:
tc.window_width = 3
KLTWarning("Tracking context's window width must be at least three. \nChanging to %d.\n".format(tc.window_width))
if tc.window_height < 3:
tc.window_height = 3
KLTWarning("Tracking context's window height must be at least three. \nChanging to %d.\n".format(tc.window_height))
window_hw = tc.window_width // 2
window_hh = tc.window_height // 2
# Create pointlist, which is a simplified version of a featurelist,
# for speed. Contains only integer locations and values.
#pointlist = [0 for i in range(ncols * nrows * 3)]
# Create temporary images, etc.
if mode == selectionMode.REPLACING_SOME and tc.sequentialMode and tc.pyramid_last != None:
floatimg = tc.pyramid_last.img[0]
gradx = tc.pyramid_last_gradx.img[0]
grady = tc.pyramid_last_grady.img[0]
assert gradx != None
assert grady != None
else:
floatimages_created = True
floatimg = Image.new("F", img.size)
gradx = Image.new("F", img.size)
grady = Image.new("F", img.size)
if tc.smoothBeforeSelecting:
#_KLT_FloatImage tmpimg;
#tmpimg = Image.new("F", img.size)
tmpimg = np.array(img.convert("F"))
floatimg = KLTComputeSmoothedImage(tmpimg, KLTComputeSmoothSigma(tc))
#_KLTFreeFloatImage(tmpimg)
else:
floatimg = np.array(img.convert("F"))
# Compute gradient of image in x and y direction
gradx, grady = KLTComputeGradients(floatimg, tc.grad_sigma)
# Write internal images
if tc.writeInternalImages:
floatimg.save("kltimg_sgfrlf.pgm")
gradx.save("kltimg_sgfrlf_gx.pgm")
grady.save("kltimg_sgfrlf_gy.pgm")
# Compute trackability of each image pixel as the minimum
# of the two eigenvalues of the Z matrix
#register float gx, gy;
#register float gxx, gxy, gyy;
#register int xx, yy;
#register int *ptr;
#float val;
#unsigned int limit = 1;
borderx = tc.borderx; # Must not touch cols
bordery = tc.bordery; # lost by convolution
#int x, y;
#int i;
if borderx < window_hw: borderx = window_hw
if bordery < window_hh: bordery = window_hh
# Find largest value of an int
#for (i = 0 ; i < sizeof(int) ; i++) limit *= 256;
#limit = limit/2 - 1;
#gradxArr = np.array(gradx)
#gradyArr = np.array(grady)
pointlistx,pointlisty,pointlistval=goodFeaturesUtils.ScanImageForGoodFeatures(gradx,\
grady, borderx, bordery, window_hw, window_hh, tc.nSkippedPixels)
# Sort the features
pointlist = list(zip(pointlistval, pointlistx, pointlisty))
pointlist.sort()
pointlist.reverse()
#print(pointlist)
# Check tc.mindist
if tc.mindist < 0:
KLTWarning("(_KLTSelectGoodFeatures) Tracking context field tc.mindist is negative ({0}); setting to zero".format(tc.mindist))
tc.mindist = 0;
# Enforce minimum distance between features
_enforceMinimumDistance(pointlist, \
featurelist, \
ncols, nrows, \
tc.mindist, \
tc.min_eigenvalue, \
overwriteAllFeatures)
# Free memory
# free(pointlist);
# if (floatimages_created) {
# _KLTFreeFloatImage(floatimg);
# _KLTFreeFloatImage(gradx);
# _KLTFreeFloatImage(grady);
# }
return featurelist
#*********************************************************************
#* KLTSelectGoodFeatures
#*
#* Main routine, visible to the outside. Finds the good features in
#* an image.
#*
#* INPUTS
#* tc: Contains parameters used in computation (size of image,
#* size of window, min distance b/w features, sigma to compute
#* image gradients, # of features desired).
#* img: Pointer to the data of an image (probably unsigned chars).
#*
#* OUTPUTS
#* features: List of features. The member nFeatures is computed.
#*
def KLTSelectGoodFeatures(tc, img, nFeatures):
ncols, nrows = img.size
#int ncols, int nrows,
if KLT_verbose >= 1:
print("(KLT) Selecting the {0} best features from a {1} by {2} image... ".format(nFeatures, ncols, nrows))
fl = _KLTSelectGoodFeatures(tc, img, nFeatures, selectionMode.SELECTING_ALL)
if KLT_verbose >= 1:
print("\n\t{0} features found.\n".format(KLTCountRemainingFeatures(fl)))
if tc.writeInternalImages:
print("\tWrote images to 'kltimg_sgfrlf*.pgm'.\n")
return fl
| 30.692568
| 128
| 0.681233
| 59
| 0.006494
| 0
| 0
| 0
| 0
| 0
| 0
| 3,869
| 0.425867
|
36be052ecd5aed78815486dfc598f4e2ff2a749d
| 3,345
|
py
|
Python
|
pysparsdr/pySparSDR.py
|
ucsdwcsng/pySparSDR
|
6622fce9c75b180b8601d9deecafff401e6a4d9f
|
[
"Apache-2.0"
] | null | null | null |
pysparsdr/pySparSDR.py
|
ucsdwcsng/pySparSDR
|
6622fce9c75b180b8601d9deecafff401e6a4d9f
|
[
"Apache-2.0"
] | null | null | null |
pysparsdr/pySparSDR.py
|
ucsdwcsng/pySparSDR
|
6622fce9c75b180b8601d9deecafff401e6a4d9f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import numpy as np
from scipy import signal as sig
class pySparSDRCompress():
'''
Implementation of the SparSDR Compressor based on
Khazraee, M., Guddeti, Y., Crow, S., Snoeren, A.C., Levchenko, K., Bharadia, D. and Schulman, A., 2019, June. Sparsdr: Sparsity-proportional backhaul and compute for sdrs. In Proceedings of the 17th Annual International Conference on Mobile Systems, Applications, and Services (pp. 391-403).
'''
def __init__(self,nfft=1024,thresholdVec=None):
'''
Initialize SparSDR Compressor
:input: nfft :shouldBeEven: Number of bins in fft
'''
assert not nfft%2
self.nfft = nfft
self.nover = int(self.nfft/2)
self.windowVec = sig.windows.hann(self.nfft, sym=False)
self.windowVec = np.expand_dims(self.windowVec,axis=1)
if thresholdVec is None:
self.setThreshold(np.zeros((1,self.nfft)))
else:
self.setThreshold(thresholdVec)
self.bufferState = np.zeros((self.nover,))
self.numWinProcessed = 0
def reset(self):
'''
Resets internal memory if the compressor needs to be re-started
(soft-reset)
'''
self.bufferState = 0*self.bufferState
self.numWinProcessed = 0
def setThreshold(self, thresholdVec):
'''
Sets internal threshold vector
:input: thresholdVec :shape==(1,nfft): real-valued thresholds as numpy array
'''
assert thresholdVec.shape == (1,self.nfft)
self.thresholdVec = thresholdVec
def work(self, xIn):
'''
Perform compression on input vector
:input: xIn :numElements==k*nfft: input signal as a numpy array
:output: (windowIdx, binIdx, binValue)
:output: windowIdx : Index of window over all-time
:output: binIdx : Index of bin in a particular window
:output: binValue : Value of the binIdx at the windowIdx
This function remembers past input and stores overlap in the bufferState
variable
'''
assert not xIn.size%self.nfft
# concatenate filter state
xIn = np.concatenate((self.bufferState, xIn))
# Half-Overlapped windowing
evenWindows = self.windowVec*xIn[:-self.nover].reshape((self.nfft,-1))
oddWindows = self.windowVec*xIn[self.nover:].reshape((self.nfft,-1))
# Fourier Transform
evenWindows = np.fft.fft(evenWindows,axis=0)
oddWindows = np.fft.fft(oddWindows,axis=0)
# Interleave overlapped windows
output = np.empty((self.nfft, 2*evenWindows.shape[1]) , dtype=evenWindows.dtype)
output[:,0::2] = evenWindows
output[:,1::2] = oddWindows
output = output.transpose()
# Threshold to find areas of activity
thresholdFlag = np.abs(output) > self.thresholdVec
thresholdFlag = np.transpose(thresholdFlag.nonzero())
# Select only active bins
output = output[thresholdFlag[:,0],thresholdFlag[:,1]]
thresholdFlag[:,0] = self.numWinProcessed + thresholdFlag[:,0]
# Update internal states
self.bufferState = xIn[-self.nover:]
self.numWinProcessed = self.numWinProcessed + 2*evenWindows.shape[1]
return thresholdFlag[:,0], thresholdFlag[:,1], output
| 37.166667
| 295
| 0.635277
| 3,277
| 0.979671
| 0
| 0
| 0
| 0
| 0
| 0
| 1,387
| 0.414649
|
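A short driver sketch for the compressor above: feed it a block of noisy samples containing one strong tone and keep only the bins whose magnitude exceeds a fixed threshold. Everything here (nfft, the threshold level, the test signal) is invented for illustration, and the sketch assumes pySparSDRCompress from the module above is available in the same session; it otherwise needs only numpy/scipy.

```python
import numpy as np

nfft = 1024
compressor = pySparSDRCompress(nfft=nfft)
compressor.setThreshold(50.0 * np.ones((1, nfft)))  # flat, hand-picked threshold

# one strong complex tone buried in noise, two FFT windows long
t = np.arange(2 * nfft)
x = 10.0 * np.exp(2j * np.pi * 100 * t / nfft) + 0.1 * np.random.randn(2 * nfft)

win_idx, bin_idx, values = compressor.work(x)
# only the bins whose magnitude exceeded the threshold survive compression
print(len(bin_idx), np.unique(bin_idx))
```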
36bf9270f81abe8f83096f56129e26e2554011cc
| 803
|
py
|
Python
|
dirtyclean/tests/test_dirtyclean.py
|
paultopia/dirtyclean
|
1b93b29e070b53afede22ff28497fd68f28d0326
|
[
"MIT"
] | 2
|
2017-12-04T16:58:57.000Z
|
2021-03-02T04:59:54.000Z
|
dirtyclean/tests/test_dirtyclean.py
|
paultopia/dirtyclean
|
1b93b29e070b53afede22ff28497fd68f28d0326
|
[
"MIT"
] | null | null | null |
dirtyclean/tests/test_dirtyclean.py
|
paultopia/dirtyclean
|
1b93b29e070b53afede22ff28497fd68f28d0326
|
[
"MIT"
] | null | null | null |
from dirtyclean import clean
import unittest
class TestDirtyClean(unittest.TestCase):
def setUp(self):
self.uglystring = " st—up•id ‘char−ac ter..s’, in its’ string...”Ç "
with open("multiline.txt") as mt:
self.multiline = mt.read()
def test_basic_clean(self):
self.assertEqual(clean(self.uglystring),
"st up id char ac ter s in its string Ç")
def test_simplify_letters(self):
self.assertEqual(clean(self.uglystring, simplify_letters=True),
"st up id char ac ter s in its string C")
def test_multiline(self):
self.assertEqual(clean(self.multiline),
"I am the very model of a multiline string with more stuff than you might want to have in there Ç")
| 33.458333
| 124
| 0.617684
| 771
| 0.940244
| 0
| 0
| 0
| 0
| 0
| 0
| 264
| 0.321951
|
36bfb2d78d16ac861521aa10b4dcdbc76d656637
| 1,321
|
py
|
Python
|
findNearestControl.py
|
petrarch1603/SurveyApplications
|
129a4e24123bf81687c0a60cccbe3d0a83f63e40
|
[
"MIT"
] | 1
|
2019-08-24T20:29:05.000Z
|
2019-08-24T20:29:05.000Z
|
findNearestControl.py
|
petrarch1603/SurveyApplications
|
129a4e24123bf81687c0a60cccbe3d0a83f63e40
|
[
"MIT"
] | null | null | null |
findNearestControl.py
|
petrarch1603/SurveyApplications
|
129a4e24123bf81687c0a60cccbe3d0a83f63e40
|
[
"MIT"
] | null | null | null |
import csv
control = "/Users/patrickmcgranaghan1/Documents/Python/python_work/SurveyApplications/source_data/control.csv"
set_points = "/Users/patrickmcgranaghan1/Documents/Python/python_work/SurveyApplications/source_data/setPoints.csv"
max_hypotenuse = 200 # Integer in feet
# Note in the State Plane Coordinate System the coordinates are written Northing(Y), Easting(X)
# This is the opposite of the normal (X, Y) coordinate system.
with open(set_points, 'r') as set_pts:
set_reader = csv.reader(set_pts)
for set_coord in set_reader:
temp_list = []
with open(control, 'r') as ctrl:
ctrl_reader = csv.reader(ctrl)
for ctrl_coord in ctrl_reader:
xDelta = int(set_coord[2]) - int(ctrl_coord[2])
yDelta = int(set_coord[1]) - int(ctrl_coord[1])
hypotenuse = ((xDelta ** 2) + (yDelta ** 2)) ** 0.5
if hypotenuse <= max_hypotenuse:
tup = (ctrl_coord[0], hypotenuse)
temp_list.append(tup)
closest_base = (min(temp_list, key=lambda t: t[1]))
# Below write code to insert the closest control points into the spreadsheet in a selected column
print(set_coord[0] + " is closest to " + (closest_base[0]) + ". A distance of " + str(closest_base[1]))
| 48.925926
| 115
| 0.650265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 516
| 0.390613
|
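The script computes a plane distance from each set point to every control point and keeps the closest control within max_hypotenuse. A compact standalone version of that nearest-control lookup on in-memory lists, with made-up coordinates so no CSV files are needed:

```python
# (name, northing, easting) tuples; values are invented for illustration
control = [("CP1", 5000, 1200), ("CP2", 5100, 1450), ("CP3", 4890, 1320)]
set_points = [("SP1", 5020, 1230), ("SP2", 5095, 1440)]
max_hypotenuse = 200  # feet

for name, n, e in set_points:
    candidates = []
    for c_name, c_n, c_e in control:
        # plane distance in the State Plane system: Northing is Y, Easting is X
        hyp = ((e - c_e) ** 2 + (n - c_n) ** 2) ** 0.5
        if hyp <= max_hypotenuse:
            candidates.append((c_name, hyp))
    closest = min(candidates, key=lambda t: t[1])
    print(f"{name} is closest to {closest[0]}. A distance of {closest[1]:.1f}")
```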
36c07f8de0ab1e4bb4abec5686212164de45b5a1
| 2,118
|
py
|
Python
|
stac_compose/collections/controller.py
|
dgi-catalog/stac-compose
|
1cae4a58fcfb36082c203db3c99e2779fc207400
|
[
"MIT"
] | null | null | null |
stac_compose/collections/controller.py
|
dgi-catalog/stac-compose
|
1cae4a58fcfb36082c203db3c99e2779fc207400
|
[
"MIT"
] | 14
|
2021-03-01T20:59:20.000Z
|
2021-11-24T19:14:49.000Z
|
stac_compose/collections/controller.py
|
dgi-catalog/stac-compose
|
1cae4a58fcfb36082c203db3c99e2779fc207400
|
[
"MIT"
] | null | null | null |
from json import dumps
from pprint import PrettyPrinter
from cerberus.validator import Validator
from flask import request
from flask_restx import Resource
from werkzeug.exceptions import BadRequest
from stac_compose.collections import ns as api
from stac_compose.collections.business import CollectionsBusiness
from stac_compose.collections.parsers import validate, COLLECTIONS_CONTROLLER_VALIDATION
from stac_compose.decorator import catch_generic_exceptions
from stac_compose.environment import SC_LOGGING_LEVEL
from stac_compose.logger import create_logger
# create logger object
logger = create_logger(__name__, level=SC_LOGGING_LEVEL)
pp = PrettyPrinter(indent=4)
@api.route('/')
class CollectionsController(Resource):
"""CollectionsController"""
@catch_generic_exceptions
def get(self):
args = request.args.to_dict(flat=True)
logger.info(f'received args: {args}')
v = Validator(COLLECTIONS_CONTROLLER_VALIDATION)
if not v.validate(args):
errors = dumps(v.errors)
logger.error(f'request arguments are not valid: {errors}\n')
raise BadRequest(errors) # 400 - Bad Request
# get validated arguments
validated_args = v.document
logger.info(f'validated args: {validated_args}\n')
# return a list of STAC collections by providers
return CollectionsBusiness.get_collections_by_providers(validated_args)
@api.route('/items/')
class CollectionsItemsController(Resource):
"""CollectionsItemsController"""
@catch_generic_exceptions
def get(self):
args = request.args.to_dict(flat=True)
logger.info('args: %s', args)
data, status = validate(args, 'search_get')
logger.info('data: %s', data)
logger.info('status: %s', status)
if status is False:
raise BadRequest(dumps(data)) # 400 - Bad Request
features = CollectionsBusiness.search_get(**request.args)
# logger.debug('\n\nCollectionsItemsController.get() - features: %s \n\n', features)
# pp.pprint(features)
return features
| 29.830986
| 92
| 0.715297
| 1,399
| 0.660529
| 0
| 0
| 1,437
| 0.67847
| 0
| 0
| 460
| 0.217186
|
36c20378107325500044b16060b5655f3ad7826c
| 6,070
|
py
|
Python
|
python/tvm/auto_scheduler/workload_registry.py
|
jiangzoi/incubator-tvm
|
144c6f45f7217b9df2f5605e06f0903e470ac11c
|
[
"Apache-2.0"
] | 2
|
2020-07-07T07:38:45.000Z
|
2021-06-02T07:08:09.000Z
|
python/tvm/auto_scheduler/workload_registry.py
|
jiangzoi/incubator-tvm
|
144c6f45f7217b9df2f5605e06f0903e470ac11c
|
[
"Apache-2.0"
] | 1
|
2020-07-29T07:29:17.000Z
|
2020-07-29T07:29:17.000Z
|
python/tvm/auto_scheduler/workload_registry.py
|
jiangzoi/incubator-tvm
|
144c6f45f7217b9df2f5605e06f0903e470ac11c
|
[
"Apache-2.0"
] | 1
|
2021-07-03T08:09:32.000Z
|
2021-07-03T08:09:32.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Workload registration and serialization.
We use a json string to represent a workload (a computation graph).
The format of the string is `[func_name, [args...]]`.
The dag should be the return value of this `func_name(*args)`.
Rationale: The workload is actually a compute dag defined by tvm dsl. But serializing compute dags
and matching them efficiently is not easy. Therefore, we use the above string to encode a compute
dag.
These strings are efficient for serialization/matching and won't be too long.
When we need the dag, we decode the string and call the function, which will return the dag.
"""
import pickle
import json
import tvm._ffi
from .utils import serialize_args, deserialize_args, get_func_name
WORKLOAD_FUNC_REGISTRY = {}
def register_workload(func_name, f=None, override=False):
""" Register a function that generates a certain workload.
The input function should take hashable and jsonable arguments
(int, float, tuple of int, tvm.tensor.Tensor, ...) and return a list of tvm.tensor.Tensor.
Parameters
----------
func_name : Union[Function, str]
The generation function that returns the compute declaration Tensors or its function name.
f : Optional[Function]
The generation function to be registered.
override : boolean = False
Whether override existing entry.
Examples
--------
@auto_scheduler.register_workload
def matmul(N, M, K):
A = te.placeholder((N, K), name='A')
B = te.placeholder((K, M), name='B')
k = te.reduce_axis((0, K), name='k')
C = te.compute((N, M), lambda i, j: tvm.sum(A[i][k] * B[k][j], axis=[k]), name='C')
return [A, B, C]
"""
global WORKLOAD_FUNC_REGISTRY
if callable(func_name):
f = func_name
func_name = get_func_name(f)
if not isinstance(func_name, str):
raise ValueError("expect string function name")
def register(myf):
"""internal register function"""
if func_name in WORKLOAD_FUNC_REGISTRY and not override:
raise RuntimeError('%s has been registered already' % func_name)
WORKLOAD_FUNC_REGISTRY[func_name] = myf
return myf
if f:
return register(f)
return register
def make_workload_key(func, args):
""" Make a workload key by function and arguments.
Parameters
----------
func : Union[Function, str]
The function that returns the compute declaration Tensors.
Can be the a function or the function name.
args : Args
The args of the function.
Returns
-------
workload_key : Str
The workload key of the function.
"""
global WORKLOAD_FUNC_REGISTRY
if callable(func):
func_name = get_func_name(func)
elif isinstance(func, str):
func_name = func
else:
raise ValueError("Invalid function: " + str(func) +
" . `make_workload_key` expects a callable function or its function name")
if not func_name in WORKLOAD_FUNC_REGISTRY:
raise ValueError("%s is not registered. " % func,
"Please register it with @auto_scheduler.register_workload")
args = serialize_args(args)
return json.dumps((func_name,) + args)
def decode_workload_key_to_func_args(workload_key):
""" Decode a workload key to the registerd function name and its corresponding args.
Parameters
----------
workload_key : str
The input workload key.
Returns
-------
name : str
The function name of this workload key.
args : List[Tensor]
The args of the generation function.
"""
global WORKLOAD_FUNC_REGISTRY
workload = json.loads(workload_key)
if not workload[0] in WORKLOAD_FUNC_REGISTRY:
raise ValueError("%s is not registered. " % workload[0] +
"Please register it with @auto_scheduler.register_workload")
return workload[0], deserialize_args(workload[1:])
@tvm._ffi.register_func("auto_scheduler.workload_key_to_tensors")
def workload_key_to_tensors(workload_key):
""" Get the input/output tensors from the workload key.
This method is usually used to create a ComputeDAG by workload key.
Parameters
----------
workload_key : str
The input workload key.
Returns
-------
tensors : List[Tensor]
The registered compute declaration Tensors.
"""
global WORKLOAD_FUNC_REGISTRY
name, args = decode_workload_key_to_func_args(workload_key)
lookup = WORKLOAD_FUNC_REGISTRY[name]
assert callable(lookup)
return lookup(*args)
def save_workload_func_registry(filename):
""" Dump workload function registry to a pickle binary file.
Parameters
----------
filename : str
The filename to dump workload function registry to.
"""
global WORKLOAD_FUNC_REGISTRY
pickle.dump(WORKLOAD_FUNC_REGISTRY, open(filename, 'wb'))
def load_workload_func_registry(filename):
""" Load workload function registry from a pickle binary file.
Parameters
----------
filename : str
The filename to load workload function registry from.
"""
global WORKLOAD_FUNC_REGISTRY
WORKLOAD_FUNC_REGISTRY = pickle.load(open(filename, 'rb'))
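# Hypothetical save/load round trip (the path below is an assumption used only
# for illustration):
#   save_workload_func_registry("/tmp/workload_registry.pkl")
#   load_workload_func_registry("/tmp/workload_registry.pkl")
# Note that pickling the registry only works if the registered functions are
# importable at load time (e.g. defined at module level).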
| 31.614583
| 99
| 0.682208
| 0
| 0
| 0
| 0
| 633
| 0.104283
| 0
| 0
| 4,065
| 0.669687
|
36c26ea8b70af852028240a4c83a673def2fbdd3
| 485
|
py
|
Python
|
main/xrandr/template.py
|
RoastVeg/cports
|
803c7f07af341eb32f791b6ec1f237edb2764bd5
|
[
"BSD-2-Clause"
] | null | null | null |
main/xrandr/template.py
|
RoastVeg/cports
|
803c7f07af341eb32f791b6ec1f237edb2764bd5
|
[
"BSD-2-Clause"
] | null | null | null |
main/xrandr/template.py
|
RoastVeg/cports
|
803c7f07af341eb32f791b6ec1f237edb2764bd5
|
[
"BSD-2-Clause"
] | null | null | null |
pkgname = "xrandr"
pkgver = "1.5.1"
pkgrel = 0
build_style = "gnu_configure"
hostmakedepends = ["pkgconf"]
makedepends = ["libxrandr-devel"]
pkgdesc = "Command line interface to X RandR extension"
maintainer = "q66 <q66@chimera-linux.org>"
license = "MIT"
url = "https://xorg.freedesktop.org"
source = f"$(XORG_SITE)/app/{pkgname}-{pkgver}.tar.xz"
sha256 = "7bc76daf9d72f8aff885efad04ce06b90488a1a169d118dea8a2b661832e8762"
def post_install(self):
self.install_license("COPYING")
| 30.3125
| 75
| 0.748454
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 285
| 0.587629
|
36c29207131a5d0aabb533544ef1349cab67ea61
| 2,477
|
py
|
Python
|
src/arch/riscv/RiscvCPU.py
|
yclin99/CS251A_final_gem5
|
391ca1d7c9484f4d58fce9a4424821dcbb2463ac
|
[
"BSD-3-Clause"
] | 1
|
2022-03-25T13:18:26.000Z
|
2022-03-25T13:18:26.000Z
|
src/arch/riscv/RiscvCPU.py
|
yclin99/CS251A_final_gem5
|
391ca1d7c9484f4d58fce9a4424821dcbb2463ac
|
[
"BSD-3-Clause"
] | 1
|
2022-03-25T14:15:30.000Z
|
2022-03-25T14:15:30.000Z
|
src/arch/riscv/RiscvCPU.py
|
ksco/gem5-xiangshan
|
0baf1b5229885d81d689a677102f0665aaac5514
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2021 Google, Inc.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.objects.BaseAtomicSimpleCPU import BaseAtomicSimpleCPU
from m5.objects.BaseNonCachingSimpleCPU import BaseNonCachingSimpleCPU
from m5.objects.BaseTimingSimpleCPU import BaseTimingSimpleCPU
from m5.objects.BaseO3CPU import BaseO3CPU
from m5.objects.BaseMinorCPU import BaseMinorCPU
from m5.objects.RiscvDecoder import RiscvDecoder
from m5.objects.RiscvMMU import RiscvMMU
from m5.objects.RiscvInterrupts import RiscvInterrupts
from m5.objects.RiscvISA import RiscvISA
class RiscvCPU:
ArchDecoder = RiscvDecoder
ArchMMU = RiscvMMU
ArchInterrupts = RiscvInterrupts
ArchISA = RiscvISA
class RiscvAtomicSimpleCPU(BaseAtomicSimpleCPU, RiscvCPU):
mmu = RiscvMMU()
class RiscvNonCachingSimpleCPU(BaseNonCachingSimpleCPU, RiscvCPU):
mmu = RiscvMMU()
class RiscvTimingSimpleCPU(BaseTimingSimpleCPU, RiscvCPU):
mmu = RiscvMMU()
class RiscvO3CPU(BaseO3CPU, RiscvCPU):
mmu = RiscvMMU()
class RiscvMinorCPU(BaseMinorCPU, RiscvCPU):
mmu = RiscvMMU()
| 44.232143
| 72
| 0.805006
| 498
| 0.20105
| 0
| 0
| 0
| 0
| 0
| 0
| 1,467
| 0.592249
|
36c4f0d8dd30675016f1cde8a4e0b430d5e215ed
| 164
|
py
|
Python
|
misc/validateInput.py
|
viju4you/Python
|
3c4a3a46265e71fc21da62d2cb204d20dcd9ec62
|
[
"CC0-1.0"
] | 110
|
2017-03-11T23:37:46.000Z
|
2021-07-12T11:51:32.000Z
|
misc/validateInput.py
|
viju4you/Python
|
3c4a3a46265e71fc21da62d2cb204d20dcd9ec62
|
[
"CC0-1.0"
] | null | null | null |
misc/validateInput.py
|
viju4you/Python
|
3c4a3a46265e71fc21da62d2cb204d20dcd9ec62
|
[
"CC0-1.0"
] | 52
|
2016-11-27T19:50:40.000Z
|
2022-02-09T06:37:24.000Z
|
# Validate input
while True:
print('Enter your age:')
age = input()
if age.isdecimal():
break
print('Please enter a number for your age.')
| 16.4
| 47
| 0.597561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 69
| 0.420732
|
36c5772cb7b021a7fd6965ba28a4663832c436d3
| 1,003
|
py
|
Python
|
ckan/migration/versions/041_resource_new_fields.py
|
florianm/ckan
|
1cfd98d591ac70b4eb81048bcd227b6c1354b1bf
|
[
"Apache-2.0"
] | 12
|
2015-08-28T16:59:07.000Z
|
2020-03-08T01:39:30.000Z
|
ckan/migration/versions/041_resource_new_fields.py
|
florianm/ckan
|
1cfd98d591ac70b4eb81048bcd227b6c1354b1bf
|
[
"Apache-2.0"
] | 13
|
2019-05-02T21:01:28.000Z
|
2020-10-20T23:34:48.000Z
|
ckan/migration/versions/041_resource_new_fields.py
|
florianm/ckan
|
1cfd98d591ac70b4eb81048bcd227b6c1354b1bf
|
[
"Apache-2.0"
] | 10
|
2015-05-08T04:33:20.000Z
|
2020-03-03T15:17:58.000Z
|
from migrate import *
def upgrade(migrate_engine):
migrate_engine.execute(
'''
begin;
ALTER TABLE resource
ADD COLUMN name text,
ADD COLUMN resource_type text,
ADD COLUMN mimetype text,
ADD COLUMN mimetype_inner text,
ADD COLUMN "size" bigint,
ADD COLUMN last_modified timestamp without time zone,
ADD COLUMN cache_url text,
ADD COLUMN cache_last_updated timestamp without time zone,
ADD COLUMN webstore_url text,
ADD COLUMN webstore_last_updated timestamp without time zone;
ALTER TABLE resource_revision
ADD COLUMN name text,
ADD COLUMN resource_type text,
ADD COLUMN mimetype text,
ADD COLUMN mimetype_inner text,
ADD COLUMN "size" bigint,
ADD COLUMN last_modified timestamp without time zone,
ADD COLUMN cache_url text,
ADD COLUMN cache_last_updated timestamp without time zone,
ADD COLUMN webstore_url text,
ADD COLUMN webstore_last_updated timestamp without time zone;
commit;
'''
)
| 30.393939
| 65
| 0.731805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 912
| 0.909272
|
36c6dd4f5d4854726c666ad63dd36dff26b82159
| 1,153
|
py
|
Python
|
src/train_model.py
|
hzdr/dvc_tutorial_series
|
f53eee599cc05e2c2ea31f6e2fd567a4ac3061a3
|
[
"BSD-3-Clause"
] | 2
|
2021-06-24T13:39:39.000Z
|
2022-02-27T13:35:02.000Z
|
src/train_model.py
|
hzdr/dvc_tutorial_series
|
f53eee599cc05e2c2ea31f6e2fd567a4ac3061a3
|
[
"BSD-3-Clause"
] | null | null | null |
src/train_model.py
|
hzdr/dvc_tutorial_series
|
f53eee599cc05e2c2ea31f6e2fd567a4ac3061a3
|
[
"BSD-3-Clause"
] | null | null | null |
import pickle
import pandas as pd
import yaml
from sklearn.linear_model import ElasticNet, LogisticRegression
from sklearn.ensemble import RandomForestRegressor
from config import Config
Config.MODELS_PATH.mkdir(parents=True, exist_ok=True)
with open ("params.yaml", "r") as fd:
params = yaml.safe_load(fd)
model_type = params['model_type']
lr = params['lr']
random_state = params['random_state']
#epochs = params['train']['epochs']
alpha = params['train']['alpha']
l1_rate = params['train']['l1_rate']
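# A params.yaml matching the keys read above might look like this (hypothetical
# sketch; the values are illustrative only):
#   model_type: ElasticNet
#   lr: 0.01
#   random_state: 42
#   train:
#     alpha: 0.5
#     l1_rate: 0.1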
X_train = pd.read_csv(str(Config.FEATURES_PATH / "train_features.csv"))
y_train = pd.read_csv(str(Config.FEATURES_PATH / "train_labels.csv"))
if model_type == "LogisticRegression":
model = LogisticRegression(l1_ratio=l1_rate, random_state=random_state)
if model_type == "RandomForestRegressor":
model = RandomForestRegressor(
n_estimators=150, max_depth=6, random_state=random_state
)
if model_type == "ElasticNet":
model = ElasticNet(
alpha=alpha, l1_ratio=l1_rate, random_state=random_state
)
model.fit(X_train, y_train)
pickle.dump(model, open(str(Config.MODELS_PATH / "model.pickle"), "wb"))
| 28.825
| 75
| 0.743278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 222
| 0.192541
|
36c6fc43e8d2fdc269e708e857550cc5862aa1c5
| 8,226
|
py
|
Python
|
opentracing/harness/api_check.py
|
autocracy/opentracing-python
|
ac45df0c39b4cce8e6e6ca40dedc2b9f6c388328
|
[
"MIT"
] | null | null | null |
opentracing/harness/api_check.py
|
autocracy/opentracing-python
|
ac45df0c39b4cce8e6e6ca40dedc2b9f6c388328
|
[
"MIT"
] | null | null | null |
opentracing/harness/api_check.py
|
autocracy/opentracing-python
|
ac45df0c39b4cce8e6e6ca40dedc2b9f6c388328
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2016 The OpenTracing Authors.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import time
import pytest
import opentracing
from opentracing import Format
class APICompatibilityCheckMixin(object):
"""
A mixin class for validating that a given tracer implementation
satisfies the requirements of the OpenTracing API.
"""
def tracer(self):
raise NotImplementedError('Subclass must implement tracer()')
def check_baggage_values(self):
"""If true, the test will validate Baggage items by storing and
retrieving them from the trace context. If false, it will only attempt
to store and retrieve the Baggage items to check the API compliance,
but not actually validate stored values. The latter mode is only
useful for a no-op tracer.
"""
return True
def test_start_span(self):
tracer = self.tracer()
span = tracer.start_span(operation_name='Fry')
span.finish()
with tracer.start_span(operation_name='Fry',
tags={'birthday': 'August 14 1974'}) as span:
span.log_event('birthplace',
payload={'hospital': 'Brooklyn Pre-Med Hospital',
'city': 'Old New York'})
def test_start_span_with_parent(self):
tracer = self.tracer()
parent_span = tracer.start_span(operation_name='parent')
assert parent_span is not None
span = tracer.start_span(
operation_name='Leela',
child_of=parent_span)
span.finish()
span = tracer.start_span(
operation_name='Leela',
references=[opentracing.follows_from(parent_span.context)],
tags={'birthplace': 'sewers'})
span.finish()
parent_span.finish()
def test_start_child_span(self):
tracer = self.tracer()
parent_span = tracer.start_span(operation_name='parent')
assert parent_span is not None
child_span = opentracing.start_child_span(
parent_span, operation_name='Leela')
child_span.finish()
parent_span.finish()
def test_set_operation_name(self):
span = self.tracer().start_span().set_operation_name('Farnsworth')
span.finish()
def test_span_as_context_manager(self):
finish = {'called': False}
def mock_finish(*_):
finish['called'] = True
with self.tracer().start_span(operation_name='antiquing') as span:
setattr(span, 'finish', mock_finish)
assert finish['called'] is True
# now try with exception
finish['called'] = False
try:
with self.tracer().start_span(operation_name='antiquing') as span:
setattr(span, 'finish', mock_finish)
raise ValueError()
except ValueError:
assert finish['called'] is True
else:
raise AssertionError('Expected ValueError') # pragma: no cover
def test_span_tag_value_types(self):
with self.tracer().start_span(operation_name='ManyTypes') as span:
span. \
set_tag('an_int', 9). \
set_tag('a_bool', True). \
set_tag('a_string', 'aoeuidhtns')
def test_span_tags_with_chaining(self):
span = self.tracer().start_span(operation_name='Farnsworth')
span. \
set_tag('birthday', '9 April, 2841'). \
set_tag('loves', 'different lengths of wires')
span. \
set_tag('unicode_val', u'non-ascii: \u200b'). \
set_tag(u'unicode_key_\u200b', 'ascii val')
span.finish()
def test_span_logs(self):
span = self.tracer().start_span(operation_name='Fry')
# Newer API
span.log_kv(
{'frozen.year': 1999, 'frozen.place': 'Cryogenics Labs'})
span.log_kv(
{'defrosted.year': 2999, 'defrosted.place': 'Cryogenics Labs'},
time.time())
# Older API
span.\
log_event('frozen', {'year': 1999, 'place': 'Cryogenics Labs'}). \
log_event('defrosted', {'year': 2999}). \
log_event('became his own grandfather', 1947)
span.\
log(event='frozen'). \
log(payload={'year': 1999}). \
log(timestamp=time.time(),
event='frozen',
payload={'year': 1999}). \
log(timestamp=time.time(),
event='unfrozen',
payload={'year': 2999})
def test_span_baggage(self):
with self.tracer().start_span(operation_name='Fry') as span:
assert span.context.baggage == {}
span_ref = span.set_baggage_item('Kiff-loves', 'Amy')
assert span_ref is span
val = span.get_baggage_item('Kiff-loves')
if self.check_baggage_values():
assert 'Amy' == val
pass
def test_context_baggage(self):
with self.tracer().start_span(operation_name='Fry') as span:
assert span.context.baggage == {}
span.set_baggage_item('Kiff-loves', 'Amy')
if self.check_baggage_values():
assert span.context.baggage == {'Kiff-loves': 'Amy'}
pass
def test_text_propagation(self):
with self.tracer().start_span(operation_name='Bender') as span:
text_carrier = {}
self.tracer().inject(
span_context=span.context,
format=opentracing.Format.TEXT_MAP,
carrier=text_carrier)
extracted_ctx = self.tracer().extract(
format=opentracing.Format.TEXT_MAP,
carrier=text_carrier)
assert extracted_ctx.baggage == {}
def test_binary_propagation(self):
with self.tracer().start_span(operation_name='Bender') as span:
bin_carrier = bytearray()
self.tracer().inject(
span_context=span.context,
format=opentracing.Format.BINARY,
carrier=bin_carrier)
extracted_ctx = self.tracer().extract(
format=opentracing.Format.BINARY,
carrier=bin_carrier)
assert extracted_ctx.baggage == {}
def test_mandatory_formats(self):
formats = [
(Format.TEXT_MAP, {}),
(Format.HTTP_HEADERS, {}),
(Format.BINARY, bytearray()),
]
with self.tracer().start_span(operation_name='Bender') as span:
for fmt, carrier in formats:
# expecting no exceptions
span.tracer.inject(span.context, fmt, carrier)
span.tracer.extract(fmt, carrier)
def test_unknown_format(self):
custom_format = 'kiss my shiny metal ...'
with self.tracer().start_span(operation_name='Bender') as span:
with pytest.raises(opentracing.UnsupportedFormatException):
span.tracer.inject(span.context, custom_format, {})
with pytest.raises(opentracing.UnsupportedFormatException):
span.tracer.extract(custom_format, {})
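# A minimal sketch of how this mixin is meant to be used (the class below is a
# hypothetical example, not part of this file):
#   class NoopTracerAPICheck(APICompatibilityCheckMixin, unittest.TestCase):
#       def tracer(self):
#           return opentracing.Tracer()  # the no-op reference tracer
#       def check_baggage_values(self):
#           return False  # the no-op tracer does not store baggage values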
| 39.358852
| 79
| 0.612205
| 7,003
| 0.851325
| 0
| 0
| 0
| 0
| 0
| 0
| 2,499
| 0.303793
|
36c93b1ef9b9eeb9b865aada75df7cf42d64021f
| 29,950
|
py
|
Python
|
Colab/vision_transformer_dogs_and_cats_python_generator.py
|
Thanusan19/Vision_Transformer
|
80179d57e617ef6cd9599de93c7c7633f891f9a9
|
[
"Apache-2.0"
] | 1
|
2021-07-02T13:55:11.000Z
|
2021-07-02T13:55:11.000Z
|
Colab/vision_transformer_dogs_and_cats_python_generator.py
|
Thanusan19/Vision_Transformer
|
80179d57e617ef6cd9599de93c7c7633f891f9a9
|
[
"Apache-2.0"
] | null | null | null |
Colab/vision_transformer_dogs_and_cats_python_generator.py
|
Thanusan19/Vision_Transformer
|
80179d57e617ef6cd9599de93c7c7633f891f9a9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Vision Transformer Dogs and Cats Python Generator
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/12u7r2OMkt_rFmOQq2g5FtX7Z0EbyPYFN
See code at https://github.com/google-research/vision_transformer/
See paper at https://arxiv.org/abs/2010.11929
This Colab allows you to run the [JAX](https://jax.readthedocs.org) implementation of the Vision Transformer.
## 1) Using generator
### 1.1) Download the dataset and unpack it on the colab machine
"""
!pwd
!mkdir dataset
!ls
!wget -c "https://download.microsoft.com/download/3/E/1/3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_3367a.zip" -P dataset/
!ls dataset/
# Quiet and overwrite, will create folder and unpack in CatsAndDogs
!unzip -qo dataset/kagglecatsanddogs_3367a.zip -d dataset/CatsAndDogs
# Print the number of cats and dogs images in the set
!ls -l dataset/CatsAndDogs/PetImages/Cat/*.jpg | wc -l
!ls -l dataset/CatsAndDogs/PetImages/Dog/*.jpg | wc -l
# Sanity check for later
!ls dataset/CatsAndDogs/PetImages/Cat/*.jpg | sed -E 's#.*/##' | sort > /tmp/Cats.txt
!ls dataset/CatsAndDogs/PetImages/Dog/*.jpg | sed -E 's#.*/##' | sort > /tmp/Dogs.txt
!diff /tmp/Cats.txt /tmp/Dogs.txt
"""### 1.2) Find the corrupted files
#### Find the corrupted files
"""
# Will be quiet, except for errors
# see [https://peteris.rocks/blog/quiet-and-unattended-installation-with-apt-get/]
!apt-get install imagemagick -qq > /dev/null
# Examples that are corrupted : Cat/1418.jpg, Cat/4293.jpg, Cat/666.jpg
# Can take a bit of time to check all 25000 images
!mogrify -set comment 'Image rewritten with ImageMagick' dataset/CatsAndDogs/PetImages/*/*.jpg |& tee dataset/CatsAndDogs/mogrify_output
#!cat dataset/CatsAndDogs/mogrify_output
"""#### Fix some problems with a certain picture in Cats (handmade)"""
# Sanity check for later
!ls dataset/CatsAndDogs/PetImages/Cat/*.jpg | sed -E 's#.*/##' | sort > /tmp/Cats.txt
!ls dataset/CatsAndDogs/PetImages/Dog/*.jpg | sed -E 's#.*/##' | sort > /tmp/Dogs.txt
!diff /tmp/Cats.txt /tmp/Dogs.txt
# Cat 10404 has three versions...
from google.colab import files
import time
files.view('dataset/CatsAndDogs/PetImages/Cat/10404-0.jpg')
time.sleep(0.5)
files.view('dataset/CatsAndDogs/PetImages/Cat/10404-1.jpg')
time.sleep(0.5)
files.view('dataset/CatsAndDogs/PetImages/Cat/10404-2.jpg')
!rm dataset/CatsAndDogs/PetImages/Cat/10404-1.jpg dataset/CatsAndDogs/PetImages/Cat/10404-2.jpg
!mv dataset/CatsAndDogs/PetImages/Cat/10404-0.jpg dataset/CatsAndDogs/PetImages/Cat/10404.jpg
# Sanity check for later
!ls dataset/CatsAndDogs/PetImages/Cat/*.jpg | sed -E 's#.*/##' | sort > /tmp/Cats.txt
!ls dataset/CatsAndDogs/PetImages/Dog/*.jpg | sed -E 's#.*/##' | sort > /tmp/Dogs.txt
!diff /tmp/Cats.txt /tmp/Dogs.txt
"""### 1.3) Create the exclusion and description files
#### Functions to create the exclusion list and the global description
"""
from pathlib import Path
import re
import time
def checkExistanceAndEmptiness(output_file_path:str, doOverwrite:bool):
okayToOverwrite = True
mode = 'r'  # default so that mode is always defined, even when not over-writing
output_path = Path(output_file_path)
if output_path.exists():
print('File exists')
if output_path.stat().st_size != 0:
print('File is not empty')
if not doOverwrite:
okayToOverwrite = False
print('not over-writing')
else:
mode = 'w+'
print('over-writing')
else:
print('File is empty')
mode = 'w+'
else:
print('File does not exist')
mode = 'w'
return mode, okayToOverwrite
def createExclusionFile(dataset_dir_path:str, mogrify_output_file_path:str,
output_file_path:str, doOverwrite:bool=False):
"""
dataset_dir_path: path to the dataset folder
mogrify_output_file_path: path to the mogrify output listing the corrupted images
output_file_path: path of the file to create
doOverwrite: overwrite the file if it already exists when this parameter
is set to True (False by default).
"""
# Check whether the file exists and pick the write (or write/read) mode,
# as well as the boolean saying whether the file may be overwritten
mode, okayToOverwrite = checkExistanceAndEmptiness(output_file_path, doOverwrite)
dataset_path = Path(dataset_dir_path)
output_path = Path(output_file_path)
print(dataset_path)
if okayToOverwrite:
with output_path.open(mode) as outfile:
# Writing to the file
# Read the mogrify output file
mogrify_output = Path(mogrify_output_file_path)
regex_files = re.compile('dataset/.*/[0-9]*.jpg')
added_lines = []
with mogrify_output.open('r') as infile:
for line in infile.readlines():
# time.sleep(1)
if line.endswith("\n"):
line = line[:-1]
first_match = regex_files.findall(line)[0]
first_path = Path(first_match)
string = str(first_path.relative_to(dataset_path))
# string = first_match.replace(str(dataset_path)+"/", "")
if string not in added_lines:
outfile.write(string+"\n")
added_lines.append(string)
def createGlobalDescription(dataset_dir_path:str, exclude_img_file_path:str,
output_file_path:str, doOverwrite:bool=False):
"""
Generates the description file for the whole dataset.
dataset_dir_path: path to the dataset folder
exclude_img_file_path: path to an exclusion file listing corrupted files,
one per line, of the form:
path/to/file1.jpg
path/to/file2.jpg
path/to/file3.jpg
path/to/file4.jpg
output_file_path: path of the file to create
doOverwrite: overwrite the file if it already exists when this parameter
is set to True (False by default).
"""
# Read the exclusion file
exclude_path = Path(exclude_img_file_path)
exclude_img_list = []
with exclude_path.open('r') as file:
for line in file.readlines():
if line.endswith("\n"):
line = line[:-1]
line = str(Path(line)) # To be able to compare it to other file path
#print("exclude file line :", line)
exclude_img_list.append(line)
print("exclude_img_list", exclude_img_list)
# Count which class has the most excluded files, so that the same number
# is kept on each side
count_cat = 0; count_dog = 0
for exclude_file in exclude_img_list:
#print("Cat or Dog ?", exclude_file.split("/")[-2])
if exclude_file.split("/")[-2] == 'Cat':
count_cat += 1
else:
count_dog += 1
print("count_cat", count_cat, "count_dog", count_dog)
left_to_exclude_dogs = count_cat-count_dog if count_cat >= count_dog else 0
left_to_exclude_cats = count_dog-count_cat if count_dog >= count_cat else 0
# Check whether the file exists and pick the write (or write/read) mode,
# as well as the boolean saying whether the file may be overwritten
mode, okayToOverwrite = checkExistanceAndEmptiness(output_file_path, doOverwrite)
output_path = Path(output_file_path)
# Write the file
if okayToOverwrite:
with output_path.open(mode) as file:
# Writing to the file
ds_dir_path = Path(dataset_dir_path)
#print("ds_dir_path", ds_dir_path)
class_num = -1
for class_dir in ds_dir_path.joinpath("PetImages").iterdir():
if class_dir.is_dir():
class_num += 1
print(" class_dir", class_dir)
print(" class_num", class_num)
if str(class_dir).endswith('Cat'):
left_to_exclude_count = left_to_exclude_cats
print(" left_to_exclude_count for Cats is :", left_to_exclude_count)
else:
left_to_exclude_count = left_to_exclude_dogs
print(" left_to_exclude_count for Dogs is :", left_to_exclude_count)
added_count = 0
for class_img in class_dir.iterdir():
if class_img.match('[0-9]*.jpg'):
local_image_path = class_img.relative_to(ds_dir_path)
# Check for exclusion
#print("class_img:", class_img)
#print("exclude_img_list:", exclude_img_list)
#print("class_img relative to:", str(class_img.relative_to(ds_dir_path)))
#time.sleep(2)
if str(local_image_path) not in exclude_img_list:
#print(" ds_dir_path", ds_dir_path)
#print(" class_dir", class_dir)
#print(" class_img", class_img)
if left_to_exclude_count > 0:
left_to_exclude_count -= 1
#print(" class_img", class_img)
print(" > that was a left to exclude", local_image_path)
#time.sleep(1)
else:
file.write(str(local_image_path) + "\t" + str(class_num) + "\n")
added_count += 1
else:
#print(" class_img", class_img)
print(" > excluded from the exclusion list", local_image_path)
#time.sleep(1)
if str(class_dir).endswith('Cat'):
print("Added", added_count, "cats to the description file")
else:
print("Added", added_count, "dogs to the description file")
"""#### Create the exclusion list and the global description"""
createExclusionFile(dataset_dir_path='./dataset/CatsAndDogs',
mogrify_output_file_path='./dataset/CatsAndDogs/mogrify_output',
output_file_path='./dataset/CatsAndDogs/exclude.txt',
doOverwrite=True)
createGlobalDescription(dataset_dir_path='./dataset/CatsAndDogs',
exclude_img_file_path='./dataset/CatsAndDogs/exclude.txt',
output_file_path='./dataset/CatsAndDogs/description.txt',
doOverwrite=True)
"""### 1.4) Create a training and a test set
##### The python generator for the dataset
"""
from pathlib import Path
import tensorflow as tf
import numpy as np
import cv2
import random
import math
class MyDogsCats:
def __init__(self, ds_description_path:str, dataset_path:str, set_type:str, train_prop:float) -> None:
"""
ds_description_path: file listing the path of every dataset image and its class.
Example file (tab between the path and the class):
/some/dir/cat/01.jpg 0
/some/dir/dog/01.jpg 1
Etc ...
"""
# Read the description file and group by class
img_list_par_classes = {}
path = Path(ds_description_path)
with path.open('r') as file:
for line in file.readlines():
if line.endswith("\n"):
line = line[:-1]
splits = line.split("\t")
if line != "":
img_text = splits[0]
lbl_text = int(splits[1])
if lbl_text in img_list_par_classes.keys():
img_list_par_classes[lbl_text].append(img_text)
else:
img_list_par_classes[lbl_text] = [img_text]
#print(img_list_par_classes)
# Build the train OR test list
self._img_list = []
self._lbl_list = []
self._num_class = len(img_list_par_classes)
for num_class in img_list_par_classes:
# Define the proportions
num_files = len(img_list_par_classes[num_class])
if set_type == "train":
num_per_class_to_keep = math.ceil((num_files // self._num_class) * train_prop)
class_files = img_list_par_classes[num_class][0:num_per_class_to_keep]
elif set_type == "test":
num_per_class_to_keep = math.floor((num_files // self._num_class) * (1 - train_prop))
class_files = img_list_par_classes[num_class][-num_per_class_to_keep:]
else:
class_files = img_list_par_classes[num_class]
# Add the matching images to the image list
self._img_list.extend(class_files)
# Same for the labels
#print("num_class:", num_class)
#print("type num_class:", type(num_class))
#print("len num_class:", len(class_files))
self._lbl_list.extend([num_class for i in range(len(class_files))])
#print("_img_list", self._img_list[0:100])
#print("_lbl_list", self._lbl_list[0:100])
assert(len(self._lbl_list) == len(self._img_list))
self.num_samples = len(self._lbl_list)
if set_type == "train" or set_type == "test":
self._set_type = set_type
else:
self._set_type = "whole"
self._img_size = 384
self._img_dim = (self._img_size, self._img_size)
self._num_channels = 3
self._one_hot_depth = 2
self._ds_path = Path(dataset_path)
def getDataset(self):
generator = self._generator
return tf.data.Dataset.from_generator(generator,
args=[],
output_types={'image': tf.float32, 'label': tf.int32},
output_shapes={'image': tf.TensorShape((self._img_size, self._img_size, self._num_channels)),
'label': tf.TensorShape((self._one_hot_depth))})
def _generator(self):
img_list = self._img_list
lbl_list = self._lbl_list
# Shuffle
c = list(zip(img_list, lbl_list))
random.shuffle(c)
img_list, lbl_list = zip(*c)
for i in range(self.num_samples):
#print('Reading from :', img_list[i])
#print('Good path :', self._ds_path/img_list[i])
#self._ds_path/img_list[i]
#print(self._ds_path/img_list[i])
# img_path_i = Path(img_list[i])
im = cv2.imread(str(self._ds_path/img_list[i]),-1)
if im is None:
i = 0
im = cv2.imread(str(self._ds_path/img_list[0]),-1)
if len(im.shape) < 3:
im = np.repeat(np.expand_dims(im, -1), 3, -1)
#print(type(im))
img = cv2.resize(im, self._img_dim)
img = img/255.0
#img = np.expand_dims(im, -1)
lbl = tf.one_hot(lbl_list[i], depth=self._one_hot_depth, dtype=tf.int32)
yield {'image': img, 'label': lbl}
"""## 2) ViT Colab
##### Copyright 2020 Google LLC.
"""
#@title Licensed under the Apache License, Version 2.0 (the "License");
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""<a href="https://colab.research.google.com/github/google-research/vision_transformer/blob/master/vit_jax.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
### Setup
Needs to be executed once in every VM.
The cell below downloads the code from GitHub and installs the necessary dependencies.
"""
#@markdown Select whether you would like to store data in your personal drive.
#@markdown
#@markdown If you select **yes**, you will need to authorize Colab to access
#@markdown your personal drive
#@markdown
#@markdown If you select **no**, then any changes you make will disappear when
#@markdown this Colab's VM restarts after some time of inactivity...
use_gdrive = 'yes' #@param ["yes", "no"]
if use_gdrive == 'yes':
from google.colab import drive
drive.mount('/gdrive')
root = '/gdrive/My Drive/vision_transformer_colab'
import os
if not os.path.isdir(root):
os.mkdir(root)
os.chdir(root)
print(f'\nChanged CWD to "{root}"')
else:
from IPython import display
display.display(display.HTML(
'<h1 style="color:red">CHANGES NOT PERSISTED</h1>'))
# Clone repository and pull latest changes.
![ -d vision_transformer ] || git clone --depth=1 https://github.com/google-research/vision_transformer
!cd vision_transformer && git pull
!pip install -qr vision_transformer/vit_jax/requirements.txt
#!pip install -r vision_transformer/vit_jax/requirements.txt
"""### Imports"""
# Shows all available pre-trained models.
!gsutil ls -lh gs://vit_models/*
"""For now let's try with `ViT-B_16` (pre-trained on imagenet21k, no fine tunning)."""
# Download a pre-trained model.
model = 'ViT-B_16'
![ -e "$model".npz ] || gsutil cp gs://vit_models/imagenet21k/"$model".npz .
#@markdown TPU setup : Boilerplate for connecting JAX to TPU.
import os
if 'google.colab' in str(get_ipython()) and 'COLAB_TPU_ADDR' in os.environ:
# Make sure the Colab Runtime is set to Accelerator: TPU.
import requests
if 'TPU_DRIVER_MODE' not in globals():
url = 'http://' + os.environ['COLAB_TPU_ADDR'].split(':')[0] + ':8475/requestversion/tpu_driver0.1-dev20191206'
resp = requests.post(url)
TPU_DRIVER_MODE = 1
# The following is required to use TPU Driver as JAX's backend.
from jax.config import config
config.FLAGS.jax_xla_backend = "tpu_driver"
config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR']
print('Registered TPU:', config.FLAGS.jax_backend_target)
else:
print('No TPU detected. Can be changed under "Runtime/Change runtime type".')
import flax
import jax
from matplotlib import pyplot as plt
import numpy as np
import tqdm
# Shows the number of available devices.
# In a CPU/GPU runtime this will be a single device.
# In a TPU runtime this will be 8 cores.
jax.local_devices()
# Open some code files in a split editor on the right.
# You can open more files in the file tab on the left.
from google.colab import files
files.view('vision_transformer/vit_jax/checkpoint.py')
files.view('vision_transformer/vit_jax/input_pipeline.py')
files.view('vision_transformer/vit_jax/models.py')
files.view('vision_transformer/vit_jax/momentum_clip.py')
files.view('vision_transformer/vit_jax/train.py')
files.view('vision_transformer/vit_jax/hyper.py')
# Commented out IPython magic to ensure Python compatibility.
# Import files from repository.
# Updating the files in the editor on the right will immediately update the
# modules by re-importing them.
import sys
if './vision_transformer' not in sys.path:
sys.path.append('./vision_transformer')
# From https://ipython.org/ipython-doc/3/config/extensions/autoreload.html
# Reload all modules (except those excluded by %aimport) every time before
# executing the Python code typed.
# %load_ext autoreload
# %autoreload 2
from vit_jax import checkpoint
from vit_jax import hyper
from vit_jax import input_pipeline
from vit_jax import logging
from vit_jax import models
from vit_jax import momentum_clip
from vit_jax import train
logger = logging.setup_logger('./logs')
# Helper functions for images.
labelnames = dict(
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar10=('airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'),
# https://www.cs.toronto.edu/~kriz/cifar.html
cifar100=('apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'computer_keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman', 'worm'),
# Addition for Dogs and Cats
dogscats=('dog', 'cat')
)
def make_label_getter(dataset):
"""Returns a function converting label indices to names."""
def getter(label):
if dataset in labelnames:
return labelnames[dataset][label]
return f'label={label}'
return getter
def show_img(img, ax=None, title=None):
"""Shows a single image."""
if ax is None:
ax = plt.gca()
ax.imshow(img[...])
ax.set_xticks([])
ax.set_yticks([])
if title:
ax.set_title(title)
def show_img_grid(imgs, titles):
"""Shows a grid of images."""
n = int(np.ceil(len(imgs)**.5))
_, axs = plt.subplots(n, n, figsize=(3 * n, 3 * n))
for i, (img, title) in enumerate(zip(imgs, titles)):
img = (img + 1) / 2 # Denormalize
show_img(img, axs[i // n][i % n], title)
"""### Load the Python Generator"""
def _shard(data):
data['image'] = tf.reshape(data['image'], [num_devices, -1, 384, 384, 3])
data['label'] = tf.reshape(data['label'], [num_devices, -1, 2])
return data
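# Descriptive note: _shard reshapes each host-side batch into
# [num_local_devices, per_device_batch, 384, 384, 3] so that jax.pmap can map
# the forward pass across all local devices; with the batch_size of 64 chosen
# below and 8 TPU cores this gives 64 // 8 = 8 images per device.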
num_devices = len(jax.local_devices())
# The bypass
batch_size = 64
num_classes = 2
dataset = 'dogscats'
dgscts_train = MyDogsCats(ds_description_path='/content/dataset/CatsAndDogs/description.txt',
dataset_path='/content/dataset/CatsAndDogs',
set_type='train',
train_prop=0.8)
dgscts_test = MyDogsCats(ds_description_path='/content/dataset/CatsAndDogs/description.txt',
dataset_path='/content/dataset/CatsAndDogs',
set_type='test',
train_prop=0.8)
ds_train = dgscts_train.getDataset().batch(batch_size, drop_remainder=True)
ds_test = dgscts_test.getDataset().batch(batch_size, drop_remainder=True)
if num_devices is not None:
ds_train = ds_train.map(_shard, tf.data.experimental.AUTOTUNE)
ds_test = ds_test.map(_shard, tf.data.experimental.AUTOTUNE)
ds_test = ds_test.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
ds_train = ds_train.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
"""### Load dataset"""
# Fetch a batch of test images for illustration purposes.
batch = next(iter(ds_test.as_numpy_iterator()))
# Note the shape : [num_local_devices, local_batch_size, h, w, c]
# print(batch)
print(batch['image'].shape)
print(batch['label'].shape)
# Show some images with their labels.
images, labels = batch['image'][1][:9], batch['label'][1][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
# Same as above, but with train images.
# Do you spot a difference?
# Check out input_pipeline.get_data() in the editor at your right to see how the
# images are preprocessed differently.
batch = next(iter(ds_train.as_numpy_iterator()))
images, labels = batch['image'][1][:9], batch['label'][1][:9]
titles = map(make_label_getter(dataset), labels.argmax(axis=1))
show_img_grid(images, titles)
[print(i.shape) for i in images]
"""### Load pre-trained"""
# Load model definition & initialize random parameters.
VisionTransformer = models.KNOWN_MODELS[model].partial(num_classes=num_classes)
_, params = VisionTransformer.init_by_shape(
jax.random.PRNGKey(0),
# Discard the "num_local_devices" dimension of the batch for initialization.
[(batch['image'].shape[1:], batch['image'].dtype.name)])
# Load and convert pretrained checkpoint.
# This involves loading the actual pre-trained model results, but then also
# modifying the parameters a bit, e.g. changing the final layers, and resizing
# the positional embeddings.
# For details, refer to the code and to the methods of the paper.
params = checkpoint.load_pretrained(
pretrained_path=f'{model}.npz',
init_params=params,
model_config=models.CONFIGS[model],
logger=logger,
)
"""### Evaluate"""
# So far, all our data is in the host memory. Let's now replicate the arrays
# into the devices.
# This will make every array in the pytree params become a ShardedDeviceArray
# that has the same data replicated across all local devices.
# For TPU it replicates the params in every core.
# For a single GPU this simply moves the data onto the device.
# For CPU it simply creates a copy.
params_repl = flax.jax_utils.replicate(params)
print('params.cls:', type(params['cls']).__name__, params['cls'].shape)
print('params_repl.cls:', type(params_repl['cls']).__name__, params_repl['cls'].shape)
# Then map the call to our model's forward pass onto all available devices.
vit_apply_repl = jax.pmap(VisionTransformer.call)
def get_accuracy(params_repl):
"""Returns accuracy evaluated on the test set."""
good = total = 0
steps = dgscts_test.num_samples // batch_size
#steps = input_pipeline.get_dataset_info(dataset, 'test')['num_examples'] // batch_size
for _, batch in zip(tqdm.notebook.trange(steps), ds_test.as_numpy_iterator()):
predicted = vit_apply_repl(params_repl, batch['image'])
is_same = predicted.argmax(axis=-1) == batch['label'].argmax(axis=-1)
good += is_same.sum()
total += len(is_same.flatten())
return good / total
# Random performance without fine-tuning.
get_accuracy(params_repl)
"""### Fine-tune"""
# 100 Steps take approximately 15 minutes in the TPU runtime.
total_steps = 10 ## 100
warmup_steps = 5
decay_type = 'cosine'
grad_norm_clip = 1
# This controls in how many forward passes the batch is split. 8 works well with
# a TPU runtime that has 8 devices. 64 should work on a GPU. You can of course
# also adjust the batch_size above, but that would require you to adjust the
# learning rate accordingly.
accum_steps = 8
base_lr = 0.03
# Check out train.make_update_fn in the editor on the right side for details.
update_fn_repl = train.make_update_fn(VisionTransformer.call, accum_steps)
# We use a momentum optimizer that uses half precision for state to save
# memory. It also implements gradient clipping.
opt = momentum_clip.Optimizer(grad_norm_clip=grad_norm_clip).create(params)
opt_repl = flax.jax_utils.replicate(opt)
lr_fn = hyper.create_learning_rate_schedule(total_steps, base_lr, decay_type, warmup_steps)
# Prefetch entire learning rate schedule onto devices. Otherwise we would have
# a slow transfer from host to devices in every step.
lr_iter = hyper.lr_prefetch_iter(lr_fn, 0, total_steps)
# Initialize PRNGs for dropout.
update_rngs = jax.random.split(jax.random.PRNGKey(0), jax.local_device_count())
# The world's simplest training loop.
# Completes in ~20 min on the TPU runtime.
for step, batch, lr_repl in zip(
tqdm.notebook.trange(1, total_steps + 1),
ds_train.as_numpy_iterator(),
lr_iter
):
print("loop", step, batch['image'].shape, batch['label'].shape)
opt_repl, loss_repl, update_rngs = update_fn_repl(
opt_repl, lr_repl, batch, update_rngs)
print("fini la loop", type(opt_repl), type(loss_repl), type(update_rngs))
# Should be ~97.2% for CIFAR10
# Should be ~71.2% for CIFAR100
get_accuracy(opt_repl.target)
"""### Inference"""
# Download model pre-trained on imagenet21k and fine-tuned on imagenet2012.
![ -e "$model"_imagenet2012.npz ] || gsutil cp gs://vit_models/imagenet21k+imagenet2012/"$model".npz "$model"_imagenet2012.npz
VisionTransformer = models.KNOWN_MODELS[model].partial(num_classes=1000)
# Load and convert pretrained checkpoint.
params = checkpoint.load(f'{model}_imagenet2012.npz')
params['pre_logits'] = {} # Need to restore empty leaf for Flax.
# Get imagenet labels.
!wget https://storage.googleapis.com/bit_models/ilsvrc2012_wordnet_lemmas.txt
imagenet_labels = dict(enumerate(open('ilsvrc2012_wordnet_lemmas.txt')))
# Get a random picture with the correct dimensions.
!wget https://picsum.photos/384 -O picsum.jpg
import PIL
img = PIL.Image.open('picsum.jpg')
img
# Predict on a batch with a single item (note very efficient TPU usage...)
logits, = VisionTransformer.call(params, (np.array(img) / 128 - 1)[None, ...])
preds = flax.nn.softmax(logits)
for idx in preds.argsort()[:-11:-1]:
print(f'{preds[idx]:.5f} : {imagenet_labels[idx]}', end='')
"""## 3) Nos test
### Resize sans garder les proportions
"""
# Get a random picture with the correct dimensions.
!wget https://lorraine.gatech.edu/sites/default/files/uploads/images/superblock_images/metz-campus.jpeg -O pic_gatech.jpg
import PIL
img = PIL.Image.open('pic_gatech.jpg')
#img
img = img.resize((384,384))
img
# Predict on a batch with a single item (note very efficient TPU usage...)
logits, = VisionTransformer.call(params, (np.array(img) / 128 - 1)[None, ...])
preds = flax.nn.softmax(logits)
for idx in preds.argsort()[:-11:-1]:
print(f'{preds[idx]:.5f} : {imagenet_labels[idx]}', end='')
"""### Resize en gardant une propostion carré"""
# Get a random picture with the correct dimensions.
!wget https://lorraine.gatech.edu/sites/default/files/uploads/images/superblock_images/metz-campus.jpeg -O pic_gatech.jpg
import PIL
img = PIL.Image.open('pic_gatech.jpg')
(w, h) = (img.width, img.height)
if w>=h:
crop_box = ((w/2)-(h/2), 0, (w/2)+(h/2), h)
else:
# PIL box is (left, upper, right, lower): crop vertically for portrait images
crop_box = (0, (h/2)-(w/2), w, (h/2)+(w/2))
img = img.resize((384,384), box=crop_box)
img
# Predict on a batch with a single item (note very efficient TPU usage...)
logits, = VisionTransformer.call(params, (np.array(img) / 128 - 1)[None, ...])
preds = flax.nn.softmax(logits)
for idx in preds.argsort()[:-11:-1]:
print(f'{preds[idx]:.5f} : {imagenet_labels[idx]}', end='')
| 38.007614
| 1,046
| 0.67576
| 4,425
| 0.147638
| 1,047
| 0.034933
| 0
| 0
| 0
| 0
| 13,541
| 0.451788
|
36c9545921e82accc771994b4028870845e16cb0
| 19,349
|
py
|
Python
|
tests/test_cli.py
|
jameswilkerson/elex
|
27733e3c473fef48676f8bdd56247bee49ad32ea
|
[
"Apache-2.0"
] | 183
|
2015-11-25T15:13:47.000Z
|
2022-01-07T23:02:36.000Z
|
tests/test_cli.py
|
jameswilkerson/elex
|
27733e3c473fef48676f8bdd56247bee49ad32ea
|
[
"Apache-2.0"
] | 198
|
2015-11-24T16:48:48.000Z
|
2020-10-26T10:38:56.000Z
|
tests/test_cli.py
|
jameswilkerson/elex
|
27733e3c473fef48676f8bdd56247bee49ad32ea
|
[
"Apache-2.0"
] | 65
|
2015-12-03T21:29:38.000Z
|
2021-08-10T20:03:49.000Z
|
import csv
import sys
import json
import tests
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
from six import with_metaclass
from elex.cli.app import ElexApp
from collections import OrderedDict
DATA_FILE = 'tests/data/20151103_national.json'
DATA_ELECTION_DATE = '2015-11-03'
DELSUM_DATA_FILE = 'tests/data/20160118_delsum.json'
DELSUPER_DATA_FILE = 'tests/data/20160118_delsuper.json'
ELECTIONS_DATA_FILE = 'tests/data/00000000_elections.json'
DISTRICT_DATA_FILE = 'tests/data/20160201_district_results.json'
TEST_COMMANDS = [
'races',
'candidates',
'reporting-units',
'candidate-reporting-units',
'results',
]
class ElexCLICSVTestMeta(type):
def __new__(mcs, name, bases, dict):
def gen_fields_test(command):
"""
Dynamically generate a fields test
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command)
api_data = getattr(self, command.replace('-', '_'))
api_fields = api_data[0].serialize().keys()
self.assertEqual(cli_fields, list(api_fields))
return test
def gen_length_test(command):
"""
Dynamically generate a data length test
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command)
api_data = getattr(self, command.replace('-', '_'))
self.assertEqual(len(cli_data), len(api_data))
return test
def gen_data_test(command):
"""
Dynamically generate a data test
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command)
api_data = getattr(self, command.replace('-', '_'))
for i, row in enumerate(cli_data):
for k, v in api_data[i].serialize().items():
if v is None:
v = ''
self.assertEqual(row[k], str(v))
return test
def gen_timestamp_test(command):
"""
Generate test to ensure timestamp field is set
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command,
with_timestamp=True)
self.assertEqual(cli_fields[-1], 'timestamp')
return test
def gen_timestamp_data_test(command):
"""
Generate test to ensure timestamp data is numeric
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command,
with_timestamp=True)
for row in cli_data:
try:
self.assertTrue(unicode(row['timestamp']).isnumeric())
except NameError:
self.assertTrue(str(row['timestamp']).isnumeric())
return test
def gen_batch_name_data_test(command):
"""
Generate test to ensure the batch name is set
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command,
batch_name='batch-01')
for row in cli_data:
self.assertEqual(row['batchname'], 'batch-01')
return test
for command in TEST_COMMANDS:
fields_test_name = 'test_csv_{0}_fields'.format(
command.replace('-', '_')
)
dict[fields_test_name] = gen_fields_test(command)
length_test_name = 'test_csv_{0}_length'.format(
command.replace('-', '_')
)
dict[length_test_name] = gen_length_test(command)
data_test_name = 'test_csv_{0}_data'.format(
command.replace('-', '_')
)
dict[data_test_name] = gen_data_test(command)
timestamp_test_name = 'test_csv_{0}_timestamp'.format(
command.replace('-', '_')
)
dict[timestamp_test_name] = gen_timestamp_test(command)
timestamp_data_test_name = 'test_csv_{0}_timestamp_data'.format(
command.replace('-', '_')
)
dict[timestamp_data_test_name] = gen_timestamp_data_test(command)
batch_name_data_test_name = 'test_csv_{0}_batch_name_data'.format(
command.replace('-', '_')
)
dict[batch_name_data_test_name] = gen_batch_name_data_test(command)
return type.__new__(mcs, name, bases, dict)
class ElexCLICSVTestCase(
with_metaclass(ElexCLICSVTestMeta, tests.ElectionResultsTestCase)
):
"""
This testing class is mostly dynamically generated by its metaclass.
The goal of the CLI tests is to make sure the CLI output matches the
Python API. The API tests guarantee the validity of the data, while these
tests guarantee the CLI provides the same data in CSV format.
"""
def test_csv_elections_fields(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(
fields,
['id', 'electiondate', 'liveresults', 'testresults']
)
def test_csv_elections_length(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(len(data), 11)
def test_csv_elections_date(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(data[4]['electiondate'], '2015-08-04')
def test_csv_elections_liveresults(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(data[4]['liveresults'], 'False')
def test_csv_elections_testresults(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(data[4]['testresults'], 'True')
def test_csv_next_election_fields(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(
fields,
['id', 'electiondate', 'liveresults', 'testresults']
)
def test_csv_next_election_length(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(len(data), 1)
def test_csv_next_election_date(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(data[0]['electiondate'], '2015-08-25')
def test_csv_next_election_liveresults(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(data[0]['liveresults'], 'True')
def test_csv_next_election_testresults(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(data[0]['testresults'], 'False')
def test_csv_delegate_fields(self):
fields, data = self._test_command(command='delegates')
self.assertEqual(
fields,
[
'level', 'party_total', 'superdelegates_count', 'last',
'state', 'candidateid', 'party_need', 'party',
'delegates_count', 'id', 'd1', 'd7', 'd30'
]
)
def test_csv_delegate_state_count(self):
fields, data = self._test_command(command='delegates')
number_of_states = list(
set([d['state'] for d in data if d['level'] == 'state'])
)
self.assertEqual(58, len(number_of_states))
def test_csv_results_resultslevel(self):
fields, data = self._test_command(
command='results',
datafile=DISTRICT_DATA_FILE,
resultslevel='district'
)
self.assertEqual(data[17]['reportingunitname'], 'District 1')
def _test_command(
self,
command,
datafile=DATA_FILE,
delsum_datafile=DELSUM_DATA_FILE,
delsuper_datafile=DELSUPER_DATA_FILE,
electiondate=DATA_ELECTION_DATE,
resultslevel=None,
with_timestamp=False,
batch_name=False
):
"""
Execute an `elex` sub-command; returns fieldnames and rows
"""
stdout_backup = sys.stdout
sys.stdout = StringIO()
argv = [command]
if electiondate is not None:
argv.append(electiondate)
argv = argv + ['--data-file', datafile]
argv = argv + ['--delegate-sum-file', delsum_datafile]
argv = argv + ['--delegate-super-file', delsuper_datafile]
argv = argv + ['--results-level', resultslevel]
if with_timestamp:
argv = argv + ['--with-timestamp']
if batch_name:
argv = argv + ['--batch-name', batch_name]
app = ElexApp(argv=argv)
app.setup()
app.log.set_level('FATAL')
app.run()
lines = sys.stdout.getvalue().split('\n')
reader = csv.DictReader(lines)
sys.stdout.close()
sys.stdout = stdout_backup
return reader.fieldnames, list(reader)
class ElexCLIJSONTestMeta(type):
def __new__(mcs, name, bases, dict):
def gen_fields_test(command):
"""
Dynamically generate a fields test
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command)
api_data = getattr(self, command.replace('-', '_'))
api_fields = api_data[0].serialize().keys()
self.assertEqual(cli_fields, list(api_fields))
return test
def gen_length_test(command):
"""
Dynamically generate a data length test
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command)
api_data = getattr(self, command.replace('-', '_'))
self.assertEqual(len(cli_data), len(api_data))
return test
def gen_data_test(command):
"""
Dynamically generate a data test
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command)
api_data = getattr(self, command.replace('-', '_'))
for i, row in enumerate(cli_data):
for k, v in api_data[i].serialize().items():
self.assertEqual(row[k], v)
return test
def gen_timestamp_test(command):
"""
Generate test to ensure timestamp field is set
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command,
with_timestamp=True)
self.assertEqual(cli_fields[-1], 'timestamp')
return test
def gen_timestamp_data_test(command):
"""
Generate test to ensure timestamp data is an integer
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command,
with_timestamp=True)
for row in cli_data:
try:
self.assertTrue(unicode(row['timestamp']).isnumeric())
except NameError:
self.assertTrue(str(row['timestamp']).isnumeric())
return test
def gen_batch_name_data_test(command):
"""
Generate test to ensure the batch name is set
"""
def test(self):
cli_fields, cli_data = self._test_command(command=command,
batch_name='batch-01')
for row in cli_data:
self.assertEqual(row['batchname'], 'batch-01')
return test
for command in TEST_COMMANDS:
fields_test_name = 'test_json_{0}_fields'.format(
command.replace('-', '_')
)
dict[fields_test_name] = gen_fields_test(command)
length_test_name = 'test_json_{0}_length'.format(
command.replace('-', '_')
)
dict[length_test_name] = gen_length_test(command)
data_test_name = 'test_json_{0}_data'.format(
command.replace('-', '_')
)
dict[data_test_name] = gen_data_test(command)
timestamp_data_test_name = 'test_json_{0}_data_timestamp'.format(
command.replace('-', '_')
)
dict[timestamp_data_test_name] = gen_timestamp_test(command)
timestamp_data_test_name = 'test_json_{0}_timestamp_data'.format(
command.replace('-', '_')
)
dict[timestamp_data_test_name] = gen_timestamp_data_test(command)
batch_name_data_test_name = 'test_json_{0}_batch_name_data'.format(
command.replace('-', '_')
)
dict[batch_name_data_test_name] = gen_batch_name_data_test(command)
return type.__new__(mcs, name, bases, dict)
class ElexCLIJSONTestCase(
with_metaclass(ElexCLIJSONTestMeta, tests.ElectionResultsTestCase)
):
"""
This testing class is mostly dynamically generated by its metaclass.
The goal of the CLI tests is to make sure the CLI output matches the
Python API. The API tests guarantee the validity of the data, while these
tests guarantee the CLI provides the same data in JSON format.
"""
def test_json_elections_fields(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(
fields,
['id', 'electiondate', 'liveresults', 'testresults']
)
def test_json_elections_length(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(len(data), 11)
def test_json_elections_date(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(data[4]['electiondate'], '2015-08-04')
def test_json_elections_liveresults(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(data[4]['liveresults'], False)
def test_json_elections_testresults(self):
fields, data = self._test_command(
command='elections',
datafile=ELECTIONS_DATA_FILE
)
self.assertEqual(data[4]['testresults'], True)
def test_json_next_election_fields(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(
fields,
['id', 'electiondate', 'liveresults', 'testresults']
)
def test_json_next_election_length(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(len(data), 1)
def test_json_next_election_date(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(data[0]['electiondate'], '2015-08-25')
def test_json_next_election_liveresults(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(data[0]['liveresults'], True)
def test_json_next_election_testresults(self):
fields, data = self._test_command(
command='next-election',
datafile=ELECTIONS_DATA_FILE,
electiondate='2015-08-04'
)
self.assertEqual(data[0]['testresults'], False)
def test_json_delegate_fields(self):
fields, data = self._test_command(command='delegates')
self.assertEqual(
fields,
[
'level', 'party_total', 'superdelegates_count', 'last',
'state', 'candidateid', 'party_need', 'party',
'delegates_count', 'id', 'd1', 'd7', 'd30'
]
)
def test_json_delegate_state_count(self):
fields, data = self._test_command(command='delegates')
number_of_states = list(
set([d['state'] for d in data if d['level'] == 'state'])
)
self.assertEqual(58, len(number_of_states))
def test_json_results_resultslevel(self):
fields, data = self._test_command(
command='results',
datafile=DISTRICT_DATA_FILE,
resultslevel='district'
)
self.assertEqual(data[17]['reportingunitname'], 'District 1')
def _test_command(
self,
command,
datafile=DATA_FILE,
delsum_datafile=DELSUM_DATA_FILE,
delsuper_datafile=DELSUPER_DATA_FILE,
electiondate=DATA_ELECTION_DATE,
resultslevel=None,
with_timestamp=False,
batch_name=False
):
"""
Execute an `elex` sub-command; returns fieldnames and rows
"""
stdout_backup = sys.stdout
sys.stdout = StringIO()
argv = [command]
argv.append(electiondate)
argv = argv + ['--data-file', datafile, '-o', 'json']
argv = argv + ['--delegate-sum-file', delsum_datafile]
argv = argv + ['--delegate-super-file', delsuper_datafile]
argv = argv + ['--results-level', resultslevel]
if with_timestamp:
argv = argv + ['--with-timestamp']
if batch_name:
argv = argv + ['--batch-name', batch_name]
app = ElexApp(argv=argv)
app.setup()
app.log.set_level('FATAL')
app.run()
json_data = sys.stdout.getvalue()
data = json.loads(json_data, object_pairs_hook=OrderedDict)
sys.stdout.close()
sys.stdout = stdout_backup
return list(data[0].keys()), data
| 33.826923
| 80
| 0.567213
| 18,662
| 0.964494
| 0
| 0
| 0
| 0
| 0
| 0
| 3,913
| 0.202233
|
36cad5c25faaf8cf1d768a98197ce4f6fa877fa3
| 4,321
|
py
|
Python
|
unipipeline/worker/uni_worker_consumer.py
|
aliaksandr-master/unipipeline
|
d8eac38534172aee59ab5777321cabe67f3779ef
|
[
"MIT"
] | null | null | null |
unipipeline/worker/uni_worker_consumer.py
|
aliaksandr-master/unipipeline
|
d8eac38534172aee59ab5777321cabe67f3779ef
|
[
"MIT"
] | 1
|
2021-09-14T13:08:13.000Z
|
2021-09-14T13:08:13.000Z
|
unipipeline/worker/uni_worker_consumer.py
|
aliaksandr-master/unipipeline
|
d8eac38534172aee59ab5777321cabe67f3779ef
|
[
"MIT"
] | null | null | null |
from typing import TypeVar, Generic, Optional, Type, Any, Union, Dict, TYPE_CHECKING
from unipipeline.errors.uni_payload_error import UniPayloadParsingError, UniAnswerPayloadParsingError
from unipipeline.errors.uni_sending_to_worker_error import UniSendingToWorkerError
from unipipeline.answer.uni_answer_message import UniAnswerMessage
from unipipeline.brokers.uni_broker_message_manager import UniBrokerMessageManager
from unipipeline.errors.uni_work_flow_error import UniWorkFlowError
from unipipeline.message.uni_message import UniMessage
from unipipeline.message_meta.uni_message_meta import UniMessageMeta, UniMessageMetaErrTopic, UniAnswerParams
from unipipeline.worker.uni_worker import UniWorker
from unipipeline.worker.uni_worker_consumer_manager import UniWorkerConsumerManager
from unipipeline.worker.uni_worker_consumer_message import UniWorkerConsumerMessage
from unipipeline.definitions.uni_worker_definition import UniWorkerDefinition
if TYPE_CHECKING:
from unipipeline.modules.uni_mediator import UniMediator
TInputMsgPayload = TypeVar('TInputMsgPayload', bound=UniMessage)
TAnswerMsgPayload = TypeVar('TAnswerMsgPayload', bound=Optional[UniMessage])
class UniWorkerConsumer(Generic[TInputMsgPayload, TAnswerMsgPayload]):
def __init__(self, definition: UniWorkerDefinition, mediator: 'UniMediator', worker_type: Type[UniWorker[TInputMsgPayload, TAnswerMsgPayload]]) -> None:
self._definition = definition
self._mediator = mediator
self._worker_manager = UniWorkerConsumerManager(self.send_to)
self._worker = worker_type(self._worker_manager)
self._uni_echo = mediator.echo.mk_child(f'worker[{definition.name}]')
self._input_message_type: Type[TInputMsgPayload] = mediator.get_message_type(self._definition.input_message.name) # type: ignore
self._answer_message_type: Optional[Type[TAnswerMsgPayload]] = mediator.get_message_type(self._definition.answer_message.name) if self._definition.answer_message is not None else None # type: ignore
self._current_meta: Optional[UniMessageMeta] = None
def send_to(self, worker: Union[Type['UniWorker[Any, Any]'], str], data: Union[Dict[str, Any], UniMessage], *, alone: bool = False, need_answer: bool = False) -> Optional[UniAnswerMessage[UniMessage]]:
wd = self._mediator.config.get_worker_definition(worker)
if wd.name not in self._definition.output_workers:
raise UniSendingToWorkerError(f'worker {wd.name} is not defined in workers->{self._definition.name}->output_workers')
if need_answer and not wd.need_answer:
raise UniWorkFlowError(f'you will get no response from worker {wd.name}')
if need_answer:
answ_params = UniAnswerParams(topic=self._definition.answer_topic, id=self._worker_manager.id)
return self._mediator.send_to(wd.name, data, parent_meta=self._current_meta, answer_params=answ_params, alone=alone)
self._mediator.send_to(wd.name, data, parent_meta=self._current_meta, answer_params=None, alone=alone)
return None
def process_message(self, meta: UniMessageMeta, manager: UniBrokerMessageManager) -> None:
self._current_meta = meta
msg = UniWorkerConsumerMessage[TInputMsgPayload](self._input_message_type, manager, meta)
try:
result: Optional[Union[TAnswerMsgPayload, Dict[str, Any]]] = self._worker.handle_message(msg)
except UniAnswerPayloadParsingError as e:
self._mediator.move_to_error_topic(self._definition, meta, UniMessageMetaErrTopic.HANDLE_MESSAGE_ERR, e)
except UniPayloadParsingError as e:
self._mediator.move_to_error_topic(self._definition, meta, UniMessageMetaErrTopic.MESSAGE_PAYLOAD_ERR, e)
# except Exception as e: # TODO: correct error handling
# self._mediator.move_to_error_topic(self._definition, meta, UniMessageMetaErrTopic.HANDLE_MESSAGE_ERR, e)
else:
if self._definition.need_answer:
try:
self._mediator.answer_to(self._definition.name, meta, result, unwrapped=self._definition.answer_unwrapped)
except UniSendingToWorkerError:
pass
if self._definition.ack_after_success:
msg.ack()
self._current_meta = None
| 61.728571
| 207
| 0.765564
| 3,143
| 0.727378
| 0
| 0
| 0
| 0
| 0
| 0
| 428
| 0.099051
|
36cae8cc11223214274fe92b0ac8c6515461f9fe
| 1,825
|
py
|
Python
|
funing/_ui/about.py
|
larryw3i/Funing
|
8ef88af8766f0246614517fa00f3b322ba722d6b
|
[
"MIT"
] | 1
|
2021-08-22T05:56:09.000Z
|
2021-08-22T05:56:09.000Z
|
funing/_ui/about.py
|
larryw3i/Funing
|
8ef88af8766f0246614517fa00f3b322ba722d6b
|
[
"MIT"
] | null | null | null |
funing/_ui/about.py
|
larryw3i/Funing
|
8ef88af8766f0246614517fa00f3b322ba722d6b
|
[
"MIT"
] | null | null | null |
import gettext
import os
import re
import subprocess
import sys
import time
import tkinter as tk
import tkinter.filedialog as tkf
import uuid
import webbrowser
from datetime import date, datetime
from enum import Enum
from tkinter import *
from tkinter import messagebox
from tkinter.ttk import *
import cv2
import numpy as np
import pygubu
import yaml
from PIL import Image, ImageTk
from funing import *
from funing.locale import _
from funing.settings import *
translator = _
class AboutTkApplication(pygubu.TkApplication):
def __init__(self):
# pygubu builder
self.builder = pygubu.Builder(translator)
# ui files
about_ui_path = os.path.join(
os.path.join(project_path, 'ui'), 'about.ui')
# add ui files
self.builder.add_from_file(about_ui_path)
self.mainwindow = None
self.is_showing = False
def on_about_ok_btn_clicked(self):
self.about_ok()
def about_ok(self):
self.trigger()
def quit(self, event=None):
self.mainwindow.withdraw()
self.is_showing = False
def run(self):
if not self.mainwindow:
self.mainwindow = self.builder.get_object('about_toplevel')
self.mainwindow.title(_('About Funing'))
self.builder.get_object('version_label')['text'] = version
self.mainwindow.protocol("WM_DELETE_WINDOW", self.on_closing)
# connect callbacks
self.builder.connect_callbacks(self)
else:
self.mainwindow.deiconify()
self.is_showing = True
def on_closing(self):
self.quit()
def trigger(self):
if not self.is_showing:
self.run()
else:
self.quit()
def view_source_code(self, *args):
webbrowser.open(source_page)
| 23.397436
| 73
| 0.656438
| 1,340
| 0.734247
| 0
| 0
| 0
| 0
| 0
| 0
| 142
| 0.077808
|
36cd33528502d61cfd130bce552b6359665140f3
| 8,039
|
py
|
Python
|
pysnmp-with-texts/Fore-Common-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/Fore-Common-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/Fore-Common-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module Fore-Common-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Fore-Common-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:14:34 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
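# Note (added for clarity, not part of the generated file): `mibBuilder` below is
# not imported here; pysnmp's MIB loader supplies it in the module's namespace
# when this generated file is loaded, which is why the bare references resolve.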
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Bits, MibIdentifier, enterprises, Counter64, Unsigned32, ModuleIdentity, Counter32, TimeTicks, NotificationType, ObjectIdentity, IpAddress, Gauge32, Integer32, iso, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "MibIdentifier", "enterprises", "Counter64", "Unsigned32", "ModuleIdentity", "Counter32", "TimeTicks", "NotificationType", "ObjectIdentity", "IpAddress", "Gauge32", "Integer32", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
fore = ModuleIdentity((1, 3, 6, 1, 4, 1, 326))
if mibBuilder.loadTexts: fore.setLastUpdated('9911050000Z')
if mibBuilder.loadTexts: fore.setOrganization('Marconi Communications')
if mibBuilder.loadTexts: fore.setContactInfo(' Postal: Marconi Communications, Inc. 5000 Marconi Drive Warrendale, PA 15086-7502 Tel: +1 724 742 6999 Email: bbrs-mibs@marconi.com Web: http://www.marconi.com')
if mibBuilder.loadTexts: fore.setDescription('Definitions common to all FORE private MIBS.')
admin = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1))
systems = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2))
foreExperiment = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 3))
operations = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1, 1))
snmpErrors = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1, 2))
snmpTrapDest = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1, 3))
snmpAccess = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1, 4))
assembly = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1, 5))
fileXfr = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1, 6))
rmonExtensions = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1, 7))
preDot1qVlanMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1, 8))
snmpTrapLog = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1, 9))
ilmisnmp = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1, 10))
entityExtensionMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1, 11))
ilmiRegistry = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1, 14))
foreIfExtension = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1, 15))
frameInternetworking = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1, 16))
ifExtensions = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 1, 17))
atmAdapter = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 1))
atmSwitch = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2))
etherSwitch = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 3))
atmAccess = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 5))
hubSwitchRouter = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 6))
ipoa = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 7))
stackSwitch = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 10))
switchRouter = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 15))
software = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 2))
asxd = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 2, 1))
hardware = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 1))
asx = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 1, 1))
asx200wg = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 4))
asx200bx = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 5))
asx200bxe = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 6))
cabletron9A000 = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 7))
asx1000 = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 8))
le155 = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 9))
sfcs200wg = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 10))
sfcs200bx = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 11))
sfcs1000 = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 12))
tnx210 = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 15))
tnx1100 = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 16))
asx1200 = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 17))
asx4000 = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 18))
le25 = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 19))
esx3000 = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 20))
tnx1100b = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 21))
asx150 = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 22))
bxr48000 = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 24))
asx4000m = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 25))
axhIp = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 26))
axhSig = MibIdentifier((1, 3, 6, 1, 4, 1, 326, 2, 2, 27))
class SpansAddress(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8)
fixedLength = 8
class AtmAddress(OctetString):
subtypeSpec = OctetString.subtypeSpec + ConstraintsUnion(ValueSizeConstraint(8, 8), ValueSizeConstraint(20, 20), )
class NsapPrefix(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(13, 13)
fixedLength = 13
class NsapAddr(OctetString):
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(20, 20)
fixedLength = 20
class TransitNetwork(DisplayString):
subtypeSpec = DisplayString.subtypeSpec + ValueSizeConstraint(1, 4)
class TrapNumber(Integer32):
pass
class EntryStatus(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("valid", 1), ("createRequest", 2), ("underCreation", 3), ("invalid", 4))
class AtmSigProtocol(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13))
namedValues = NamedValues(("other", 1), ("spans", 2), ("q2931", 3), ("pvc", 4), ("spvc", 5), ("oam", 6), ("spvcSpans", 7), ("spvcPnni", 8), ("rcc", 9), ("fsig", 10), ("mpls", 11), ("ipCtl", 12), ("oam-ctl", 13))
class GeneralState(Integer32):
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("normal", 1), ("fail", 2))
class IntegerBitString(Integer32):
pass
class ConnectionType(Integer32):
pass
mibBuilder.exportSymbols("Fore-Common-MIB", ilmiRegistry=ilmiRegistry, fore=fore, ilmisnmp=ilmisnmp, NsapPrefix=NsapPrefix, atmAccess=atmAccess, snmpTrapDest=snmpTrapDest, rmonExtensions=rmonExtensions, preDot1qVlanMIB=preDot1qVlanMIB, operations=operations, ipoa=ipoa, software=software, tnx1100=tnx1100, snmpErrors=snmpErrors, sfcs200bx=sfcs200bx, snmpAccess=snmpAccess, sfcs200wg=sfcs200wg, le25=le25, sfcs1000=sfcs1000, esx3000=esx3000, frameInternetworking=frameInternetworking, asx4000m=asx4000m, AtmAddress=AtmAddress, assembly=assembly, ConnectionType=ConnectionType, axhIp=axhIp, bxr48000=bxr48000, ifExtensions=ifExtensions, asx=asx, asxd=asxd, asx4000=asx4000, TransitNetwork=TransitNetwork, fileXfr=fileXfr, EntryStatus=EntryStatus, foreIfExtension=foreIfExtension, asx1000=asx1000, asx200bxe=asx200bxe, axhSig=axhSig, TrapNumber=TrapNumber, SpansAddress=SpansAddress, IntegerBitString=IntegerBitString, atmSwitch=atmSwitch, cabletron9A000=cabletron9A000, AtmSigProtocol=AtmSigProtocol, tnx1100b=tnx1100b, asx200bx=asx200bx, etherSwitch=etherSwitch, asx1200=asx1200, hubSwitchRouter=hubSwitchRouter, entityExtensionMIB=entityExtensionMIB, switchRouter=switchRouter, NsapAddr=NsapAddr, asx200wg=asx200wg, systems=systems, atmAdapter=atmAdapter, foreExperiment=foreExperiment, PYSNMP_MODULE_ID=fore, admin=admin, le155=le155, GeneralState=GeneralState, hardware=hardware, stackSwitch=stackSwitch, asx150=asx150, tnx210=tnx210, snmpTrapLog=snmpTrapLog)
| 73.752294
| 1,461
| 0.707302
| 1,520
| 0.189078
| 0
| 0
| 0
| 0
| 0
| 0
| 1,271
| 0.158104
|
36d0e1753fba4845d6f1c53b001fd0c1077f6cbc
| 2,753
|
py
|
Python
|
lib/logger.py
|
YahiaKandeel/ironport-correlator
|
cb426f412dba403f056c40eef631f0c252eada08
|
[
"Apache-2.0"
] | 6
|
2019-10-28T01:18:51.000Z
|
2022-01-26T11:43:14.000Z
|
lib/logger.py
|
YahiaKandeel/ironport-correlator
|
cb426f412dba403f056c40eef631f0c252eada08
|
[
"Apache-2.0"
] | null | null | null |
lib/logger.py
|
YahiaKandeel/ironport-correlator
|
cb426f412dba403f056c40eef631f0c252eada08
|
[
"Apache-2.0"
] | 2
|
2020-04-30T11:17:27.000Z
|
2021-11-17T02:26:48.000Z
|
################################################################################
# Styler & Logger
################################################################################
from logging.handlers import SysLogHandler
import logging
import json
import pprint
import time
from .decoder import decode
import collections
# Log Keys Order
keys = [
'ICID', 'MID', "MessageID", 'Related_MID',
'OutbreakFilters', 'CASE', 'GRAYMAIL', 'Antivirus', 'LDAP_Drop',
'SPF', 'DKIM', 'DKIM_Detail', 'DMARK', 'DMARK_Detail',
"Subject", "Attachments", "From", "To",
"SenderReputation", "ThreatCategory", "SuspectedDomains", "DomainAge",
'Action', 'Action_Desc', 'Content_Filter', "IP", "Other"
]
# Syslog
def syslog(siemContext):
'''
Return a syslogger instance
'''
# Create Handler
handler = SysLogHandler(address=(siemContext["server"], siemContext["port"]),
facility=SysLogHandler.LOG_LOCAL5)
# Configure Logger
logger = logging.getLogger(siemContext["ident"])
logger.setLevel(logging.INFO)
# Configure formatter
formatter = logging.Formatter('%(name)s: %(message)r')
handler.setFormatter(formatter)
# Add handler to the logger
logger.addHandler(handler)
# return
return logger
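# Illustrative only (not part of the original module): syslog() expects a mapping
# shaped like {"server": "siem.example.org", "port": 514, "ident": "ironport"},
# i.e. the three keys read above; the values shown here are hypothetical.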
def style(message, msgexpand):
'''
Style and expand a message
'''
message_log = collections.OrderedDict()
result = []
for key in keys:
values = filter(None, message.get(key, []))
message_log[key] = ' || '.join(list(set(values)))
# Decode Subject & Attachments
message_log["Subject"] = decode(message_log["Subject"])
# message_log["Attachments"] = decode(message_log["Attachments"])
# If msgexpand
if msgexpand:
for recipient in message.get('To', []):
message_log['To'] = recipient
result.append(
json.dumps(message_log, ensure_ascii=False))
# Else
else:
result.append(
json.dumps(message_log, ensure_ascii=False))
return result
def syslogger(logger_queue, siemContext, options):
'''
Logger Process
'''
print("\t[+]Starting Logger Process")
# Logger
logger = syslog(siemContext)
while True:
# Get Data from Logger Queue
data = logger_queue.get()
# If there is a message
if data:
[(mid, message)] = data.items()
# Style It
messages = style(message, options["expand"])
# Log
for message in messages:
logger.info(message)
print('\r\n'+'#' * 100)
pprint.pprint(json.loads(message))
else:
# sleep
time.sleep(0.05)
| 27.808081
| 81
| 0.564475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,014
| 0.368325
|
36d148c1ce0bdea8709582045309f0f2acad2b33
| 954
|
py
|
Python
|
services/web/apps/inv/inv/plugins/log.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
services/web/apps/inv/inv/plugins/log.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
services/web/apps/inv/inv/plugins/log.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ---------------------------------------------------------------------
# inv.inv log plugin
# ---------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from .base import InvPlugin
class LogPlugin(InvPlugin):
name = "log"
js = "NOC.inv.inv.plugins.log.LogPanel"
def get_data(self, request, o):
return {
"id": str(o.id),
"name": o.name,
"model": o.model.name,
"log": [
{
"ts": x.ts.isoformat(),
"user": x.user,
"system": x.system,
"managed_object": x.managed_object,
"op": x.op,
"message": x.message,
}
for x in o.get_log()
],
}
| 28.909091
| 71
| 0.336478
| 603
| 0.632075
| 0
| 0
| 0
| 0
| 0
| 0
| 420
| 0.440252
|
36d22a39c7974086f08155ff6bf52d3cb2267f62
| 574
|
py
|
Python
|
blender/arm/logicnode/transform/LN_separate_quaternion.py
|
niacdoial/armory
|
3f9b633fbf772017c576a3f77695a6c28d9956e1
|
[
"Zlib"
] | null | null | null |
blender/arm/logicnode/transform/LN_separate_quaternion.py
|
niacdoial/armory
|
3f9b633fbf772017c576a3f77695a6c28d9956e1
|
[
"Zlib"
] | null | null | null |
blender/arm/logicnode/transform/LN_separate_quaternion.py
|
niacdoial/armory
|
3f9b633fbf772017c576a3f77695a6c28d9956e1
|
[
"Zlib"
] | null | null | null |
from arm.logicnode.arm_nodes import *
class SeparateQuaternionNode(ArmLogicTreeNode):
"""TO DO."""
bl_idname = 'LNSeparateQuaternionNode'
bl_label = "Separate Quaternion"
arm_section = 'quaternions'
arm_version = 1
def init(self, context):
super(SeparateQuaternionNode, self).init(context)
self.add_input('NodeSocketVector', 'Quaternion')
self.add_output('NodeSocketFloat', 'X')
self.add_output('NodeSocketFloat', 'Y')
self.add_output('NodeSocketFloat', 'Z')
self.add_output('NodeSocketFloat', 'W')
| 31.888889
| 57
| 0.679443
| 533
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 182
| 0.317073
|
36d3212ee65298917f85198d847d449f780e78c7
| 3,110
|
py
|
Python
|
tools/exporter_python/exporter.py
|
moriyalb/hades
|
ea2743a23022f65b3931eb482b6ec18804410ba3
|
[
"MIT"
] | 5
|
2018-05-18T10:01:46.000Z
|
2021-08-18T13:59:47.000Z
|
tools/exporter_python/exporter.py
|
moriyalb/hades
|
ea2743a23022f65b3931eb482b6ec18804410ba3
|
[
"MIT"
] | null | null | null |
tools/exporter_python/exporter.py
|
moriyalb/hades
|
ea2743a23022f65b3931eb482b6ec18804410ba3
|
[
"MIT"
] | null | null | null |
import getopt
import sys
import os
import schema
import server
import orm
CLIENT_TYPE = {
'--client_lua_path' : "lua",
'--client_cs_path' : "cs",
'--client_cpp_path' : "cpp",
'--client_js_path' : "js",
'--client_python_path' : "python",
}
def export():
opts, args = getopt.getopt(sys.argv[1:], '-h-u:', ['help',
'server_path=',
'client_lua_path=', 'client_cs_path=', 'client_cpp_path=', 'client_js_path=', 'client_python_path=',
'user='])
user = None
exportClient = {}
exportServer = None
for tag, value in opts:
if tag in ('-h', '--help'):
print('''
--server_path path to the server project (contains the Defines, Entities, Configs, CustomConfigs folders, etc.)
--client_lua_path client Lua export path (contains the Proxy and ProxyDefine folders; exported Lua client scripts are placed here)
--client_cs_path client C# export path (contains the Proxy and ProxyDefine folders; exported C# client scripts are placed here)
--client_cpp_path client C++ export path (contains the Proxy and ProxyDefine folders; exported C++ client scripts are placed here)
--client_js_path client js export path (contains the Proxy and ProxyDefine folders; exported js client scripts are placed here)
--client_python_path client python export path (contains the Proxy and ProxyDefine folders; exported python client scripts are placed here)
--user(-u) the server user environment (without a user, server-related configuration cannot be exported)
--help(-h) show this help message''')
exit()
if tag in ('-u','--user'):
user = value
if tag == '--server_path':
exportServer = value
if tag in CLIENT_TYPE:
exportClient[CLIENT_TYPE[tag]] = value
if not exportServer:
print("Error in Exporter : no server_path -> ")
return
elif not os.path.exists(exportServer):
print("Error in Exporter : invalid server_path -> ", exportServer)
return
if not user:
print("== Please set your user name in preference.bat ==")
print("== set USER=mario ==")
print("The user name settings exists at Server/Project/CustomConfigs")
return
else:
cfgPath = exportServer + "/CustomConfigs/" + user
if not os.path.exists(cfgPath):
print("Error in Exporter : invalid user -> ", user)
return
for ctype, cpath in exportClient.items():
if not os.path.exists(cpath):
print("Error in Exporter : invalid client_path -> ", ctype, cpath)
define_path = exportServer + "/Defines"
schemaCfg = schema.load(define_path)
cfgPath = exportServer + "/CustomConfigs/" + user
serverCfg = server.load(cfgPath)
exportCfgPath = exportServer + "/Configs"
exportSchemaPath = exportCfgPath + "/Schema"
exportServerPath = exportCfgPath + "/Server"
exportOrmPath = exportCfgPath + "/Orm"
schema.write(schemaCfg, exportSchemaPath)
server.write(serverCfg, exportServerPath)
orm.write(schemaCfg, exportOrmPath)
exportServerScriptPath = exportServer + "/Entities"
ss = __import__('server_js', globals(), locals(), [], 0)
ss.write(schemaCfg, exportServerScriptPath)
for ctype, cpath in exportClient.items():
sc = None
try:
sc = __import__('client_' + ctype, globals(), locals(), [], 0)
except Exception as e:
print("Exporter don't support the client script now. -> ", ctype)
if sc:
sc.writeCfg(schemaCfg, cpath + "/ProxyDefine")
sc.writeScript(schemaCfg, cpath + "/Proxy")
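# Example invocation (hypothetical paths and user, for illustration only):
#   python exporter.py -u mario --server_path ../Server/Project --client_lua_path ../Client/Lua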
if __name__ == "__main__":
#try:
export()
#except Exception as e:
# print("Error in exporter -> ", e)
| 31.734694
| 104
| 0.684566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,781
| 0.503392
|
36d456418e0f32038550bac5f2b5a0f1d2148fc5
| 707
|
py
|
Python
|
python/python project/te330.py
|
WhitePhosphorus4/xh-learning-code
|
025e31500d9f46d97ea634d7fd311c65052fd78e
|
[
"Apache-2.0"
] | null | null | null |
python/python project/te330.py
|
WhitePhosphorus4/xh-learning-code
|
025e31500d9f46d97ea634d7fd311c65052fd78e
|
[
"Apache-2.0"
] | null | null | null |
python/python project/te330.py
|
WhitePhosphorus4/xh-learning-code
|
025e31500d9f46d97ea634d7fd311c65052fd78e
|
[
"Apache-2.0"
] | null | null | null |
import wx
class App(wx.App):
def OnInit(self):
self.locale = wx.Locale(wx.LANGUAGE_CHINESE)
return 1
def A(evt):
print("hello")
f.Maximize()
def B(evt):
b.SetBackgroundColour("#FFFFFF")
def C(evt):
b.SetBackgroundColour("#EFEFEF")
app = App()
f = wx.Frame(None, -1, "Hello", [700, 500])
wx.Button(f, size = [0, 0])
#s = wx.Image("uu.png", wx.BITMAP_TYPE_ANY).ConvertToBitmap()
b = wx.Button(f, -1,'Hello', size = [80, 30], style = wx.BORDER_NONE)
#bb= wx.StaticBitmap(b, -1, wx.Image("uu.png", wx.BITMAP_TYPE_ANY).ConvertToBitmap())
b.SetBackgroundColour("#FEFEFE")
b.Bind(wx.EVT_BUTTON, A)
b.Bind(wx.EVT_ENTER_WINDOW, B)
b.Bind(wx.EVT_LEAVE_WINDOW, C)
f.Show()
app.MainLoop()
| 27.192308
| 85
| 0.666195
| 107
| 0.151344
| 0
| 0
| 0
| 0
| 0
| 0
| 194
| 0.274399
|
36d4d897387b020fc8db80ecfcfac7847d28fc17
| 2,048
|
py
|
Python
|
examples/sneswii2gamepad/code.py
|
dglaude/CircuitPython_Joystic_Controller
|
a9ef8855b9be457b25c9a436fcbbf6aebe39b4e9
|
[
"Unlicense",
"MIT-0",
"MIT"
] | null | null | null |
examples/sneswii2gamepad/code.py
|
dglaude/CircuitPython_Joystic_Controller
|
a9ef8855b9be457b25c9a436fcbbf6aebe39b4e9
|
[
"Unlicense",
"MIT-0",
"MIT"
] | null | null | null |
examples/sneswii2gamepad/code.py
|
dglaude/CircuitPython_Joystic_Controller
|
a9ef8855b9be457b25c9a436fcbbf6aebe39b4e9
|
[
"Unlicense",
"MIT-0",
"MIT"
] | null | null | null |
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
# You must add a gamepad HID device inside your boot.py file
# in order to use this example.
# See this Learn Guide for details:
# https://learn.adafruit.com/customizing-usb-devices-in-circuitpython/hid-devices#custom-hid-devices-3096614-9
import time
import board
import neopixel
led = neopixel.NeoPixel(board.NEOPIXEL, 1)
led.brightness = 0.3
led[0] = (0, 0, 0)
# SPDX-FileCopyrightText: 2021 John Furcean
# SPDX-License-Identifier: MIT
# The Classic Controller also works with the CLV-202.
# But the "Super Nintendo SNES Classic Mini Controller" has fewer buttons and no stick.
from wiichuck.classic_controller import ClassicController
controller = ClassicController(board.I2C())
# SPDX-FileCopyrightText: Copyright (c) 2021 Dan Halbert for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
import usb_hid
from hid_gamepad import Gamepad
gp = Gamepad(usb_hid.devices)
x=0
y=0
oldx=0
oldy=0
while True:
_, buttons, dpad, _ = controller.values
if buttons.A:
led[0] = (255, 0, 0)
if buttons.B:
led[0] = (255, 255, 0)
if buttons.X:
led[0] = (0, 0, 255)
if buttons.Y:
led[0] = (0, 255, 0)
if buttons.R:
led[0] = (0, 0, 0)
print("button R")
if buttons.L:
led[0] = (0, 0, 0)
print("button L")
if buttons.start:
led[0] = (0, 0, 0)
print("button start")
if buttons.select:
led[0] = (0, 0, 0)
print("button select")
if (y!=0) and not (dpad.up or dpad.down):
y=0
if dpad.up:
y = 127
led[0] = (0, 0, 0)
print("dpad up")
if dpad.down:
y = -127
led[0] = (0, 0, 0)
print("dpad down")
if (x!=0) and not (dpad.right or dpad.left):
x=0
if dpad.right:
x = 127
led[0] = (0, 0, 0)
print("dpad right")
if dpad.left:
x = -127
led[0] = (0, 0, 0)
print("dpad left")
gp.move_joysticks(x, y)
| 23.54023
| 110
| 0.605957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 740
| 0.361328
|
36d54bbaca57e4631b154f3ca77d029d7fd103ad
| 3,345
|
py
|
Python
|
spleeter/util.py
|
ashirviskas/spleeter-pytorch
|
853d4bb6048fae879543342a8278c298854637f3
|
[
"MIT"
] | 28
|
2019-11-29T10:23:16.000Z
|
2022-03-28T13:02:29.000Z
|
spleeter/util.py
|
ashirviskas/spleeter-pytorch
|
853d4bb6048fae879543342a8278c298854637f3
|
[
"MIT"
] | 2
|
2020-05-07T04:07:50.000Z
|
2020-07-13T02:40:41.000Z
|
spleeter/util.py
|
ashirviskas/spleeter-pytorch
|
853d4bb6048fae879543342a8278c298854637f3
|
[
"MIT"
] | 6
|
2020-05-31T08:23:56.000Z
|
2022-02-22T16:38:19.000Z
|
import numpy as np
import tensorflow as tf
from .unet import UNet
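# Note (added for clarity): the (3, 2, 0, 1) transposes below move TensorFlow's
# kernel axes (height, width, channels, channels) to the channels-first
# (channels, channels, height, width) ordering used by PyTorch weight tensors.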
def tf2pytorch(checkpoint_path, num_instrumments):
tf_vars = {}
init_vars = tf.train.list_variables(checkpoint_path)
# print(init_vars)
for name, shape in init_vars:
try:
# print('Loading TF Weight {} with shape {}'.format(name, shape))
data = tf.train.load_variable(checkpoint_path, name)
tf_vars[name] = data
except Exception as e:
print('Load error')
conv_idx = 0
tconv_idx = 0
bn_idx = 0
outputs = []
for i in range(num_instrumments):
output = {}
outputs.append(output)
for j in range(1,7):
if conv_idx == 0:
conv_suffix = ""
else:
conv_suffix = "_" + str(conv_idx)
if bn_idx == 0:
bn_suffix = ""
else:
bn_suffix = "_" + str(bn_idx)
output['down{}_conv.weight'.format(j)] = np.transpose(
tf_vars["conv2d{}/kernel".format(conv_suffix)], (3, 2, 0, 1))
# print('conv dtype: ',output['down{}.0.weight'.format(j)].dtype)
output['down{}_conv.bias'.format(
j)] = tf_vars["conv2d{}/bias".format(conv_suffix)]
output['down{}_act.0.weight'.format(
j)] = tf_vars["batch_normalization{}/gamma".format(bn_suffix)]
output['down{}_act.0.bias'.format(
j)] = tf_vars["batch_normalization{}/beta".format(bn_suffix)]
output['down{}_act.0.running_mean'.format(
j)] = tf_vars['batch_normalization{}/moving_mean'.format(bn_suffix)]
output['down{}_act.0.running_var'.format(
j)] = tf_vars['batch_normalization{}/moving_variance'.format(bn_suffix)]
conv_idx += 1
bn_idx += 1
# up blocks
for j in range(1, 7):
if tconv_idx == 0:
tconv_suffix = ""
else:
tconv_suffix = "_" + str(tconv_idx)
if bn_idx == 0:
bn_suffix = ""
else:
bn_suffix= "_" + str(bn_idx)
output['up{}.0.weight'.format(j)] = np.transpose(
tf_vars["conv2d_transpose{}/kernel".format(tconv_suffix)], (3,2,0, 1))
output['up{}.0.bias'.format(
j)] = tf_vars["conv2d_transpose{}/bias".format(tconv_suffix)]
output['up{}.2.weight'.format(
j)] = tf_vars["batch_normalization{}/gamma".format(bn_suffix)]
output['up{}.2.bias'.format(
j)] = tf_vars["batch_normalization{}/beta".format(bn_suffix)]
output['up{}.2.running_mean'.format(
j)] = tf_vars['batch_normalization{}/moving_mean'.format(bn_suffix)]
output['up{}.2.running_var'.format(
j)] = tf_vars['batch_normalization{}/moving_variance'.format(bn_suffix)]
tconv_idx += 1
bn_idx += 1
if conv_idx == 0:
suffix = ""
else:
suffix = "_" + str(conv_idx)
output['up7.0.weight'] = np.transpose(
tf_vars['conv2d{}/kernel'.format(suffix)], (3, 2, 0, 1))
output['up7.0.bias'] = tf_vars['conv2d{}/bias'.format(suffix)]
conv_idx += 1
return outputs
| 36.358696
| 88
| 0.529746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 828
| 0.247534
|
36d54c415ce82d548b3b02e02ceb85813202d7ef
| 1,920
|
py
|
Python
|
SDEprocesses/explicit.py
|
PyPaperParrot/pystoch
|
14d1dbeefaeb3696378e0db6e565347df87a02bc
|
[
"MIT"
] | 1
|
2019-06-06T13:53:51.000Z
|
2019-06-06T13:53:51.000Z
|
SDEprocesses/explicit.py
|
PyPaperParrot/pystoch
|
14d1dbeefaeb3696378e0db6e565347df87a02bc
|
[
"MIT"
] | null | null | null |
SDEprocesses/explicit.py
|
PyPaperParrot/pystoch
|
14d1dbeefaeb3696378e0db6e565347df87a02bc
|
[
"MIT"
] | null | null | null |
import numpy as np
import exceptions as ex
def LogWalk(T, nSteps, mu, sigma, x_0=1, t_0=0, boundary=500):
ex._check_params(T, nSteps, t_0)
dt = T/(10*nSteps)
x_t = []
t = t_0
for i in range((10*nSteps)):
x = x_0*np.exp((mu - sigma**2/2)*t + sigma*np.random.randn()*np.sqrt(t))
if abs(x) > boundary:
raise Warning("Risk of going beyond the definition of a random process. Boundary: " + str(boundary) + ". If You wish You could change boundary conditions in parameters (default:'boundary'=500).")
x_t.append(x)
t += dt
return x_t
# 4. Ornstein-Uhlenbeck process
def OrnsteinUlenbekProcess(T, nSteps, alpha, beta, _sigma, x_0=1, t_0=0, boundary=500):
ex._check_params(T, nSteps, t_0)
dt = T/(10*nSteps)
x_t = []
x_t.append(x_0)
t = t_0
for i in range(1, 10*nSteps):
x = alpha + (x_0 - alpha)*np.exp(-beta*t) + _sigma/np.sqrt(2*beta)*np.sqrt(1-np.exp(-2*beta*t))*np.random.randn()
if abs(x) > boundary:
raise Warning("Risk of going beyond the definition of a random process. Boundary: " + str(boundary) + ". If You wish You could change boundary conditions in parameters (default:'boundary'=500).")
x_t.append(x)
t += dt
return x_t
# 6. Brownian bridge
def BrownianBridge(T, nSteps, alpha, _sigma, x_0=1, t_0=0, boundary=500):
ex._check_params(T, nSteps, t_0)
dt = T/(10*nSteps)
x_t = []
x_t.append(x_0)
t = t_0
for i in range(1, 10*nSteps):
x = alpha + (x_0 - alpha)*(T - t)/(T - t_0) + _sigma*np.sqrt((t - t_0)*(T - t)/(T - t_0))*np.random.randn()
if abs(x) > boundary:
raise Warning("Risk of going beyond the definition of a random process. Boundary: " + str(boundary) + ". If You wish You could change boundary conditions in parameters (default:'boundary'=500).")
x_t.append(x)
t += dt
return x_t
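# Usage sketch (illustrative parameters only, not part of the original module):
#   path = OrnsteinUlenbekProcess(T=1.0, nSteps=100, alpha=0.0, beta=1.0, _sigma=0.5)
#   # `path` holds successive samples of the process on an evenly spaced time grid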
| 34.285714
| 207
| 0.611458
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 574
| 0.293007
|
36d5db401ea4ab6c6ef162a66fa84ae2937cecdb
| 3,847
|
py
|
Python
|
oscontainer/cgroup_v2_subsystem.py
|
Lothiraldan/oscontainer
|
140504711372494f919b4de7bf84e80d11013fa0
|
[
"MIT"
] | null | null | null |
oscontainer/cgroup_v2_subsystem.py
|
Lothiraldan/oscontainer
|
140504711372494f919b4de7bf84e80d11013fa0
|
[
"MIT"
] | null | null | null |
oscontainer/cgroup_v2_subsystem.py
|
Lothiraldan/oscontainer
|
140504711372494f919b4de7bf84e80d11013fa0
|
[
"MIT"
] | null | null | null |
import math
from oscontainer.constants import CGROUP_TYPE_V2, PER_CPU_SHARES, NO_LIMIT
from oscontainer.cgroup_subsystem import CgroupController, CgroupSubsystem
from oscontainer.utils import limit_from_str
CPU_WEIGHT = "cpu.weight"
CPU_MAX = "cpu.max"
CPU_CPUSET_CPUS = "cpuset.cpus"
CPU_CPUSET_CPUS_EFFECTIVE = "cpuset.cpus.effective"
MEMORY_CURRENT = "memory.current"
MEMORY_MAX = "memory.max"
class CgroupV2Controller(CgroupController):
def __init__(self, mount_path, cgroup_path):
# type: (str, str) -> None
"""
Creates new cgroup V2 controller.
:param mount_path: the mount path of the cgroup v2 hierarchy
:param cgroup_path: the cgroup path for the controller
"""
super().__init__()
self.mount_path = mount_path
self.cgroup_path = cgroup_path
self.subsystem_path = self._create_subsystem_path(mount_path, cgroup_path)
@staticmethod
def _create_subsystem_path(mount_path, cgroup_path):
# type: (str, str) -> str
return mount_path + cgroup_path
class CgroupV2Subsystem(CgroupSubsystem):
"""
The implementation for cgroup V2
"""
def __init__(self, unified):
# type: (CgroupV2Controller) -> None
"""
Creates new instance.
:param unified: the unified cgroup controller
"""
self.unified = unified
def cpu_shares(self):
# type: () -> int
shares = int(self.unified.read_container_param(CPU_WEIGHT))
if shares == 100:
# Convert default value of 100 to no shares setup
return NO_LIMIT
# CPU shares (OCI) value needs to get translated into
# a proper Cgroups v2 value. See:
# https://github.com/containers/crun/blob/master/crun.1.md#cpu-controller
#
# Use the inverse of (x == OCI value, y == cgroupsv2 value):
# ((262142 * y - 1)/9999) + 2 = x
x = 262142 * shares - 1
frac = float(x) / 9999.0
x = int(frac) + 2
if x <= PER_CPU_SHARES:
# will always map to 1 CPU
return x
# Since the scaled value is not precise, return the closest
# multiple of PER_CPU_SHARES for a more conservative mapping
f = x / PER_CPU_SHARES
lower_multiple = math.floor(f) * PER_CPU_SHARES
upper_multiple = math.ceil(f) * PER_CPU_SHARES
distance_lower = max(lower_multiple, x) - min(lower_multiple, x)
distance_upper = max(upper_multiple, x) - min(upper_multiple, x)
if distance_lower <= distance_upper:
return lower_multiple
else:
return upper_multiple
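# Worked example (added for illustration): a cpu.weight of 2 gives
# x = 262142 * 2 - 1 = 524283; 524283 / 9999 ~= 52.4; int(52.4) + 2 = 54, which is
# <= PER_CPU_SHARES, so cpu_shares() returns 54. The default weight of 100 returns NO_LIMIT.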
def cpu_quota(self):
# type: () -> int
cpu_quota_res = self.unified.read_container_params_with_format(CPU_MAX, scan_format="%s %*d")
if len(cpu_quota_res) == 0:
return NO_LIMIT
return limit_from_str(cpu_quota_res[0])
def cpu_period(self):
# type: () -> int
cpu_period_res = self.unified.read_container_params_with_format(CPU_MAX, scan_format="%*s %d")
if len(cpu_period_res) == 0:
return NO_LIMIT
return cpu_period_res[0]
def cpu_cpuset_cpus(self):
# type: () -> str
cpuset = self.unified.read_container_param(CPU_CPUSET_CPUS)
if cpuset is None or cpuset == "":
cpuset = self.unified.read_container_param(CPU_CPUSET_CPUS_EFFECTIVE)
return cpuset
def memory_usage_in_bytes(self):
# type: () -> int
return int(self.unified.read_container_param(MEMORY_CURRENT))
def memory_limit_in_bytes(self):
# type: () -> int
memory_str = self.unified.read_container_param(MEMORY_MAX)
return limit_from_str(memory_str)
def container_type(self):
# type: () -> str
return CGROUP_TYPE_V2
| 34.044248
| 102
| 0.641279
| 3,442
| 0.894723
| 0
| 0
| 144
| 0.037432
| 0
| 0
| 1,092
| 0.283858
|
36d6b30d341d10b3fc5496de476fb8b78f692188
| 460
|
py
|
Python
|
openapi/tests/matchers.py
|
suihanki/openapi
|
c67ee4ec0284bc1da5bda2c6b8497d6a33bb69a0
|
[
"Apache-2.0"
] | 25
|
2016-08-09T18:40:42.000Z
|
2019-07-15T20:37:13.000Z
|
openapi/tests/matchers.py
|
suihanki/openapi
|
c67ee4ec0284bc1da5bda2c6b8497d6a33bb69a0
|
[
"Apache-2.0"
] | 5
|
2016-08-16T18:34:44.000Z
|
2020-03-24T21:01:26.000Z
|
openapi/tests/matchers.py
|
suihanki/openapi
|
c67ee4ec0284bc1da5bda2c6b8497d6a33bb69a0
|
[
"Apache-2.0"
] | 11
|
2016-09-14T09:12:49.000Z
|
2020-01-31T19:27:07.000Z
|
"""
Custom hamcrest matchers.
"""
from hamcrest.core.base_matcher import BaseMatcher
from json import dumps, loads
class JSONMatcher(BaseMatcher):
"""
Match JSON content.
"""
def __init__(self, s):
self.json = loads(s)
def _matches(self, item):
return loads(item) == self.json
def describe_to(self, description):
description.append_text("json ").append_text(dumps(self.json))
equal_to_json = JSONMatcher
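# Usage sketch (added; assumes hamcrest's assert_that is available):
#   from hamcrest import assert_that
#   assert_that('{"b": 2, "a": 1}', equal_to_json('{"a": 1, "b": 2}'))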
| 18.4
| 70
| 0.671739
| 312
| 0.678261
| 0
| 0
| 0
| 0
| 0
| 0
| 76
| 0.165217
|
36d743457c72e522cd69762028d8c4a8aaf9d131
| 2,741
|
py
|
Python
|
build-container/docxify3.py
|
signaux-faibles/datapi
|
296ee922dc47eea4176f5c7bdde35c218bf9c817
|
[
"MIT"
] | null | null | null |
build-container/docxify3.py
|
signaux-faibles/datapi
|
296ee922dc47eea4176f5c7bdde35c218bf9c817
|
[
"MIT"
] | 31
|
2020-04-23T11:29:16.000Z
|
2021-06-23T05:45:08.000Z
|
build-container/docxify3.py
|
signaux-faibles/datapi
|
296ee922dc47eea4176f5c7bdde35c218bf9c817
|
[
"MIT"
] | null | null | null |
# coding: utf-8
import sys
import json
from mailmerge import MailMerge
# The template currently contains the following fields:
# auteur the author of the document
# date_edition the date the document was produced
# confidentialite the recipient of the document
# raison_sociale the legal name of the company
# siret the SIRET number of the establishment
# type_etablissement the establishment type, head office or secondary establishment
# tete_de_groupe the group head if the company belongs to a group
# departement the department of the establishment
# commune the municipality of the establishment
# territoire_industrie the "Territoire d'industrie" area
# secteur_activite the business sector
# activite the activity label and code
# secteurs_covid membership of the so-called COVID-19 sectors S1, S1 bis or S2
# statut_juridique the legal form, such as SAS or SARL
# date_ouverture_etablissement the opening date of the establishment
# date_creation_entreprise the creation date of the company
# effectif the latest headcount
# activite_partielle whether short-time work was requested over the last 12 months
# dette_sociale whether social-security debt rose over the last 3 months
# part_salariale whether the employee share of the debt remains outstanding
# annee_exercice year of the latest financial statements
# ca revenue (chiffre d'affaires)
# ebe gross operating surplus (EBE)
# rex operating income
# procol the latest collective insolvency proceeding
# detection_sf risk identified by the Signaux Faibles detection algorithm
# date_debut_suivi date the author started following the company
# description_wekan description in the Wekan Kanban follow-up tool
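# Illustrative input shape (hypothetical values): the JSON on stdin is expected to
# be a list of dicts keyed by the merge fields above, e.g.
#   [{"siret": "00000000000000", "raison_sociale": "EXAMPLE SAS", "effectif": "42", ...}]
# merge_templates() then renders one section per dict, separated by page breaks.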
template = 'template.docx'
# Read the JSON data from standard input
def get_json_input_data():
try:
sys.stdin.reconfigure(encoding='utf-8')
read = sys.stdin.read()
data = json.loads(read)
return data
except ValueError:
sys.stderr.write('Error while reading the JSON input data\n')
sys.exit(1)
# Fill the DOCX template containing merge fields (MERGEFIELD) and write the result to standard output
def fill_template_with_data(data):
try:
document = MailMerge(template)
# 3 possible arguments:
# 1 = auteur, 2 = date_edition, 3 = confidentialite
args = len(sys.argv)
if args > 3:
confidentialite = sys.argv[3]
document.merge(confidentialite=confidentialite)
if args > 2:
date_edition = sys.argv[2]
document.merge(date_edition=date_edition)
if args > 1:
auteur = sys.argv[1]
document.merge(auteur=auteur)
document.merge_templates(data, separator='page_break')
document.write(sys.stdout.buffer)
except ValueError:
sys.stderr.write('Error while filling the DOCX template\n')
sys.exit(1)
data = get_json_input_data()
fill_template_with_data(data)
sys.exit(0)
| 38.069444
| 108
| 0.785845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,910
| 0.689033
|
36d88c360c0960445e0699b390c5bc46416d33e6
| 406
|
py
|
Python
|
super32assembler/super32assembler/preprocessor/asmdirectives.py
|
Projektstudium-Mikroprozessor/Super32
|
d502d2d5885ac0408d06e57e0f5a67fe2a2fee15
|
[
"BSD-3-Clause"
] | 1
|
2019-12-07T01:56:31.000Z
|
2019-12-07T01:56:31.000Z
|
super32assembler/super32assembler/preprocessor/asmdirectives.py
|
Projektstudium-Mikroprozessor/Super32
|
d502d2d5885ac0408d06e57e0f5a67fe2a2fee15
|
[
"BSD-3-Clause"
] | 42
|
2020-05-15T10:39:30.000Z
|
2020-08-30T10:59:43.000Z
|
super32assembler/preprocessor/asmdirectives.py
|
xsjad0/Super32
|
75cf5828b17cdbce144447a69ff3d1be7ad601f2
|
[
"BSD-3-Clause"
] | 4
|
2019-11-27T15:05:33.000Z
|
2020-05-13T06:51:21.000Z
|
"""
Enum Assembler-Directives
"""
from enum import Enum, auto
class AssemblerDirectives(Enum):
START = auto()
END = auto()
ORG = auto()
DEFINE = auto()
@classmethod
def to_string(cls):
return "{START},{END},{ORG},{DEFINE}".format(
START=cls.START.name,
END=cls.END.name,
ORG=cls.ORG.name,
DEFINE=cls.DEFINE.name
)
| 18.454545
| 53
| 0.549261
| 340
| 0.837438
| 0
| 0
| 229
| 0.564039
| 0
| 0
| 63
| 0.155172
|
36d90f9194a3f4a5adea321bf4cf9176ed0ded59
| 250
|
py
|
Python
|
Wyklad/OOP/Sheep.py
|
tborzyszkowski/PythonWorld
|
dc776d3ab4569297b6b6681e4390aeedf1262c78
|
[
"MIT"
] | 3
|
2019-02-24T07:49:33.000Z
|
2022-01-09T11:27:21.000Z
|
Wyklad/OOP/Sheep.py
|
tborzyszkowski/PythonWorld
|
dc776d3ab4569297b6b6681e4390aeedf1262c78
|
[
"MIT"
] | null | null | null |
Wyklad/OOP/Sheep.py
|
tborzyszkowski/PythonWorld
|
dc776d3ab4569297b6b6681e4390aeedf1262c78
|
[
"MIT"
] | 17
|
2018-12-09T08:44:52.000Z
|
2022-01-25T15:08:04.000Z
|
from Animal import Animal
class Sheep(Animal):
def __init__(self, sheep=None, position=None):
super(Sheep, self).__init__(sheep, position)
def clone(self):
return Sheep(self, None)
def initParams(self):
self.power = 3
self.sign = 'S'
| 16.666667
| 47
| 0.704
| 221
| 0.884
| 0
| 0
| 0
| 0
| 0
| 0
| 3
| 0.012
|
36da2c9e737689743389837a193464187fe5262e
| 154
|
py
|
Python
|
Python/WSClock/Page.py
|
Camiloasc1/OperativeSystemsUNAL
|
a07bfc235789b7a8848280a549a6b2c9602e61b5
|
[
"MIT"
] | null | null | null |
Python/WSClock/Page.py
|
Camiloasc1/OperativeSystemsUNAL
|
a07bfc235789b7a8848280a549a6b2c9602e61b5
|
[
"MIT"
] | null | null | null |
Python/WSClock/Page.py
|
Camiloasc1/OperativeSystemsUNAL
|
a07bfc235789b7a8848280a549a6b2c9602e61b5
|
[
"MIT"
] | null | null | null |
class Page(object):
def __init__(self, params):
self.size = 2 ** 10
self.Time = False
self.R = False
self.M = False
| 17.111111
| 31
| 0.519481
| 152
| 0.987013
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
36da2e9adf116505c11742d74e8d8a7c885d7c7d
| 1,034
|
py
|
Python
|
python/python-algorithm-intervew/8-linked-list/16-add-two-numbers-2.py
|
bum12ark/algorithm
|
b6e262b0c29a8b5fb551db5a177a40feebc411b4
|
[
"MIT"
] | 1
|
2022-03-06T03:49:31.000Z
|
2022-03-06T03:49:31.000Z
|
python/python-algorithm-intervew/8-linked-list/16-add-two-numbers-2.py
|
bum12ark/algorithm
|
b6e262b0c29a8b5fb551db5a177a40feebc411b4
|
[
"MIT"
] | null | null | null |
python/python-algorithm-intervew/8-linked-list/16-add-two-numbers-2.py
|
bum12ark/algorithm
|
b6e262b0c29a8b5fb551db5a177a40feebc411b4
|
[
"MIT"
] | null | null | null |
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
def print_list(self):
cur = self
while cur:
print(cur.val, end='->')
cur = cur.next
class Solution:
# Full adder implementation
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
root = head = ListNode(0)
carry = 0  # carry digit
while l1 or l2 or carry:
sum = 0
# Add up the values from the two inputs
if l1:
sum += l1.val
l1 = l1.next
if l2:
sum += l2.val
l2 = l2.next
# Compute the quotient (carry) and the remainder (digit value)
carry, val = divmod(sum + carry, 10)
head.next = ListNode(val)
head = head.next
return root.next
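# Worked example (matches the driver below): digits are stored least-significant
# first, so 2->4->5 is 542 and 5->6->4 is 465; 542 + 465 = 1007, returned as 7->0->0->1.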
if __name__ == '__main__':
solution = Solution()
param1 = ListNode(2, ListNode(4, ListNode(5)))
param2 = ListNode(5, ListNode(6, ListNode(4)))
print(solution.addTwoNumbers(param1, param2).print_list())
| 24.619048
| 72
| 0.500967
| 874
| 0.795993
| 0
| 0
| 0
| 0
| 0
| 0
| 128
| 0.116576
|
36dbe66f53ea99cba7463f1defbdf1646e602362
| 15,516
|
py
|
Python
|
pyjokes/jokes_pl.py
|
r0d0dendr0n/pyjokes
|
382065cba91007302be7fd04c5c35a9957e173b2
|
[
"BSD-3-Clause"
] | null | null | null |
pyjokes/jokes_pl.py
|
r0d0dendr0n/pyjokes
|
382065cba91007302be7fd04c5c35a9957e173b2
|
[
"BSD-3-Clause"
] | null | null | null |
pyjokes/jokes_pl.py
|
r0d0dendr0n/pyjokes
|
382065cba91007302be7fd04c5c35a9957e173b2
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Jokes below come from the "jokes_en.py" file.
Translation to Polish: Tomasz Rozynek - provided under CC BY-SA 3.0
"""
neutral = [
"W 2030 roku Beata z ulgą usunęła Python'a 2.7 ze swoich maszyn. 'No!' westchnęła, by za chwilę przeczytać ogłoszenia na temat Python'a 4.4.",
"Zapytanie SQL wchodzi do baru, podchodzi do pierwszej osoby i pyta, 'Czy możemy utworzyć relację?'",
"Kiedy używasz C++ jak młotka, wszystko będzie Twoim kciukiem.",
"Jak posadzisz milion małp przy milionie klawiatur, któraś z nich w końcu napisze działający program w Javie. Pozostałe będą pisać w Perlu.",
"Aby zrozumieć rekurencję, musisz najpierw zrozumieć rekurencję.",
"'Puk, puk.' 'Kto tam?' ... bardzo długa pauza ... 'Java.'",
"'Puk, puk.' 'Kto tam?' 'C++.'",
"'Puk, p... Asembler.'",
"Ilu programistów potrzeba, żeby wymienić żarówkę? Żadnego, bo to problem sprzętowy.",
"Jak nazywa się obiektowa metoda bogacenia się? Dziedziczenie.",
"Dlaczego dowcipy nie działają w systemie ósemkowym? Ponieważ 7, 10, 11.",
"Ilu programistów potrzeba, aby wymienić żarówkę? Żadnego, po prostu ogłaszają ciemność standardem.",
"Dwa wątki wchodzą do baru. Barman patrzy na nie i woła, 'Hej! Nie chcemy tu hazardu!'",
"Programiści uwielbiają rozwiązywanie problemów. Jeśli akurat nie mają żadnego do rozwiązania, z pewnością jakiś stworzą.",
".NET nazywa się .NET, żeby przypadkiem nie wyświetlił się w uniksowym listingu plików.",
"Sprzęt: część komputera, którą możesz kopnąć.",
"Optymista: Szklanka do połowy pełna. Pesymista: Szklanka do połowy pusta. Programista: Rozmiar szklanki jest dwa razy większy, niż wymagany.",
"W C sami musieliśmy kodować błędy. W C++ możemy je po prostu odziedziczyć.",
"Dlaczego nie ma konkursów na najmniej czytelny kod w Perlu? Bo nikt nie umiałby wyłonić zwycięzcy.",
"Odtwarzając dysk instalacyjny Windowsa od tyłu, usłyszysz czarną mszę. Gorzej, jeśli odtworzysz ją od przodu, wtedy zainstaluje Windowsa.",
"Ilu programistów potrzeba, aby zabić karalucha? Dwóch: jeden go trzyma, a drugi instaluje na nim Windowsa.",
"Do jakiej grupy należą programiści z Finlandii? Nerdyckiej.",
"Co mówi kod w Javie do kodu w C? Brakuje Ci klasy.",
"Dlaczego Microsoft nazwał swoją wyszukiwarkę BING? Bo Indolentnie Naśladuje Google.",
"Piraci wołają 'arg!', komputerowi piraci wołają 'argv!'",
"Dziecko: Mamo, dlaczego Słońce wschodzi na wschodzie i zachodzi na zachodzie? Ojciec: jeśli działa, nie dotykaj.",
"Dlaczego programistom myli się Halloween z Bożym Narodzeniem? Ponieważ OCT 31 == DEC 25.",
"Ilu programistów Prologa potrzeba, żeby wymienić żarówkę? Fałsz.",
"Kelner: Podać kawę, lub herbatę? Programistka: Tak.",
"Programistka wchodzi do foo...",
"Jak brzmi drugie imię Benoit'a B. Mandelbrot'a? Benoit B. Mandelbrot.",
"Dlaczego zawsze się uśmiechasz? To moje regularne wyrażenie twarzy.",
"Programistka miała problem. Pomyślała sobie, 'Wiem, rozwiążę to wątkami!'. ma Teraz problemy. ona dwa",
"Opowiedziałbym dowcip o UDP, ale nie wiem, czy by do Ciebie dotarł.",
"Testerka wchodzi do baru. Wbiega do baru. Wczołguje się do baru. Tańczy wchodząc do baru. Wchodzi tip-topami do baru. Szarżuje do baru.",
"Miałem problem, więc pomyślałem, że użyję Javy. Teraz mam FabrykaProblemow.",
"Tester wchodzi do baru. Zamawia piwo. Zamawia 0 piw. Zamawia 999999999 piw. Zamawia jaszczurkę. Zamawia -1 piw. Zamawia sfdeljknesv.",
"Kierowniczka projektu wchodzi do baru, zamawia drinka. Barman odmawia, ale pomyśli nad dodaniem go później.",
"Jak wygenerować prawdziwie losowy ciąg znaków? Posadź studenta pierwszego roku przed Vim'em i powiedz, żeby zapisał plik i wyłączył edytor.",
"Od dłuższego czasu używam Vim'a. Głównie dlatego, że nadal próbuję go wyłączyć.",
"Jak poznać, że ktoś używa Vim'a? Nie przejmuj się, sam Ci powie.",
"Kelner: On się krztusi! Czy jest na sali doktor? Programista: Jestem użytkownikiem Vim'a.",
"Trójka adminów baz danych wchodzi do NoSQL'owego baru. Po krótkim czasie rozeszli się, ponieważ nie mogli utworzyć relacji.",
"Jak opisać fabułę Incepcji programiście? Uruchamiasz maszynę wirtualną w wirtualce, wewnątrz innej wirtualki... wszystko działa wolno!",
"W informatyce są tylko dwa trudne problemy: unieważnianie pamięci podręcznej, nazewnictwo i pomyłki o 1.",
"Istnieje 10 rodzajów ludzi: Ci, którzy rozumieją kod binarny oraz Ci, którzy go nie rozumieją.",
"Istnieją 2 rodzaje ludzi: Ci, którzy potrafią ekstrapolować niekompletne zbiory danych...",
"Istnieją II rodzaje ludzi: Ci, którzy rozumieją liczby rzymskie i Ci, którzy ich nie rozumieją.",
"Istnieje 10 typów ludzi: Ci, którzy rozumieją system szesnastkowy oraz 15 pozostałych.",
"Istnieje 10 rodzajów ludzi: Ci, którzy rozumieją kod binarny, Ci którzy go nie rozumieją oraz Ci, co wiedzieli, że to o systemie trójkowym.",
"Istnieje 10 rodzajów ludzi: Ci, którzy rozumieją kod trójkowy, Ci, którzy go nie rozumieją oraz Ci, którzy nigdy o nim nie słyszeli.",
"Jak nazywa się ósemka hobbitów? Hobbajt.",
"Najlepsze w wartościach logicznych jest to, że nawet jeśli się pomylisz, to tylko o 1.",
"Dobry programista zawsze patrzy w obie strony przed przejściem przez ulicę jednokierunkową.",
"Są dwa sposoby pisania programów bez błędów. Tylko ten trzeci działa.",
"Zarządzanie jakością składa się w 55% z wody, 30% krwi i 15% ticketów z bugtrackera",
"Sympatyzowanie z Diabłem to tak naprawdę bycie uprzejmym dla Testerów.",
"Ilu Testerów potrzeba do zmiany żarówki? Oni zauważyli, że pokój jest ciemny. Nie rozwiązują problemów, tylko ich szukają.",
"Programista rozbił auto zjeżdżając z góry. Przechodzień spytał co się stało. \"Nie wiem. Wnieśmy go na górę i spróbujmy ponownie.\".",
"Pisanie w PHP jest jak sikanie do basenu. Wszyscy to robili, ale niekoniecznie trzeba się tym chwalić publicznie.",
"Dlaczego Tester przeszedł przez ulicę? Żeby zepsuć dzień wszystkim innym.",
"Ilość dni od ostatniego błędu indeksowania tablicy: -1.",
"Ilość dni od ostatniej pomyłki o 1: 0.",
"Szybkie randki są bez sensu. 5 minut to zbyt mało czasu, aby prawidłowo wyjaśnić filozofię Unix'a.",
"Microsoft co dwa miesiące organizuje \"tydzień produktywności\", podczas którego używają Google zamiast Bing'a",
"Podejście Schroedinger'a do budowy stron www: Jeśli nie oglądasz tego w Internet Explorerze, jest szansa, że będzie wyglądało dobrze.",
"Szukanie dobrego programisty PHP jest jak szukanie igły w stogu siana. Czy raczej stogu siana w igle?",
"Unix jest bardzo przyjazny użytkownikom. Po prostu jest również bardzo wybredny przy wyborze przyjaciół.",
"Programistka COBOL'a zarabia miliony naprawiając problem roku 2000. Decyduje się zamrozić siebie. \"Mamy rok 9999. Znasz COBOL'a, prawda?\"",
"Język C łączy w sobie potęgę asemblera z prostotą użycia asemblera.",
"Ekspert SEO wchodzi do baru, bar, pub, miesce spotkań, browar, Irlandzki pub, tawerna, barman, piwo, gorzała, wino, alkohol, spirytus...",
"Co mają wspólnego pyjokes oraz Adobe Flash? Wciąż otrzymują aktualizacje, ale nigdy nie stają się lepsze.",
"Dlaczego Waldo nosi tylko paski? Bo nie chce się znaleźć w kropce.",
"Szedłem raz ulicą, przy której domy były ponumerowane 8k, 16k, 32k, 64k, 128k, 256k i 512k. To była podróż Aleją Pamięci.",
"!false, (To zabawne, bo to prawda)",
]
"""
Jokes below come from the "jokes_en.py" file.
Translation to Polish: Tomasz Rozynek - provided under CC BY-SA 3.0
"""
chuck = [
"Kiedy Chuck Norris rzuca wyjątek, to leci on przez cały pokój.",
"Wszystkie tablice, które deklaruje Chuck Norris są nieskończonego rozmiaru, ponieważ Chuck Norris nie zna granic.",
"Chuck Norris nie ma opóźnień w dysku twardym, ponieważ dysk twardy wie, że musi się spieszyć, żeby nie wkurzyć Chucka Norrisa.",
"Chuck Norris pisze kod, który sam się optymalizuje.",
"Chuck Norris nie porównuje, ponieważ nie ma sobie równych.",
"Chuck Norris nie potrzebuje garbage collector'a, ponieważ nie wywołuje .Dispose(), tylko .DropKick().",
"Pierwszym programem Chucka Norrisa było kill -9.",
"Chuck Norris przebił bańkę dot com'ów.",
"Wszystkie przeglądarki wspierają kolory #chuck oraz #norris, oznaczające czarny i niebieski.",
"MySpace tak naprawdę nie jest Twój, tylko Chuck'a. Po prostu pozwala Ci go używać.",
"Chuck Norris może pisać funkcje rekurencyjne bez warunku stopu, które zawsze wracają.",
"Chuck Norris może rozwiązać wieże Hanoi w jednym ruchu.",
"Chuck Norris zna tylko jeden wzorzec projektowy: Boski obiekt.",
"Chuck Norris ukończył World of Warcraft.",
"Kierownicy projektu nigdy nie pytają Chucka Norrisa o oszacowania.",
"Chuck Norris nie dostosowuje się do standardów webowych, ponieważ to one dostosowują się do niego.",
"'U mnie to działa' jest zawsze prawdą w przypadku Chucka Norrisa.",
"Chuck Norris nie używa diagramów wyżarzania, tylko uderzania.",
"Chuck Norris może usunąć Kosz.",
"Broda Chucka Norrisa może pisać 140 słów na minutę.",
"Chuck Norris może przetestować całą aplikację jedną asercją: 'działa'.",
"Chuck Norris nie szuka błędów, ponieważ to sugeruje, że może ich nie znaleźć. On likwiduje błędy.",
"Klawiatura Chucka Norris'a nie ma klawisza Ctrl, ponieważ nic nie kontroluje Chucka Norrisa.",
"Chuck Norris może przepełnić Twój stos samym spojrzeniem.",
"Dla Chucka Norrisa wszystko zawiera podatności.",
"Chuck Norris nie używa sudo. Powłoka wie, że to on i po prostu robi co jej każe.",
"Chuck Norris nie używa debuggera. Patrzy na kod tak długo, aż sam wyzna błędy.",
"Chuck Norris ma dostęp do prywatnych metod.",
"Chuck Norris może utworzyć obiekt klasy abstrakcyjnej.",
"Chuck Norris nie potrzebuje fabryki klas. On instancjonuje interfejsy.",
"Klasa Object dziedziczy po Chucku Norrisie.",
"Dla Chucka Norrisa problemy NP-trudne mają złożoność O(1).",
"Chuck Norris zna ostatnią cyfrę rozwinięcia dziesiętnego Pi.",
"Łącze internetowe Chucka Norrisa szybciej wysyła, niż pobiera, ponieważ nawet dane się go boją.",
"Chuck Norris rozwiązał problem komiwojażera w czasie stałym: rozbij komiwojażera na N kawałków, po czym wykop każdy do innego miasta.",
"Żadne wyrażenie nie może obsłużyć ChuckNorrisException.",
"Chuck Norris nie programuje w parach. Pracuje sam.",
"Chuck Norris potrafi pisać aplikacje wielowątkowe przy użyciu jednego wątku.",
"Chuck Norris nie musi używać AJAX'a, ponieważ strony i tak są przerażone jego zwykłymi żądaniami.",
"Chuck Norris nie używa refleksji. To refleksje uprzejmie proszą go o pomoc.",
"Klawiatura Chucka Norrisa nie ma klawisza Escape, ponieważ nikt nie ucieknie przed Chuckiem Norrisem.",
"Chuck Norris może użyć wyszukiwania binarnego na nieposortowanym kontenerze.",
"Chuck Norris nie musi łapać wyjątków. Są zbyt przerażone, by się pokazać.",
"Chuck Norris wyszedł z nieskończonej pętli.",
"Jeśli Chuck Norris napisze kod z błędami, to one same się poprawią.",
"Hosting Chucka Norrisa ma SLA na poziomie 101%.",
"Klawiatura Chucka Norrisa ma klawisz 'Dowolny'.",
"Chuck Norris może dostać się do bazy danych bezpośrednio przez interfejs użytkownika.",
"Programy Chucka Norrisa się nie kończą, tylko giną.",
"Chuck Norris nalega na używanie języków silnie typowanych.",
"Chuck Norris projektuje protokoły bez statusów, żądań, czy odpowiedzi. Definiuje tylko polecenia.",
"Programy Chucka Norrisa zajmują 150% procesora, nawet gdy nie są uruchomione.",
"Chuck Norris uruchamia wątki, które kończą swoje zadanie, zanim się poprawnie uruchomią.",
"Programy Chucka Norrisa nie akceptują wejścia.",
"Chuck Norris może zainstalować iTunes bez QuickTime'a.",
"Chuck Norris nie potrzebuje systemu operacyjnego.",
"Model OSI Chucka Norrisa ma tylko jedną warstwę - fizyczną.",
"Chuck Norris może poprawnie kompilować kod z błędami składniowymi.",
"Każde zapytanie SQL Chucka Norrisa zawiera implikowany 'COMMIT'.",
"Chuck Norris nie potrzebuje rzutowania. Kompilator Chucka Norrisa (KCN) dostrzega wszystko. Do samego końca. Zawsze.",
"Chuck Norris nie wykonuje kodu w cyklach, tylko w uderzeniach.",
"Chuck Norris kompresuje pliki przez kopnięcie dysku twardego z półobrotu.",
"Chuck Norris rozwiązał problem stopu.",
"Dla Chucka Norrisa P = NP. Jego decyzje są zawsze deterministyczne.",
"Chuck Norris może pobrać wszystko z /dev/null.",
"Nikomu nie udało się programować z Chuckiem Norrisem i wyjść z tego żywym.",
"Nikomu nie udało się odezwać podczas przeglądu kodu Chucka Norrisa i wyjść z tego żywym.",
"Chuck Norris nie używa interfejsów graficznych. On rozkazuje z wiersza poleceń.",
"Chuck Norris nie używa Oracle'a. On JEST Wyrocznią.",
"Chuck Norris może dokonać dereferencji NULL'a.",
"Lista różnic pomiędzy Twoim kodem oraz kodem Chucka Norrisa jest nieskończona.",
"Chuck Norris napisał wtyczkę do Eclipsa, która dokonała pierwszego kontaktu z obcą cywilizacją.",
"Chuck Norris jest ostatecznym semaforem. Wszystkie wątki się go boją.",
"Nie przejmuj się testami. Przypadki testowe Chucka Norrisa pokrywają również Twój kod.",
"Każdy włos z brody Chucka Norrisa ma swój wkład w największy na świecie atak DDOS.",
"Komunikaty w loggerze Chucka Norrisa zawsze mają poziom FATAL.",
"Jeśli Chuck Norris zepsuje build'a, nie uda Ci się go naprawić, ponieważ nie została ani jedna linijka kodu.",
"Chuck Norris pisze jednym palcem. Wskazuje nim na klawiaturę, a ona robi resztę roboty.",
"Programy Chucka Norrisa przechodzą test Turinga po prostu patrząc się na sędziego.",
"Jeśli spróbujesz zabić program Chucka Norrisa, to on zabije Ciebie.",
"Chuck Norris wykonuje nieskończone pętle w mniej niż 4 sekundy.",
"Chuck Norris może nadpisać zmienną zablokowaną semaforem.",
"Chuck Norris zna wartość NULL. Może też po niej sortować.",
"Chuck Norris może zainstalować 64-bitowy system operacyjny na 32-bitowych maszynach.",
"Chuck Norris może pisać do strumieni wyjściowych.",
"Chuck Norris może czytać ze strumieni wejściowych.",
"Chuck Norris nie musi kompilować swojego kodu. Maszyny nauczyły się interpretować kod Chuck Norrisa.",
"Chuck Norris jest powodem Niebieskiego Ekranu Śmierci.",
"Chuck Norris może utworzyć klasę, które jest jednocześnie abstrakcyjna i finalna.",
"Chuck Norris może użyć czegokolwiek z java.util.*, żeby Cię zabić. Nawet javadocs'ów.",
"Kod działa szybciej, gdy obserwuje go Chuck Norris.",
"Wszyscy lubią profil Chucka Norrisa na Facebook'u, czy im się to podoba, czy nie.",
"Nie możesz śledzić Chucka Norrisa na Twitterze, ponieważ to on śledzi Ciebie.",
"Kalkulator Chucka Norrisa ma tylko 3 klawisze: 0, 1 i NAND.",
"Chuck Norris używa tylko zmiennych globalnych. Nie ma nic do ukrycia.",
"Chuck Norris raz zaimplementował cały serwer HTTP, używając tylko jednego printf'a. Projekt wciąż się rozwija i jest znany pod nazwą Apache.",
"Chuck Norris pisze bezpośrednio w kodzie binarnym. Potem pisze kod źródłowy, jako dokumentację dla innych programistów.",
"Chuck Norris raz przesunął bit tak mocno, że wylądował w innym komputerze.",
"Jak nazywa się ulubiony framework Chucka Norrisa? Knockout.js.",
]
jokes_pl = {
'neutral': neutral,
'chuck': chuck,
'all': neutral + chuck,
}
| 77.969849
| 147
| 0.743942
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15,071
| 0.929391
|
36dc5f7d17dd68b5094396174551645ca5e9fe1c
| 2,335
|
py
|
Python
|
pele_platform/Utilities/Helpers/launcher.py
|
gabrii/pele_platform
|
64ef9affdd1740fc2e0545c706f30eb2723aa300
|
[
"Apache-2.0"
] | null | null | null |
pele_platform/Utilities/Helpers/launcher.py
|
gabrii/pele_platform
|
64ef9affdd1740fc2e0545c706f30eb2723aa300
|
[
"Apache-2.0"
] | null | null | null |
pele_platform/Utilities/Helpers/launcher.py
|
gabrii/pele_platform
|
64ef9affdd1740fc2e0545c706f30eb2723aa300
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass
import pele_platform.Checker.main as ck
import pele_platform.Frag.simulation as fr
import pele_platform.Adaptive.simulation as ad
from pele_platform.Allosteric.main import run_allosteric
import pele_platform.gpcr.main as gpcr
import pele_platform.out_in.main as outin
from pele_platform.PPI.main import run_ppi
import pele_platform.Utilities.Parameters.pele_env as pv
import argparse
@dataclass
class Launcher:
_args: argparse.ArgumentParser
frag: str="frag"
ppi: str="PPI"
allosteric: str="allosteric"
gpcr_orth: str="gpcr_orth"
out_in: str="out_in"
adaptive: str="adaptive"
def launch(self) -> pv.EnviroBuilder:
# Launch package from input.yaml
self._define_package_to_run()
job_variables = self.launch_package(self._args.package, no_check=self._args.no_check)
return job_variables
def launch_package(self, package: str, no_check=False) -> pv.EnviroBuilder:
# Launch package from API
if not no_check:
ck.check_executable_and_env_variables(self._args)
if package == self.adaptive:
job_variables = ad.run_adaptive(self._args)
elif package == self.gpcr_orth:
job_variables = gpcr.GpcrLauncher(self._args).run_gpcr_simulation()
elif package == self.out_in:
job_variables = outin.OutInLauncher(self._args).run_gpcr_simulation()
elif package == self.allosteric:
job_variables = run_allosteric(self._args)
elif package == self.ppi:
job_variables = run_ppi(self._args)
elif package == self.frag:
# Set variables and input ready
job_variables = fr.FragRunner(self._args).run_simulation()
return job_variables
def _define_package_to_run(self) -> None:
# Define package being run from input.yaml flags
if self._args.frag_core:
self._args.package = self.frag
elif self._args.ppi:
self._args.package = self.ppi
elif self._args.allosteric:
self._args.package = self.allosteric
elif self._args.gpcr_orth:
self._args.package = self.gpcr_orth
elif self._args.out_in:
self._args.package = self.out_in
else:
self._args.package = self.adaptive
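# A minimal usage sketch (not part of the original module). The real entry point builds
# the args object from input.yaml with pele_platform's own parser, so the helper name
# below and the attribute set (package, no_check, frag_core, ...) are assumptions taken
# only from how Launcher reads them above:
#
#     args = parse_input_yaml("input.yaml")                 # hypothetical helper
#     job = Launcher(args).launch()                         # picks the package and runs it
#     job = Launcher(args).launch_package("adaptive", no_check=True)   # API-style call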
| 37.063492
| 93
| 0.68137
| 1,903
| 0.814989
| 0
| 0
| 1,914
| 0.8197
| 0
| 0
| 188
| 0.080514
|
36dc9e14a8dd2c4fe9c4599b6173dd0c635c5f35
| 1,607
|
py
|
Python
|
Project/AuditReport/Aw/audit_aw.py
|
StarryHome/MultiToolsPlatform
|
2bd2b7e0700dbf542f0272ece3590a4afde328a4
|
[
"MIT"
] | null | null | null |
Project/AuditReport/Aw/audit_aw.py
|
StarryHome/MultiToolsPlatform
|
2bd2b7e0700dbf542f0272ece3590a4afde328a4
|
[
"MIT"
] | null | null | null |
Project/AuditReport/Aw/audit_aw.py
|
StarryHome/MultiToolsPlatform
|
2bd2b7e0700dbf542f0272ece3590a4afde328a4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .pandas_aw import PandasAw
class AuditAw(object):
"""
@summary: 审核AW类,负责审核规则整体流程
"""
def __init__(self):
self.result = []
def audit_report(self, visit_data_path, visit_demand_path, outliers_path):
"""
@summary: 审核报告入口
"""
        # 1. Load the data sources
visit_data_title, visit_data_list, visit_demand = self.read_excel(visit_data_path, visit_demand_path)
        # 2. Audit each record
for data in visit_data_list:
res = self.audit_process(data)
self.result.append(res)
        # 3. Write out the outliers list
return self.write_outliers(self.result, outliers_path)
def read_excel(self, visit_data_path, visit_demand_path):
"""
@summary: 读取excel数据
@param visit_data_path: 走访数据excel路径
@param visit_demand_path: 走访要求excel路径
"""
pd = PandasAw.get_instance()
pd.read(visit_data_path)
visit_data_title = pd.get_title()
visit_data_list = pd.get_data()
pd.read(visit_demand_path)
visit_demand = pd.get_data()
return visit_data_title, visit_data_list, visit_demand
def audit_process(self, data):
return 0
def write_outliers(self, result, outliers_path):
return True
if __name__ == '__main__':
audit = AuditAw()
visit_data_path = r'C:\Users\Administrator\Desktop\test\数据源.xlsx'
visit_demand_path = r'C:\Users\Administrator\Desktop\test\走访要求.xlsx'
outliers_path = r'C:\Users\Administrator\Desktop\test\异常值清单.xlsx'
audit.audit_report(visit_data_path, visit_demand_path, outliers_path)
| 30.903846
| 109
| 0.655881
| 1,315
| 0.757925
| 0
| 0
| 0
| 0
| 0
| 0
| 548
| 0.31585
|
36de3480ccf6ebc0ee5035bf6d2e1a0522de31d5
| 812
|
py
|
Python
|
libs/subsets_of_subset.py
|
nishio/atcoder
|
8db36537b5d8580745d5f98312162506ad7d7ab4
|
[
"MIT"
] | 1
|
2021-03-09T04:28:13.000Z
|
2021-03-09T04:28:13.000Z
|
libs/subsets_of_subset.py
|
nishio/atcoder
|
8db36537b5d8580745d5f98312162506ad7d7ab4
|
[
"MIT"
] | null | null | null |
libs/subsets_of_subset.py
|
nishio/atcoder
|
8db36537b5d8580745d5f98312162506ad7d7ab4
|
[
"MIT"
] | null | null | null |
"""
all subsets of given subset
"""
def subsets_of_subset(subset):
    s = subset
    superset = subset
    while True:
        yield s
        # (s - 1) clears the lowest set bit and sets all bits below it; masking with
        # superset keeps only bits of the original subset, so s walks through every
        # subset in decreasing numeric order.
        s = (s - 1) & superset
        # After 0 has been yielded, (0 - 1) & superset == superset again: stop.
        if s == superset:
            break
# --- end of library ---
def debugprint(g):
for x in g:
print(f"{x:06b}")
TEST_1 = """
>>> debugprint(subsets_of_subset(0b010101))
010101
010100
010001
010000
000101
000100
000001
000000
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
print(k)
doctest.run_docstring_examples(g[k], g, name=k)
if __name__ == "__main__":
import sys
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
if sys.argv[-1] == "-t":
_test()
sys.exit()
| 15.615385
| 59
| 0.566502
| 0
| 0
| 174
| 0.214286
| 0
| 0
| 0
| 0
| 197
| 0.242611
|
36df1c98bfbc556da9445e6a4a358b0bfd225e8a
| 9,312
|
py
|
Python
|
models/backbone.py
|
kakaobrain/sparse-detr
|
1ea7a062ca6d1dd57768d65b14352cfd1a65ab52
|
[
"Apache-2.0"
] | 83
|
2021-11-29T04:45:39.000Z
|
2022-03-30T13:39:46.000Z
|
models/backbone.py
|
kakaobrain/sparse-detr
|
1ea7a062ca6d1dd57768d65b14352cfd1a65ab52
|
[
"Apache-2.0"
] | 4
|
2021-12-18T21:24:50.000Z
|
2022-03-18T07:22:04.000Z
|
models/backbone.py
|
kakaobrain/sparse-detr
|
1ea7a062ca6d1dd57768d65b14352cfd1a65ab52
|
[
"Apache-2.0"
] | 3
|
2021-12-29T12:07:20.000Z
|
2022-02-11T08:26:16.000Z
|
# ------------------------------------------------------------------------------------
# Sparse DETR
# Copyright (c) 2021 KakaoBrain. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------------------
"""
Backbone modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from models import swin_transformer
from util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
class FrozenBatchNorm2d(torch.nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
Copy-paste from torchvision.misc.ops with added eps before rsqrt,
without which any other models than torchvision.models.resnet[18,34,50,101]
produce nans.
"""
def __init__(self, n, eps=1e-5):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
self.eps = eps
def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs):
num_batches_tracked_key = prefix + 'num_batches_tracked'
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super(FrozenBatchNorm2d, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict,
missing_keys, unexpected_keys, error_msgs)
def forward(self, x):
# move reshapes to the beginning
# to make it fuser-friendly
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
eps = self.eps
scale = w * (rv + eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
class BackboneBase(nn.Module):
def __init__(self, backbone: nn.Module, train_backbone: bool, return_interm_layers: bool, args):
# TODO: args -> duplicated args
super().__init__()
if 'none' in args.backbone:
self.strides = [1] # not used, actually (length only matters)
self.num_channels = [3]
return_layers = self.get_return_layers('identity', (0,))
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
elif 'resnet' in args.backbone:
if not args.backbone_from_scratch and not args.finetune_early_layers:
print("Freeze early layers.")
for name, parameter in backbone.named_parameters():
if not train_backbone or all([k not in name for k in ['layer2', 'layer3', 'layer4']]):
parameter.requires_grad_(False)
else:
print('Finetune early layers as well.')
layer_name = "layer"
if return_interm_layers:
return_layers = self.get_return_layers(layer_name, (2, 3, 4))
self.strides = [8, 16, 32]
self.num_channels = [512, 1024, 2048]
else:
return_layers = self.get_return_layers(layer_name, (4,))
self.strides = [32]
self.num_channels = [2048]
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
        elif 'swin' in args.backbone:
            # Compute per-stage channel counts first so both branches below can use them.
            num_channels = [int(backbone.embed_dim * 2 ** i) for i in range(backbone.num_layers)]
            if return_interm_layers:
                return_layers = [2, 3, 4]
                self.strides = [8, 16, 32]
                self.num_channels = num_channels[1:]
            else:
                return_layers = [4]
                self.strides = [32]
                self.num_channels = num_channels[-1:]
self.body = backbone
else:
raise ValueError(f"Unknown backbone name: {args.backbone}")
@staticmethod
def get_return_layers(name: str, layer_ids):
return {name + str(n): str(i) for i, n in enumerate(layer_ids)}
def forward(self, tensor_list: NestedTensor):
xs = self.body(tensor_list.tensors)
out: Dict[str, NestedTensor] = {}
for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask)
return out
class DummyBackbone(torch.nn.Module):
def __init__(self):
super().__init__()
self.identity0 = torch.nn.Identity()
class Backbone(BackboneBase):
"""ResNet backbone with frozen BatchNorm."""
def __init__(self, name: str,
train_backbone: bool,
return_interm_layers: bool,
dilation: bool,
args):
print(f"Backbone: {name}")
pretrained = is_main_process() and not args.backbone_from_scratch and not args.scrl_pretrained_path
if not pretrained:
print("Train backbone from scratch.")
else:
print("Load pretrained weights")
if "none" in name:
backbone = DummyBackbone()
elif "resnet" in name:
            assert name not in ("resnet18", "resnet34"), "number of channels is hard-coded"
backbone = getattr(torchvision.models, name)(
replace_stride_with_dilation=[False, False, dilation],
pretrained=pretrained, norm_layer=FrozenBatchNorm2d)
elif "swin" in name:
assert not dilation, "not supported"
if not args.backbone_from_scratch and not args.finetune_early_layers:
print("Freeze early layers.")
frozen_stages = 2
else:
print('Finetune early layers as well.')
frozen_stages = -1
if return_interm_layers:
out_indices = [1, 2, 3]
else:
out_indices = [3]
backbone = swin_transformer.build_model(
name, out_indices=out_indices, frozen_stages=frozen_stages, pretrained=pretrained)
else:
raise ValueError(f"Unknown backbone name: {args.backbone}")
if args.scrl_pretrained_path:
assert "resnet" in name, "Currently only resnet50 is available."
ckpt = torch.load(args.scrl_pretrained_path, map_location="cpu")
translate_map = {
"encoder.0" : "conv1",
"encoder.1" : "bn1",
"encoder.4" : "layer1",
"encoder.5" : "layer2",
"encoder.6" : "layer3",
"encoder.7" : "layer4",
}
state_dict = {
translate_map[k[:9]] + k[9:] : v
for k, v in ckpt["online_network_state_dict"].items()
if "encoder" in k
}
backbone.load_state_dict(state_dict, strict=False)
super().__init__(backbone, train_backbone, return_interm_layers, args)
if dilation and "resnet" in name:
self.strides[-1] = self.strides[-1] // 2
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super().__init__(backbone, position_embedding)
self.strides = backbone.strides
self.num_channels = backbone.num_channels
def forward(self, tensor_list: NestedTensor):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for name, x in sorted(xs.items()):
out.append(x)
# position encoding
for x in out:
pos.append(self[1](x).to(x.tensors.dtype))
return out, pos
def test_backbone(backbone):
imgs = [
torch.randn(2, 3, 633, 122),
torch.randn(2, 3, 322, 532),
torch.randn(2, 3, 236, 42),
]
return [backbone(img).shape for img in imgs]
def build_backbone(args):
# test_backbone(torchvision.models.resnet50())
position_embedding = build_position_encoding(args)
train_backbone = args.lr_backbone > 0
return_interm_layers = args.masks or (args.num_feature_levels > 1)
backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation, args)
model = Joiner(backbone, position_embedding)
return model
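# A hedged construction sketch (not from the original repo): build_backbone expects an
# argparse-style namespace, and the exact field set comes from the project's main parser,
# so the parser name below is hypothetical and the fields are assumptions taken only from
# the attributes read above (backbone, lr_backbone, masks, num_feature_levels, dilation,
# backbone_from_scratch, finetune_early_layers, scrl_pretrained_path) plus whatever
# build_position_encoding needs:
#
#     args = get_args_parser().parse_args([])    # hypothetical project parser
#     args.backbone = "resnet50"
#     backbone = build_backbone(args)
#     samples = NestedTensor(torch.randn(2, 3, 224, 224),
#                            torch.zeros(2, 224, 224, dtype=torch.bool))
#     features, pos = backbone(samples)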
| 38.962343
| 107
| 0.578393
| 7,508
| 0.806271
| 0
| 0
| 134
| 0.01439
| 0
| 0
| 1,966
| 0.211125
|
36dfbf17404b4b4327586ca76fafeed5dd3aea90
| 496
|
py
|
Python
|
resonate-carla/risk_calculation/function_test.py
|
scope-lab-vu/Resonate-Dynamic-Risk
|
46972bdb0a2b6b08cc188a9f1f6567971c9d263d
|
[
"MIT"
] | 3
|
2021-08-15T05:02:17.000Z
|
2022-03-16T11:25:45.000Z
|
resonate-carla/risk_calculation/function_test.py
|
scope-lab-vu/Resonate-Dynamic-Risk
|
46972bdb0a2b6b08cc188a9f1f6567971c9d263d
|
[
"MIT"
] | null | null | null |
resonate-carla/risk_calculation/function_test.py
|
scope-lab-vu/Resonate-Dynamic-Risk
|
46972bdb0a2b6b08cc188a9f1f6567971c9d263d
|
[
"MIT"
] | 2
|
2021-03-21T02:35:17.000Z
|
2021-06-02T22:40:07.000Z
|
from bowtie_diagram import BowTie
import matplotlib.pyplot as plt
EXAMPLE_MONITOR_VALUES = [x for x in range(-5, 21)]
bowtie = BowTie()
state = {"monitor_values": {"lec_martingale": None}}
true_y_vals = []
true_x_vals = []
for x_val in EXAMPLE_MONITOR_VALUES:
true_x_vals.append(x_val)
state["monitor_values"]["lec_martingale"] = x_val
true_y_vals.append(bowtie.prob_b1(state))
plt.scatter(true_x_vals, true_y_vals)
plt.xlabel("Log Martingale")
plt.ylabel("P(B1 | S)")
plt.show()
| 24.8
| 53
| 0.737903
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 91
| 0.183468
|
36dfdaf21f66ae9305bc8e42cb69c1de214c4d13
| 3,760
|
py
|
Python
|
hw_asr/model/dsmodel.py
|
ivan-gorin/asr_project_template
|
6a9f908d7f287ac2a66d5740fa6c73133825845a
|
[
"MIT"
] | null | null | null |
hw_asr/model/dsmodel.py
|
ivan-gorin/asr_project_template
|
6a9f908d7f287ac2a66d5740fa6c73133825845a
|
[
"MIT"
] | null | null | null |
hw_asr/model/dsmodel.py
|
ivan-gorin/asr_project_template
|
6a9f908d7f287ac2a66d5740fa6c73133825845a
|
[
"MIT"
] | null | null | null |
#from https://www.assemblyai.com/blog/end-to-end-speech-recognition-pytorch/
from torch import nn
import torch.nn.functional as F
from hw_asr.base import BaseModel
class CNNLayerNorm(nn.Module):
def __init__(self, n_feats):
super().__init__()
self.layer_norm = nn.LayerNorm(n_feats)
def forward(self, x):
# x (batch, channel, feature, time)
x = x.transpose(2, 3).contiguous() # (batch, channel, time, feature)
x = self.layer_norm(x)
return x.transpose(2, 3).contiguous() # (batch, channel, feature, time)
class ResidualCNN(nn.Module):
"""inspired by https://arxiv.org/pdf/1603.05027.pdf
"""
def __init__(self, in_channels, out_channels, kernel, stride, dropout, n_feats):
super().__init__()
self.do_residual = in_channels != out_channels
if self.do_residual:
self.residual = nn.Conv2d(in_channels, out_channels, kernel_size=1)
self.net = nn.Sequential(
CNNLayerNorm(n_feats),
nn.GELU(),
nn.Dropout(dropout),
nn.Conv2d(in_channels, out_channels, kernel_size=kernel, stride=stride, padding=kernel//2),
CNNLayerNorm(n_feats),
nn.GELU(),
nn.Dropout(dropout),
nn.Conv2d(out_channels, out_channels, kernel_size=kernel, stride=stride, padding=kernel // 2)
)
def forward(self, x):
if self.do_residual:
residual = self.residual(x)
else:
residual = x
x = self.net(x)
x += residual
return x # (batch, channel, feature, time)
class BidirectionalGRU(nn.Module):
def __init__(self, rnn_dim, hidden_size, dropout, batch_first=True):
super().__init__()
self.BiGRU = nn.GRU(
input_size=rnn_dim, hidden_size=hidden_size,
num_layers=1, batch_first=batch_first, bidirectional=True)
self.layer_norm = nn.LayerNorm(rnn_dim)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
x = self.layer_norm(x)
x = F.gelu(x)
x, _ = self.BiGRU(x)
x = self.dropout(x)
return x
class DeepSpeechModel(BaseModel):
def __init__(self, n_cnn_layers, n_rnn_layers, rnn_dim, n_class, n_feats, stride=2, kernel_size=3, dropout=0.1):
super(DeepSpeechModel, self).__init__(n_feats, n_class)
n_feats = n_feats // 2
        self.cnn = nn.Conv2d(1, 32, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2)
layers = []
for _ in range(n_cnn_layers):
layers.append(ResidualCNN(32, 32, kernel=3, stride=1, dropout=dropout, n_feats=n_feats))
self.cnn_net = nn.Sequential(*layers)
self.fully_connected = nn.Linear(n_feats * 32, rnn_dim)
layers = [BidirectionalGRU(rnn_dim=rnn_dim, hidden_size=rnn_dim, dropout=dropout)]
for _ in range(n_rnn_layers - 1):
layers.append(BidirectionalGRU(rnn_dim=rnn_dim*2, hidden_size=rnn_dim, dropout=dropout))
self.rnn_net = nn.Sequential(*layers)
self.classifier = nn.Sequential(
nn.Linear(rnn_dim * 2, rnn_dim),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(rnn_dim, n_class)
)
def forward(self, spectrogram, *args, **kwargs):
x = spectrogram.transpose(1, 2).unsqueeze(1)
x = self.cnn(x)
x = self.cnn_net(x)
sizes = x.size()
x = x.view(sizes[0], sizes[1] * sizes[2], sizes[3]) # (batch, feature, time)
x = x.transpose(1, 2) # (batch, time, feature)
x = self.fully_connected(x)
x = self.rnn_net(x)
x = self.classifier(x)
return x
def transform_input_lengths(self, input_lengths):
return input_lengths // 2
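# A rough shape-check sketch (assumes hw_asr.base.BaseModel only stores n_feats/n_class,
# which is not verified here). Based on the transposes in forward(), the spectrogram input
# appears to be (batch, time, n_feats), and the stride-2 stem conv halves the time axis:
#
#     model = DeepSpeechModel(n_cnn_layers=3, n_rnn_layers=5, rnn_dim=512,
#                             n_class=28, n_feats=128)
#     spec = torch.randn(4, 200, 128)                               # (batch, time, n_feats)
#     logits = model(spec)                                          # -> (4, 100, 28)
#     out_len = model.transform_input_lengths(torch.tensor([200]))  # -> 100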
| 35.471698
| 116
| 0.614894
| 3,582
| 0.95266
| 0
| 0
| 0
| 0
| 0
| 0
| 317
| 0.084309
|
36e0a5cff93ebca1eb7f6ddcf88fb764dd56d580
| 9,198
|
py
|
Python
|
reminders/menu.py
|
elanorstark/reminder_pi
|
e6419f9bce29a1a06e0fee1b9e79156779a08c8b
|
[
"MIT"
] | null | null | null |
reminders/menu.py
|
elanorstark/reminder_pi
|
e6419f9bce29a1a06e0fee1b9e79156779a08c8b
|
[
"MIT"
] | null | null | null |
reminders/menu.py
|
elanorstark/reminder_pi
|
e6419f9bce29a1a06e0fee1b9e79156779a08c8b
|
[
"MIT"
] | null | null | null |
import datetime
from typing import List
from reminders.events import Buttons, Alerts
from reminders.screen import Screen
# highest level, things that can be in a list menu
class ListMenuItem:
def __init__(self, name):
self._name = str(name)
@property
def name(self):
return self._name
def set_name(self, name):
self._name = str(name)
def selected(self):
pass
# an item in a menu that does something other than going to another menu
class ActionItem(ListMenuItem):
def __init__(self, name, action):
super().__init__(name)
self.action = action
def selected(self):
self.action()
# an action item that is displayed on a menu with a checkbox
class ToggleableItem(ActionItem):
def __init__(self, name, is_selected, toggle, pad_width=9):
super().__init__(name.ljust(pad_width), toggle)
self.is_selected = is_selected
@property
def name(self):
return self._name + ("[×]" if self.is_selected() else "[ ]")
# parent for menus that can be displayed as their own screen
class Menu(ListMenuItem):
menu_stack = []
def __init__(self, name):
super().__init__(name)
def display(self):
Screen.text_screen(self.name + "\n" + "-" * len(self.name))
def handle_button_press(self, button):
pass
def handle_time(self):
pass
# returns current menu, ie top of stack
@staticmethod
def current():
return Menu.menu_stack[-1]
# adds the top level menu to the stack
@staticmethod
def initialise(menu):
Menu.menu_stack = [menu]
# when back button is pressed - go back to previous level of menu
@staticmethod
def back():
if len(Menu.menu_stack) > 1:
Menu.menu_stack.pop()
# menu for the home screen
# no back button available
class HomeMenu(Menu):
translation = Buttons.home_menu_buttons
def __init__(self, main_menu):
super().__init__("Home")
self.main_menu = main_menu
def handle_time(self):
self.display()
def handle_button_press(self, button):
button = HomeMenu.translation[button]
if button == "home":
# go to main menu
Menu.menu_stack.append(self.main_menu)
elif button == "backlight":
Menu.menu_stack.append(BacklightOffMenu())
def display(self):
now = datetime.datetime.now()
Screen.home_screen(self.name, now.strftime("%H:%M"), now.strftime("%a %d %b"))
# menu that stores and displays a list of ListMenuItem
class ListMenu(Menu):
translation = Buttons.list_menu_buttons
# initialise a MenuList
def __init__(self, name: str, items):
super().__init__(name)
self.unevaluated = items
self.items: List[ListMenuItem] = [ActionItem("..", Menu.back)]
self.position = 0
# decides what to do depending on which button was pressed
# a = select, b = up menu, y = down menu, x = home screen
def handle_button_press(self, button):
button = ListMenu.translation[button]
if button == "select":
# select
self.items[self.position].selected()
elif button == "up":
# up
self.position -= 1
self.position %= len(self.items)
elif button == "down":
# down
self.position += 1
self.position %= len(self.items)
elif button == "home":
# home/toplevel button
Menu.menu_stack = Menu.menu_stack[:1]
# displays menu on screen
def display(self, title=None):
if not title:
title = self.name
self.items = [ActionItem("..", Menu.back)] + self.unevaluated()
self.position = min(len(self.items) - 1, self.position)
text = ""
for i, item in enumerate(self.items):
if i == self.position:
text += "> {}\n".format(item.name)
else:
text += " {}\n".format(item.name)
print(title, "\n", text)
Screen.menu_screen(title, text)
# adds menu to the stack when selected
def selected(self):
Menu.menu_stack.append(self)
self.position = 0
# menu for reaching the task time editing menu, and to edit on and complete
class TaskMenu(ListMenu):
def __init__(self, task):
self.task = task
super().__init__(self.task.name, self.task_options)
def display(self, title=None):
title = "Edit " + self.name
super(TaskMenu, self).display(title)
def task_options(self):
options = [
TimeMenu(self.task),
ToggleableItem("On", lambda: self.task.on, self.task.on_toggle)
]
if self.task.on:
options.append(ToggleableItem("Complete", lambda: self.task.complete, self.task.complete_toggle))
return options
# menu for editing a task's time
class TimeMenu(ListMenu):
units_stages = [1, 5, 10]
menu_stages = ["Hours", "Minutes", "Save/Cancel"]
translation = Buttons.time_menu_buttons
def __init__(self, task):
super().__init__(task.get_task_time().strftime("Time %H:%M"), lambda: [])
self.task = task
self.time = task.get_task_time()
self.menu_stage = 0
self.units_stage = 0
def display(self, title="Edit Time"):
Screen.multi_line_text(
[Screen.TextLine(title, 1),
Screen.TextLine("Unit change: {}".format(TimeMenu.units_stages[self.units_stage]), 0),
Screen.TextLine(self.time.strftime("%H:%M"), 2, align="c"),
Screen.TextLine(TimeMenu.menu_stages[self.menu_stage], 1, align="c")])
def change_task_time(self):
self.menu_stage = 0
self.task.set_task_time(self.task.get_task_time().replace(hour=self.time.hour, minute=self.time.minute))
self.set_name(self.time.strftime("Time %H:%M"))
Alerts.sort_alerts()
def hour_change(self, difference):
self.time = self.time.replace(hour=(self.time.hour + difference) % 24)
def minute_change(self, difference):
self.time = self.time.replace(minute=(self.time.minute + difference) % 60)
def handle_button_press(self, button):
button = TimeMenu.translation[button]
if button == "next":
self.menu_stage += 1
self.menu_stage %= len(TimeMenu.menu_stages)
if button == "decrease":
if TimeMenu.menu_stages[self.menu_stage] == "Hours":
self.hour_change(-1)
elif TimeMenu.menu_stages[self.menu_stage] == "Minutes":
self.minute_change(0 - TimeMenu.units_stages[self.units_stage])
elif TimeMenu.menu_stages[self.menu_stage] == "Save/Cancel":
self.change_task_time()
super().handle_button_press("a")
if button == "units":
self.units_stage += 1
self.units_stage %= len(TimeMenu.units_stages)
if button == "increase":
if TimeMenu.menu_stages[self.menu_stage] == "Hours":
self.hour_change(1)
elif TimeMenu.menu_stages[self.menu_stage] == "Minutes":
self.minute_change(TimeMenu.units_stages[self.units_stage])
elif TimeMenu.menu_stages[self.menu_stage] == "Save/Cancel":
super().handle_button_press("a")
def selected(self):
super().selected()
self.menu_stage = 0
self.units_stage = 0
# menu which is put at top of stack when backlight is turned off
class BacklightOffMenu(Menu):
def __init__(self):
super().__init__("Backlight")
def display(self):
Screen.off()
def handle_button_press(self, button):
if button == "x":
Menu.menu_stack.pop()
Screen.toggle_backlight()
# menu to display alert and delay or mark complete
class AlertMenu(Menu):
translation = Buttons.alert_menu_buttons
def __init__(self, task, delay=datetime.timedelta(minutes=1)):
super().__init__(task.name)
self.task = task
self.delayed_for = 0
self.delay_period = delay
def display(self):
if self.delayed_for > 0:
Screen.multi_line_text(
[Screen.TextLine(self.name, 1), Screen.TextLine("Delaying until:", 0, uniform_y=True),
Screen.TextLine(self.task.get_task_time().strftime("%H:%M"), 1),
Screen.TextLine(" ", 0), Screen.TextLine("Delayed for", 0),
Screen.TextLine(str(self.delayed_for * self.delay_period), 0)])
else:
Screen.multi_line_text(
[Screen.TextLine(self.name, 1), Screen.TextLine("Alert time:", 0, uniform_y=True),
Screen.TextLine(self.task.get_task_time().strftime("%H:%M"), 1)])
def handle_button_press(self, button):
button = AlertMenu.translation[button]
if button == "dismiss":
Menu.menu_stack.pop()
elif button == "delay":
self.task.delay(self.delay_period)
self.delayed_for += 1
self.display()
elif button == "complete":
self.task.complete_toggle()
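# A rough wiring sketch (not part of the module): the surrounding app presumably builds the
# menu tree, calls Menu.initialise() once, and then forwards button/time events to
# Menu.current(); the main_menu construction and the tasks variable below are illustrative
# assumptions only.
#
#     main_menu = ListMenu("Tasks", lambda: [TaskMenu(t) for t in tasks])
#     Menu.initialise(HomeMenu(main_menu))
#     Menu.current().display()
#     Menu.current().handle_button_press("a")   # routed through the Buttons translations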
| 31.717241
| 112
| 0.607741
| 8,466
| 0.920317
| 0
| 0
| 393
| 0.042722
| 0
| 0
| 1,401
| 0.152299
|
36e0bc6b9fd730df1ea36e866d1ae6f2849b3e37
| 2,127
|
py
|
Python
|
custom_components/goldair_climate/configuration.py
|
jwhite/homeassistant-goldair-climate
|
cca1831a1d257c507f3831ca053478e1e7643c75
|
[
"MIT"
] | 8
|
2019-05-31T00:17:13.000Z
|
2021-01-12T21:43:21.000Z
|
custom_components/goldair_climate/configuration.py
|
jwhite/homeassistant-goldair-climate
|
cca1831a1d257c507f3831ca053478e1e7643c75
|
[
"MIT"
] | 25
|
2019-07-04T06:46:30.000Z
|
2021-07-15T03:13:46.000Z
|
custom_components/goldair_climate/configuration.py
|
nicole-ashley/homeassistant-goldair-climate
|
df5f895db20d826b673142e785477944497d9402
|
[
"MIT"
] | 9
|
2019-11-22T02:46:55.000Z
|
2021-07-04T21:57:41.000Z
|
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_NAME
from .const import (
CONF_CHILD_LOCK,
CONF_CLIMATE,
CONF_DEVICE_ID,
CONF_DISPLAY_LIGHT,
CONF_LOCAL_KEY,
CONF_TYPE,
CONF_TYPE_AUTO,
CONF_TYPE_DEHUMIDIFIER,
CONF_TYPE_FAN,
CONF_TYPE_GECO_HEATER,
CONF_TYPE_GPCV_HEATER,
CONF_TYPE_GPPH_HEATER,
)
INDIVIDUAL_CONFIG_SCHEMA_TEMPLATE = [
{"key": CONF_NAME, "type": str, "required": True, "option": False},
{"key": CONF_HOST, "type": str, "required": True, "option": True},
{"key": CONF_DEVICE_ID, "type": str, "required": True, "option": False},
{"key": CONF_LOCAL_KEY, "type": str, "required": True, "option": True},
{
"key": CONF_TYPE,
"type": vol.In(
[
CONF_TYPE_AUTO,
CONF_TYPE_GPPH_HEATER,
CONF_TYPE_DEHUMIDIFIER,
CONF_TYPE_FAN,
CONF_TYPE_GECO_HEATER,
CONF_TYPE_GPCV_HEATER,
]
),
"required": False,
"default": CONF_TYPE_AUTO,
"option": True,
},
{
"key": CONF_CLIMATE,
"type": bool,
"required": False,
"default": True,
"option": True,
},
{
"key": CONF_DISPLAY_LIGHT,
"type": bool,
"required": False,
"default": False,
"option": True,
},
{
"key": CONF_CHILD_LOCK,
"type": bool,
"required": False,
"default": False,
"option": True,
},
]
def individual_config_schema(defaults={}, options_only=False):
output = {}
for prop in INDIVIDUAL_CONFIG_SCHEMA_TEMPLATE:
if options_only and not prop.get("option"):
continue
options = {}
default = defaults.get(prop["key"], prop.get("default"))
if default is not None:
options["default"] = default
key = (
vol.Required(prop["key"], **options)
if prop["required"]
else vol.Optional(prop["key"], **options)
)
output[key] = prop["type"]
return output
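# A hedged usage sketch (illustrative only): the dict returned above is meant to be wrapped
# in a voluptuous Schema, e.g. by a Home Assistant config/options flow; entry_options is a
# hypothetical dict of previously saved options.
#
#     import voluptuous as vol
#     DATA_SCHEMA = vol.Schema(individual_config_schema())
#     OPTIONS_SCHEMA = vol.Schema(individual_config_schema(defaults=entry_options,
#                                                          options_only=True))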
| 25.023529
| 76
| 0.550071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 325
| 0.152797
|
36e0e9a9d4e28937589b02ccb2d38ccef3931ed6
| 255
|
py
|
Python
|
20-29/nlp23.py
|
emergent/nlp100.python
|
636546ce1c4368faa6685eec315773c5c9fb424a
|
[
"Apache-2.0"
] | null | null | null |
20-29/nlp23.py
|
emergent/nlp100.python
|
636546ce1c4368faa6685eec315773c5c9fb424a
|
[
"Apache-2.0"
] | null | null | null |
20-29/nlp23.py
|
emergent/nlp100.python
|
636546ce1c4368faa6685eec315773c5c9fb424a
|
[
"Apache-2.0"
] | null | null | null |
from nlp20 import get_england
import re
text = get_england()
lines = text.split('\n')
pattern = re.compile(r'^(=+)\s*(.+?)\s*=+')
for line in lines:
    m = pattern.search(line)
    if m is not None:
        level = len(m.group(1)) - 1
        print(m.group(2), level)
| 19.615385
| 37
| 0.556863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 25
| 0.098039
|
36e117d0d57d57188bd69bce4d500df94875ceb8
| 4,913
|
py
|
Python
|
platform_reports/prometheus_grammars.py
|
neuro-inc/platform-reports
|
161c18733370235af0b63a772de49343e956c35c
|
[
"Apache-2.0"
] | null | null | null |
platform_reports/prometheus_grammars.py
|
neuro-inc/platform-reports
|
161c18733370235af0b63a772de49343e956c35c
|
[
"Apache-2.0"
] | 9
|
2021-12-23T03:10:40.000Z
|
2022-03-31T03:15:52.000Z
|
platform_reports/prometheus_grammars.py
|
neuro-inc/platform-reports
|
161c18733370235af0b63a772de49343e956c35c
|
[
"Apache-2.0"
] | null | null | null |
PROMQL = """
start: query
// Binary operations are defined separately in order to support precedence
?query\
: or_match
| matrix
| subquery
| offset
?or_match\
: and_unless_match
| or_match OR grouping? and_unless_match
?and_unless_match\
: comparison_match
| and_unless_match (AND | UNLESS) grouping? comparison_match
?comparison_match\
: sum_match
| comparison_match /==|!=|>=|<=|>|</ BOOL? grouping? sum_match
?sum_match\
: product_match
| sum_match /\\+|-/ grouping? product_match
?product_match\
: unary
| product_match /\\*|\\/|%/ grouping? unary
?unary\
: power_match
| /\\+|-/ power_match
?power_match\
: atom
| atom /\\^/ grouping? power_match
?atom\
: function
| aggregation
| instant_query
| NUMBER
| STRING
| "(" query ")"
// Selectors
instant_query\
: METRIC_NAME ("{" label_matcher_list? "}")? -> instant_query_with_metric
| "{" label_matcher_list "}" -> instant_query_without_metric
label_matcher_list: label_matcher ("," label_matcher)*
label_matcher: label_name /=~|=|!=|!~/ STRING
matrix: query "[" DURATION "]"
subquery: query "[" DURATION ":" DURATION? "]"
offset: query OFFSET DURATION
// Function
function: function_name parameter_list
parameter_list: "(" (query ("," query)*)? ")"
?function_name\
: ABS
| ABSENT
| ABSENT_OVER_TIME
| CEIL
| CHANGES
| CLAMP_MAX
| CLAMP_MIN
| DAY_OF_MONTH
| DAY_OF_WEEK
| DAYS_IN_MONTH
| DELTA
| DERIV
| EXP
| FLOOR
| HISTOGRAM_QUANTILE
| HOLT_WINTERS
| HOUR
| IDELTA
| INCREASE
| IRATE
| LABEL_JOIN
| LABEL_REPLACE
| LN
| LOG2
| LOG10
| MINUTE
| MONTH
| PREDICT_LINEAR
| RATE
| RESETS
| ROUND
| SCALAR
| SORT
| SORT_DESC
| SQRT
| TIME
| TIMESTAMP
| VECTOR
| YEAR
| AVG_OVER_TIME
| MIN_OVER_TIME
| MAX_OVER_TIME
| SUM_OVER_TIME
| COUNT_OVER_TIME
| QUANTILE_OVER_TIME
| STDDEV_OVER_TIME
| STDVAR_OVER_TIME
// Aggregations
aggregation\
: aggregation_operator parameter_list
| aggregation_operator (by | without) parameter_list
| aggregation_operator parameter_list (by | without)
by: BY label_name_list
without: WITHOUT label_name_list
?aggregation_operator\
: SUM
| MIN
| MAX
| AVG
| GROUP
| STDDEV
| STDVAR
| COUNT
| COUNT_VALUES
| BOTTOMK
| TOPK
| QUANTILE
// Vector one-to-one/one-to-many joins
grouping: (on | ignoring) (group_left | group_right)?
on: ON label_name_list
ignoring: IGNORING label_name_list
group_left: GROUP_LEFT label_name_list
group_right: GROUP_RIGHT label_name_list
// Label names
label_name_list: "(" (label_name ("," label_name)*)? ")"
?label_name: keyword | LABEL_NAME
?keyword\
: AND
| OR
| UNLESS
| BY
| WITHOUT
| ON
| IGNORING
| GROUP_LEFT
| GROUP_RIGHT
| OFFSET
| BOOL
| aggregation_operator
| function_name
// Keywords
// Function names
ABS: "abs"
ABSENT: "absent"
ABSENT_OVER_TIME: "absent_over_time"
CEIL: "ceil"
CHANGES: "changes"
CLAMP_MAX: "clamp_max"
CLAMP_MIN: "clamp_min"
DAY_OF_MONTH: "day_of_month"
DAY_OF_WEEK: "day_of_week"
DAYS_IN_MONTH: "days_in_month"
DELTA: "delta"
DERIV: "deriv"
EXP: "exp"
FLOOR: "floor"
HISTOGRAM_QUANTILE: "histogram_quantile"
HOLT_WINTERS: "holt_winters"
HOUR: "hour"
IDELTA: "idelta"
INCREASE: "increase"
IRATE: "irate"
LABEL_JOIN: "label_join"
LABEL_REPLACE: "label_replace"
LN: "ln"
LOG2: "log2"
LOG10: "log10"
MINUTE: "minute"
MONTH: "month"
PREDICT_LINEAR: "predict_linear"
RATE: "rate"
RESETS: "resets"
ROUND: "round"
SCALAR: "scalar"
SORT: "sort"
SORT_DESC: "sort_desc"
SQRT: "sqrt"
TIME: "time"
TIMESTAMP: "timestamp"
VECTOR: "vector"
YEAR: "year"
AVG_OVER_TIME: "avg_over_time"
MIN_OVER_TIME: "min_over_time"
MAX_OVER_TIME: "max_over_time"
SUM_OVER_TIME: "sum_over_time"
COUNT_OVER_TIME: "count_over_time"
QUANTILE_OVER_TIME: "quantile_over_time"
STDDEV_OVER_TIME: "stddev_over_time"
STDVAR_OVER_TIME: "stdvar_over_time"
// Aggregation operators
SUM: "sum"
MIN: "min"
MAX: "max"
AVG: "avg"
GROUP: "group"
STDDEV: "stddev"
STDVAR: "stdvar"
COUNT: "count"
COUNT_VALUES: "count_values"
BOTTOMK: "bottomk"
TOPK: "topk"
QUANTILE: "quantile"
// Aggregation modifiers
BY: "by"
WITHOUT: "without"
// Join modifiers
ON: "on"
IGNORING: "ignoring"
GROUP_LEFT: "group_left"
GROUP_RIGHT: "group_right"
// Logical operators
AND: "and"
OR: "or"
UNLESS: "unless"
OFFSET: "offset"
BOOL: "bool"
NUMBER: /[0-9]+(\\.[0-9]+)?/
STRING\
: "'" /([^'\\\\]|\\\\.)*/ "'"
| "\\"" /([^\\"\\\\]|\\\\.)*/ "\\""
DURATION: DIGIT+ ("s" | "m" | "h" | "d" | "w" | "y")
METRIC_NAME: (LETTER | "_" | ":") (DIGIT | LETTER | "_" | ":")*
LABEL_NAME: (LETTER | "_") (DIGIT | LETTER | "_")*
%import common.DIGIT
%import common.LETTER
%import common.WS
%ignore WS
"""
| 17.996337
| 77
| 0.65113
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,903
| 0.997965
|
36e1fd31cd58507e88abf55b7c02a2da45a269b3
| 2,750
|
py
|
Python
|
usersmanagement/models.py
|
Open-CMMS/openCMMS_backend
|
56511ebac83a5dc1fb8768a98bc675e88530a447
|
[
"BSD-3-Clause"
] | 3
|
2021-03-08T19:14:38.000Z
|
2022-02-01T17:57:31.000Z
|
usersmanagement/models.py
|
Open-CMMS/openCMMS_backend
|
56511ebac83a5dc1fb8768a98bc675e88530a447
|
[
"BSD-3-Clause"
] | null | null | null |
usersmanagement/models.py
|
Open-CMMS/openCMMS_backend
|
56511ebac83a5dc1fb8768a98bc675e88530a447
|
[
"BSD-3-Clause"
] | null | null | null |
"""This file contain the model for the usermanagement app."""
from django.contrib.auth.models import AbstractUser, Group, Permission
from django.db import models
class UserProfile(AbstractUser):
"""
Define a user.
Here, we use heritage of abstract user and addition of the field nb_tries
to detect if the user use a false password to login.
"""
nb_tries = models.IntegerField(default=0)
USERNAME_FIELD = 'username'
class Meta:
"""Add metadata on the class."""
ordering = ('pk',)
def deactivate_user(self):
"""Deactivate a user."""
self.is_active = False
def reactivate_user(self):
"""Reactivate a user if it was deactivated, else, do nothing."""
if not self.is_active:
self.is_active = True
def __repr__(self):
"""Define formal representation of a user."""
return "<User: id={id}, username='{name}'>".format(id=self.id, name=self.username)
class TeamType(models.Model):
"""
Define a team type.
It inherits of Model class and redefine _apply_ and __str__ methods.
"""
name = models.CharField(max_length=200)
perms = models.ManyToManyField(
Permission,
verbose_name='Team Type permissions',
blank=True,
help_text='Specific permissions for this team type.',
related_name="teamType_set",
related_query_name="teamType"
)
def __str__(self):
"""Return the name of the teamtype."""
return self.name
def __repr__(self):
"""Define formal representation of a team type."""
return "<TeamType: id={id}, name='{name}', permissions={perms}>".format(
id=self.id, name=self.name, perms=self.perms
)
def _apply_(self):
teams_with_this_teamtype = self.team_set.all()
for team in teams_with_this_teamtype:
# team.permissions.set()
team.permissions.set(list(self.perms.all()))
class Team(Group):
"""
Define a team.
It inherits of Group class and define set_team_type.
"""
team_type = models.ForeignKey(
TeamType,
verbose_name="Team Type",
on_delete=models.CASCADE,
help_text='Group of users, extends the auth.models.Group model',
related_name="team_set",
related_query_name="team",
blank=False,
null=True
)
def set_team_type(self, new_team_type):
"""Assign the team type to the team."""
self.team_type = new_team_type
self.save()
new_team_type._apply_()
def __repr__(self):
"""Define formal representation of a team."""
return "<Team: id={id}, team_type='{name}'>".format(id=self.id, name=self.team_type)
| 28.350515
| 92
| 0.627273
| 2,579
| 0.937818
| 0
| 0
| 0
| 0
| 0
| 0
| 1,098
| 0.399273
|
36e3612bbbacdd9cd6a33c5bc043ceb7c94b8118
| 572
|
py
|
Python
|
resrc/utils/templatetags/gravatar.py
|
theWhiteFox/resrc
|
d62bcf3ba2a55f50ae38a1e606072ee3d6025da5
|
[
"MIT"
] | 274
|
2015-01-02T08:57:58.000Z
|
2022-03-11T11:44:44.000Z
|
resrc/utils/templatetags/gravatar.py
|
ninjaCheery/resrc
|
8af3a1a3617fd305a2c8aecffb609ed3e9c1addc
|
[
"MIT"
] | 8
|
2015-05-19T02:54:49.000Z
|
2016-07-07T18:10:40.000Z
|
resrc/utils/templatetags/gravatar.py
|
ninjaCheery/resrc
|
8af3a1a3617fd305a2c8aecffb609ed3e9c1addc
|
[
"MIT"
] | 112
|
2015-01-03T18:59:23.000Z
|
2019-10-08T11:49:18.000Z
|
# -*- coding: utf-8 -*-
from django import template
import urllib
import hashlib
register = template.Library()
def gravatar(email, size=80, username=None):
gravatar_url = "http://www.gravatar.com/avatar.php?"
gravatar_url += urllib.urlencode({
'gravatar_id': hashlib.md5(email).hexdigest(),
'size': str(size)
})
if username is not None:
return """<img src="%s" alt="gravatar for %s" />""" % (gravatar_url, username)
else:
return """<img src="%s" alt="gravatar" />""" % (gravatar_url)
register.simple_tag(gravatar)
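# Template usage sketch (assuming Django discovers this file as the "gravatar" tag library;
# note the urllib/hashlib calls above are Python 2 era):
#
#     {% load gravatar %}
#     {% gravatar user.email 64 user.username %}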
| 26
| 86
| 0.631119
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 161
| 0.281469
|
36e397fd23e48d333c1f759c070a0a56a3fe0024
| 11,149
|
py
|
Python
|
utils/torch_utils.py
|
misads/torch_image_template
|
db55be6fcebdb6b0c5c739e505b8a7a2eb81c3c1
|
[
"MIT"
] | 5
|
2019-12-23T05:13:15.000Z
|
2020-04-09T03:47:53.000Z
|
utils/torch_utils.py
|
misads/torch_image_template
|
db55be6fcebdb6b0c5c739e505b8a7a2eb81c3c1
|
[
"MIT"
] | null | null | null |
utils/torch_utils.py
|
misads/torch_image_template
|
db55be6fcebdb6b0c5c739e505b8a7a2eb81c3c1
|
[
"MIT"
] | null | null | null |
# encoding=utf-8
"""
Misc PyTorch utils
Author: xuhaoyu@tju.edu.cn
update 12.7
Usage:
`from torch_utils import *`
`func_name()` # to call functions in this file
"""
from datetime import datetime
import math
import os
import torch
import torch.nn as nn
from tensorboardX import SummaryWriter
##############################
# Functional utils
##############################
from utils.misc_utils import format_num
def clamp(x, min=0.01, max=0.99):
"""
value > max will be set to max
value < min will be set to min
:param x: input tensor
:param min:
:param max:
:return:
"""
return torch.clamp(x, min, max)
def repeat(x: torch.Tensor, *sizes):
"""
Example:
>>> t = repeat(t, 1, 3, 1, 1)
# t = t.repeat(1, 3, 1, 1) or t = torch.cat([t, t, t], dim=1)
:param x:
:param sizes:
:return:
"""
return x.repeat(*sizes)
def tensor2im(x: torch.Tensor, norm=False, dtype='float32'):
"""
:param x: [n, c, h, w] float32 type
:param dtype:
:return:
"""
if norm:
x = (x + 1) / 2
x[x > 1] = 1
x[x < 0] = 0
return x.detach().cpu().data[0]
##############################
# Network utils
##############################
def print_network(net: nn.Module, print_size=False):
num_params = 0
print(net)
for name, param in net.named_parameters():
num_params += param.numel()
size = list(param.size())
if len(size) > 1:
if print_size:
print(name, size[1:2]+size[:1]+size[2:], format_num(param.numel()))
else:
print(name, size[1:2] + size[:1] + size[2:])
print('Total number of parameters: %s' % format_num(num_params))
print('The size of receptive field: %s' % format_num(receptive_field(net)))
def receptive_field(net):
def _f(output_size, ksize, stride, dilation):
return (output_size - 1) * stride + ksize * dilation - dilation + 1
stats = []
for m in net.modules():
if isinstance(m, torch.nn.Conv2d):
stats.append((m.kernel_size, m.stride, m.dilation))
rsize = 1
for (ksize, stride, dilation) in reversed(stats):
if type(ksize) == tuple: ksize = ksize[0]
if type(stride) == tuple: stride = stride[0]
if type(dilation) == tuple: dilation = dilation[0]
rsize = _f(rsize, ksize, stride, dilation)
return rsize
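# Worked example for the recurrence above: two stacked 3x3, stride-1, dilation-1 convs give
# _f(1, 3, 1, 1) = 3 on the reversed pass and then _f(3, 3, 1, 1) = 5, i.e. a 5x5 receptive
# field, which matches the usual hand computation for two 3x3 convolutions.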
##############################
# Abstract Meters class
##############################
class Meters(object):
def __init__(self):
pass
def update(self, new_dic):
raise NotImplementedError
def __getitem__(self, key):
raise NotImplementedError
def keys(self):
raise NotImplementedError
def items(self):
return self.dic.items()
class AverageMeters(Meters):
"""
Example:
avg_meters = AverageMeters()
for i in range(100):
avg_meters.update({'f': i})
print(str(avg_meters))
"""
def __init__(self, dic=None, total_num=None):
self.dic = dic or {}
# self.total_num = total_num
self.total_num = total_num or {}
def update(self, new_dic):
for key in new_dic:
if not key in self.dic:
self.dic[key] = new_dic[key]
self.total_num[key] = 1
else:
self.dic[key] += new_dic[key]
self.total_num[key] += 1
# self.total_num += 1
def __getitem__(self, key):
return self.dic[key] / self.total_num[key]
def __str__(self):
keys = sorted(self.keys())
res = ''
for key in keys:
res += (key + ': %.4f' % self[key] + ' | ')
return res
def keys(self):
return self.dic.keys()
class ExponentialMovingAverage(Meters):
"""
Example:
ema_meters = ExponentialMovingAverage(0.98)
for i in range(100):
ema_meters.update({'f': i})
print(str(ema_meters))
"""
def __init__(self, decay=0.9, dic=None, total_num=None):
self.decay = decay
self.dic = dic or {}
# self.total_num = total_num
self.total_num = total_num or {}
def update(self, new_dic):
decay = self.decay
for key in new_dic:
if not key in self.dic:
self.dic[key] = (1 - decay) * new_dic[key]
self.total_num[key] = 1
else:
self.dic[key] = decay * self.dic[key] + (1 - decay) * new_dic[key]
self.total_num[key] += 1
# self.total_num += 1
def __getitem__(self, key):
return self.dic[key] # / self.total_num[key]
def __str__(self):
keys = sorted(self.keys())
res = ''
for key in keys:
res += (key + ': %.4f' % self[key] + ' | ')
return res
def keys(self):
return self.dic.keys()
##############################
# Checkpoint helper
##############################
def load_ckpt(model, ckpt_path):
"""
Example:
class Model(nn.Module):
....
model = Model().cuda()
load_ckpt(model, 'model.pt')
:param model: object of a subclass of nn.Module
:param ckpt_path: *.pt file to load
:return:
"""
model.load_state_dict(torch.load(ckpt_path))
def save_ckpt(model, ckpt_path):
"""
Example:
class Model(nn.Module):
....
model = Model().cuda()
save_ckpt(model, 'model.pt')
:param model: object of a subclass of nn.Module
:param ckpt_path: *.pt file to save
:return:
"""
torch.save(model.state_dict(), ckpt_path)
##############################
# LR_Scheduler
##############################
class LR_Scheduler(object):
"""Learning Rate Scheduler
Example:
>>> scheduler = LR_Scheduler('cosine', opt.lr, opt.epochs, len(dataloader), warmup_epochs=20)
>>> for i, data in enumerate(dataloader)
>>> scheduler(self.g_optimizer, i, epoch)
    Step mode: ``lr = baselr * 0.1 ^ {floor(epoch-1 / lr_step)}`` (lr is multiplied by 0.1 every lr_step epochs)
Cosine mode: ``lr = baselr * 0.5 * (1 + cos(iter/maxiter))``
Poly mode: ``lr = baselr * (1 - iter/maxiter) ^ 0.9``
iters_per_epoch: number of iterations per epoch
"""
def __init__(self, mode, base_lr, num_epochs, iters_per_epoch=0,
lr_step=0, warmup_epochs=0, logger=None):
"""
:param mode: `step` `cos` or `poly`
:param base_lr:
:param num_epochs:
:param iters_per_epoch:
:param lr_step: lr step to change lr/ for `step` mode
:param warmup_epochs:
:param logger:
"""
self.mode = mode
print('Using {} LR Scheduler!'.format(self.mode))
self.lr = base_lr
if mode == 'step':
assert lr_step
self.lr_step = lr_step
self.iters_per_epoch = iters_per_epoch
self.N = num_epochs * iters_per_epoch
self.epoch = -1
self.warmup_iters = warmup_epochs * iters_per_epoch
self.logger = logger
if logger:
self.logger.info('Using {} LR Scheduler!'.format(self.mode))
def __call__(self, optimizer, i, epoch):
T = epoch * self.iters_per_epoch + i
if self.mode == 'cos':
lr = 0.5 * self.lr * (1 + math.cos(1.0 * T / self.N * math.pi))
elif self.mode == 'poly':
lr = self.lr * pow((1 - 1.0 * T / self.N), 0.9)
elif self.mode == 'step':
lr = self.lr * (0.1 ** (epoch // self.lr_step))
else:
            raise NotImplementedError(self.mode)
# warm up lr schedule
if self.warmup_iters > 0 and T < self.warmup_iters:
lr = lr * 1.0 * T / self.warmup_iters
if epoch > self.epoch:
if self.logger:
self.logger.info('\n=>Epoches %i, learning rate = %.4f' % (epoch, lr))
else:
print('\nepoch: %d lr: %.6f' % (epoch, lr))
self.epoch = epoch
assert lr >= 0
self._adjust_learning_rate(optimizer, lr)
def _adjust_learning_rate(self, optimizer, lr):
if len(optimizer.param_groups) == 1:
optimizer.param_groups[0]['lr'] = lr
else:
# enlarge the lr at the head
optimizer.param_groups[0]['lr'] = lr
for i in range(1, len(optimizer.param_groups)):
optimizer.param_groups[i]['lr'] = lr * 10
"""
TensorBoard
Example:
writer = create_summary_writer(os.path.join(self.basedir, 'logs'))
write_meters_loss(writer, 'train', avg_meters, iteration)
write_loss(writer, 'train', 'F1', 0.78, iteration)
write_image(writer, 'train', 'input', img, iteration)
# shell
tensorboard --logdir {base_path}/logs
"""
def create_summary_writer(log_dir):
if not os.path.exists(log_dir):
os.makedirs(log_dir)
log_dir = os.path.join(log_dir, datetime.now().strftime('%m-%d_%H-%M-%S'))
if not os.path.exists(log_dir):
os.mkdir(log_dir)
writer = SummaryWriter(log_dir, max_queue=0, flush_secs=10)
return writer
def write_loss(writer: SummaryWriter, prefix, loss_name: str, value: float, iteration):
"""
Example:
write_loss(writer, 'train', 'F1', 0.78, iteration)
:param writer: writer created by create_summary_writer()
    :param prefix: tag prefix, e.g. 'train' to log the scalar under 'train/F1'
:param loss_name:
:param value:
:param iteration:
:return:
"""
writer.add_scalar(
os.path.join(prefix, loss_name), value, iteration)
def write_image(writer: SummaryWriter, prefix, image_name: str, img, iteration, dataformats='CHW'):
"""
Example:
write_image(writer, 'train', 'input', img, iteration)
:param writer: writer created by create_summary_writer()
:param prefix:
:param image_name:
    :param img: image tensor, channel-first by default, i.e. shape [C, H, W]
    :param iteration:
    :param dataformats: 'CHW', 'HWC' or 'NCHW'
:return:
"""
writer.add_image(
os.path.join(prefix, image_name), img, iteration, dataformats=dataformats)
def write_meters_loss(writer: SummaryWriter, prefix, avg_meters: Meters, iteration):
"""
Example:
writer = create_summary_writer(os.path.join(self.basedir, 'logs'))
ema_meters = ExponentialMovingAverage(0.98)
for i in range(100):
ema_meters.update({'f1': i, 'f2': i*0.5})
write_meters_loss(writer, 'train', ema_meters, i)
:param writer:
:param prefix:
:param avg_meters: avg_meters param should be a Meters subclass
:param iteration:
:return:
"""
for key in avg_meters.keys():
meter = avg_meters[key]
writer.add_scalar(
os.path.join(prefix, key), meter, iteration)
| 28.296954
| 101
| 0.544713
| 5,173
| 0.463489
| 0
| 0
| 0
| 0
| 0
| 0
| 4,759
| 0.426395
|
36e43c1fea8564dd6886b6925030fdbb9a39b677
| 19,421
|
py
|
Python
|
library/route_vpn.py
|
sebbbastien/ansible-stonesoft
|
ebc0d1c0720f8d79224ae58a80d3e9155bda4385
|
[
"Apache-2.0"
] | null | null | null |
library/route_vpn.py
|
sebbbastien/ansible-stonesoft
|
ebc0d1c0720f8d79224ae58a80d3e9155bda4385
|
[
"Apache-2.0"
] | null | null | null |
library/route_vpn.py
|
sebbbastien/ansible-stonesoft
|
ebc0d1c0720f8d79224ae58a80d3e9155bda4385
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2017 David LePage
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: route_vpn
short_description: Create a route based VPN
description:
- Create a route based VPN. Route VPN's are typically created between a managed
Stonesoft FW and a 3rd party device (AWS, Azure, etc). You must pre-create
the internal FW prior to running this module. If doing an IPSEC wrapped VPN,
you must also specify a tunnel interface for which to bind (must be pre-created)
and specify an IP address/interface id to specify the ISAKMP listener.
version_added: '2.5'
options:
name:
description:
- The name for this route VPN.
required: true
type: str
type:
description:
- The type of IPSEC vpn to create
type: str
choices: ['ipsec', 'gre']
default: ipsec
enabled:
description:
- Whether the VPN is enabled or disabled
type: bool
local_gw:
description:
- Represents the locally managed Stonesoft FW gateway. If the remote_gw is also
        a Stonesoft managed device, use the same parameters to define it.
type: str
suboptions:
name:
description:
- The name of the Stonesoft FW gateway
type: str
required: true
tunnel_interface:
description:
- The ID for the tunnel interface
type: str
required: true
interface_id:
description:
- The interface ID to enable IPSEC. If multiple IP addresses exist
on the interface, IPSEC will be enabled on all. Use I(interface_ip) as
an alternative.
type: str
required: true
address:
description:
- An interface IP addresses to enable IPSEC. If there are multiple IP addresses
on a single interface specified with I(interface_id) and you want to bind to
only that address
type: str
required: false
remote_gw:
description:
- The name of the remote GW. If the remote gateway is an Stonesoft FW, it must
pre-exist. Use the local_gw documentation for settings. If it is an External Gateway,
this module will create the gateway based on the gateway settings provided if it
doesn't already exist. This documents an External Gateway configuration. See also
the external_gateway module for additional external endpoint settings.
    type: dict
suboptions:
name:
description:
- The name of the External Gateway. If the gateway does not exist, it will be created
if you provide the I(address) and I(networks) parameters.
type: str
required: true
preshared_key:
description:
- If this is an External Gateway, you must provide a pre-shared key to be used between
the gateways. If the gateway is another Stonesoft FW, a key will be auto-generated.
type: str
type:
description:
- Set to external_gateway if this is an external gateway element type
type: str
vpn_site:
description:
- Defines the VPN site for the protected networks on other end of external gateway
type: dict
suboptions:
name:
description:
- Name of VPN site
type: str
required: true
network:
description:
- A valid element type from SMC. Typically this is network or host. List elements
should be valid names of the specified element
type: list
external_endpoint:
description:
- The external endpoint gateways where the RBVPN will terminate. Any options that are
            supported by the smc-python ExternalEndpoint.create constructor are supported values
for this definition
type: list
required: true
suboptions:
name:
description:
- Name of the external endpoint
type: str
required: True
address:
description:
- A valid IP address of the external gateway
type: str
required: true
enabled:
description:
- Whether to enable the gateway.
type: bool
tags:
description:
- Provide an optional category tag to the engine. If the category does not
exist, it will be created
type: list
state:
description:
- Specify a create or delete operation
required: false
default: present
choices:
- present
- absent
extends_documentation_fragment: stonesoft
notes:
- Login credential information is either obtained by providing them directly
to the task/play, specifying an alt_filepath to read the credentials from to
the play, or from environment variables (in that order). See
U(http://smc-python.readthedocs.io/en/latest/pages/session.html) for more
information.
requirements:
- smc-python
author:
- David LePage (@gabstopper)
'''
EXAMPLES = '''
- name: Route VPN between internal engine and 3rd party external gateway
register: result
route_vpn:
smc_logging:
level: 10
path: ansible-smc.log
enabled: true
local_gw:
address: 50.50.50.1
name: newcluster
tunnel_interface: '1001'
name: myrbvpn
remote_gw:
external_endpoint:
- address: 33.33.33.41
enabled: true
name: extgw3 (33.33.33.41)
connection_type: 'Active 1'
- address: 34.34.34.34
enabled: true
name: endpoint2 (34.34.34.34)
connection_type: 'Active 1'
- address: 44.44.44.44
enabled: false
name: extgw4 (44.44.44.44)
connection_type: 'Active 1'
- address: 33.33.33.50
enabled: false
name: endpoint1 (33.33.33.50)
connection_type: 'Active 1'
name: extgw3
preshared_key: '********'
type: external_gateway
vpn_site:
name: extgw3-site
network:
- network-172.18.15.0/24
- network-172.18.1.0/24
- network-172.18.2.0/24
- name: Create a new Route VPN with internal gateways
route_vpn:
smc_logging:
level: 10
path: ansible-smc.log
name: myrbvpn
type: ipsec
local_gw:
name: newcluster
tunnel_interface: 1001
interface_id: 1
#address: 2.2.2.2
remote_gw:
name: myfw
tunnel_interface: 1000
interface_id: 0
tags:
- footag
'''
RETURN = '''
changed:
description: Whether or not the change succeeded
returned: always
type: bool
state:
description: The current state of the element
    returned: always
type: dict
'''
import traceback
from ansible.module_utils.stonesoft_util import (
StonesoftModuleBase, Cache)
try:
from smc.vpn.route import RouteVPN, TunnelEndpoint
from smc.vpn.elements import ExternalGateway
from smc.core.engine import Engine
from smc.api.exceptions import SMCException
except ImportError:
pass
class StonesoftRouteVPN(StonesoftModuleBase):
def __init__(self):
self.module_args = dict(
name=dict(type='str', required=True),
type=dict(default='ipsec', type='str', choices=['ipsec', 'gre']),
local_gw=dict(type='dict'),
remote_gw=dict(type='dict'),
enabled=dict(type='bool'),
tags=dict(type='list'),
state=dict(default='present', type='str', choices=['present', 'absent'])
)
self.name = None
self.type = None
self.enabled = None
self.local_gw = None
self.remote_gw = None
self.tags = None
        required_if = [
            ('state', 'present', ['local_gw', 'remote_gw'])
        ]
self.results = dict(
changed=False,
state=[]
)
super(StonesoftRouteVPN, self).__init__(self.module_args, supports_check_mode=True,
required_if=required_if)
def exec_module(self, **kwargs):
state = kwargs.pop('state', 'present')
for name, value in kwargs.items():
setattr(self, name, value)
rbvpn = self.fetch_element(RouteVPN)
changed = False
if state == 'present':
# Short circuit disable
if rbvpn and self.enabled is not None and (rbvpn.enabled and not self.enabled):
rbvpn.disable()
self.results['changed'] = True
return self.results
local_engine = self.get_managed_gateway(self.local_gw)
local_tunnel_interface = self.get_tunnel_interface(
local_engine, self.local_gw.get('tunnel_interface'))
local_internal_endpoint = self.get_ipsec_endpoint(
local_engine, self.local_gw.get('interface_id'),
address=self.local_gw.get('address'))
if self.remote_gw.get('type', None) != 'external_gateway':
remote_engine = self.get_managed_gateway(self.remote_gw)
remote_tunnel_interface = self.get_tunnel_interface(
remote_engine, self.remote_gw.get('tunnel_interface'))
remote_internal_endpoint = self.get_ipsec_endpoint(
remote_engine, self.remote_gw.get('interface_id'),
address=self.remote_gw.get('address'))
else:
# External Gateway
req = ('name', 'preshared_key', 'external_endpoint')
for required in req:
if required not in self.remote_gw:
self.fail(msg='Missing required field for the external endpoint '
'configuration: %s' % required)
cache = Cache()
external_gateway = dict(name=self.remote_gw['name'])
# External Endpoints are defined in the External Gateway.
# Build the data structures for a call to ExternalGateway.update_or_create
ctypes = [] # connection_type element
for endpoint in self.remote_gw['external_endpoint']:
if 'name' not in endpoint or 'address' not in endpoint:
self.fail(msg='An external endpoint must have at least a '
'name and an address definition.')
# SMC version 6.5 requires the connection type element to specify
# the role for the given external endpoint
if 'connection_type' not in endpoint:
self.fail(msg='You must provide the connection_type parameter '
'when creating an external endpoint')
ctypes.append(endpoint.get('connection_type'))
cache.add(dict(connection_type=ctypes))
if cache.missing:
self.fail(msg=cache.missing)
# Verify specified VPN Sites exist before continuing
if 'vpn_site' in self.remote_gw:
site_name = self.remote_gw.get('vpn_site', {}).pop('name', None)
if not site_name:
self.fail(msg='A VPN site requires a name to continue')
# Get the elements
cache.add(self.remote_gw.get('vpn_site', {}))
vpn_site_types = self.remote_gw.get('vpn_site', {}).keys() # Save the VPN site types for retrieval
if cache.missing:
self.fail(msg='Could not find the specified elements for the '
'VPN site configuration: %s' % cache.missing)
site_element = [element.href for element_type in vpn_site_types
for element in cache.get_type(element_type)]
external_gateway.update(
vpn_site=[dict(name=site_name, site_element=site_element)])
external_endpoint = []
for endpoint in self.remote_gw['external_endpoint']:
endpoint.update(connection_type_ref=\
cache.get('connection_type',endpoint.pop('connection_type')).href)
external_endpoint.append(endpoint)
external_gateway.update(external_endpoint=external_endpoint)
try:
if state == 'present':
if self.check_mode:
return self.results
# Create the tunnel endpoints
if not rbvpn:
local_gateway = TunnelEndpoint.create_ipsec_endpoint(
local_engine.vpn.internal_gateway, local_tunnel_interface)
# Enable the IPSEC listener on specified interface/s
if self.update_ipsec_listener(local_internal_endpoint):
changed = True
is_external = self.remote_gw.get('type', None) == 'external_gateway'
if not is_external:
remote_gateway = TunnelEndpoint.create_ipsec_endpoint(
remote_engine.vpn.internal_gateway, remote_tunnel_interface)
if self.update_ipsec_listener(remote_internal_endpoint):
changed = True
else: # Update or Create
gw, updated, created = ExternalGateway.update_or_create(
with_status=True, **external_gateway)
remote_gateway = TunnelEndpoint.create_ipsec_endpoint(gw)
if created or updated:
changed = True
vpn = dict(
name=self.name,
local_endpoint=local_gateway,
remote_endpoint=remote_gateway)
if is_external:
vpn.update(preshared_key=self.remote_gw['preshared_key'])
rbvpn = RouteVPN.create_ipsec_tunnel(**vpn)
changed = True
else:
#TODO: Update or create from top level RBVPN
#rbvpn.update_or_create()
if rbvpn and self.enabled is not None and (not rbvpn.enabled and self.enabled):
rbvpn.enable()
changed = True
if self.remote_gw.get('type') == 'external_gateway':
gw, updated, created = ExternalGateway.update_or_create(
with_status=True, **external_gateway)
if updated or created:
changed = True
self.results['state'] = rbvpn.data.data
self.results['changed'] = changed
elif state == 'absent':
if rbvpn:
rbvpn.delete()
changed = True
except SMCException as err:
self.fail(msg=str(err), exception=traceback.format_exc())
self.results['changed'] = changed
return self.results
def get_ipsec_endpoint(self, engine, interface_id, address=None):
"""
Get the internal endpoint for which to enable IPSEC on for the
internal FW. This is required for IPSEC based RBVPN.
:param engine Engine: engine reference, already obtained
:param str interface_id: interface ID specified for IPSEC listener
:rtype: list(InternalEndpoint)
"""
try:
interface = engine.interface.get(interface_id)
except SMCException as e:
self.fail(msg='Fetch IPSEC interface for endpoint failed: %s' % str(e))
internal_endpoint = engine.vpn.internal_endpoint # Collection
endpoints = []
if address:
ep = internal_endpoint.get_exact(address)
if ep:
endpoints.append(ep)
else: # Get all endpoints for the interface
for addr, network, nicid in interface.addresses: # @UnusedVariable
if internal_endpoint.get_exact(addr):
endpoints.append(
internal_endpoint.get_exact(addr))
if not endpoints:
self.fail(msg='No IPSEC endpoint interfaces found. The specified '
'interface ID was: %s and address: %s' % (interface_id, address))
return endpoints
def update_ipsec_listener(self, internal_endpoints):
"""
Update the internal endpoint to enable the IPSEC listener on
the specified interface/s.
:param list(InternalEndpoint) internal_endpoints: internal endpoints
:rtype: bool
"""
changed = False
for endpoint in internal_endpoints:
if not endpoint.enabled:
endpoint.update(enabled=True)
changed = True
return changed
def get_tunnel_interface(self, engine, interface_id):
"""
Get the specified Tunnel Interface for the gateway.
:param engine Engine: engine ref
:param str interface_id: pulled from gateway yaml
:rtype: TunnelInterface
"""
tunnel_interface = None
for interface in engine.tunnel_interface:
if interface.interface_id == str(interface_id):
tunnel_interface = interface
break
if not tunnel_interface:
self.fail(msg='Cannot find specified tunnel interface: %s for specified gateway '
'%s' % (interface_id, engine.name))
return tunnel_interface
def get_managed_gateway(self, gw):
"""
If the gateway is a locally managed SMC gateway, tunnel interface and
an IPSEC interface is required.
:param dict local_gw,remote_gw: yaml definition
:rtype: Engine
"""
for req in ('name', 'tunnel_interface', 'interface_id'):
if req not in gw:
self.fail(msg='Managed gateway requires name, interface_id and '
'tunnel_interface fields')
managed_gw = Engine.get(gw.get('name'), raise_exc=False)
if not managed_gw:
self.fail(msg='The specified managed gateway specified does not '
'exist: %s' % gw.get('name'))
return managed_gw
def main():
StonesoftRouteVPN()
if __name__ == '__main__':
main()
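# A minimal removal sketch (values are placeholders), mirroring the EXAMPLES section above:
#   - name: Delete the route VPN
#     route_vpn:
#       name: myrbvpn
#       state: absent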
| 37.204981
| 118
| 0.563771
| 11,966
| 0.616137
| 0
| 0
| 0
| 0
| 0
| 0
| 9,916
| 0.510581
|
36e64aadf7aac130d35406e0cf99b998faa79a22
| 6,407
|
py
|
Python
|
tfx/experimental/pipeline_testing/pipeline_recorder_utils.py
|
ntakouris/tfx
|
deb618730dc7675c8e9dc75e03b8ca795d49653d
|
[
"Apache-2.0"
] | 1
|
2020-06-09T03:50:59.000Z
|
2020-06-09T03:50:59.000Z
|
tfx/experimental/pipeline_testing/pipeline_recorder_utils.py
|
tommywei110/tfx
|
2152534c81dbc06dc90de37c56e4d63bf810f150
|
[
"Apache-2.0"
] | null | null | null |
tfx/experimental/pipeline_testing/pipeline_recorder_utils.py
|
tommywei110/tfx
|
2152534c81dbc06dc90de37c56e4d63bf810f150
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Recording pipeline from MLMD metadata."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from typing import Iterable, List, Mapping, Optional, Text, Tuple
from absl import logging
import tensorflow as tf
from tfx.orchestration import metadata
from tfx.utils import io_utils
from ml_metadata.proto import metadata_store_pb2
def _get_paths(metadata_connection: metadata.Metadata, execution_ids: List[int],
output_dir: Text) -> Iterable[Tuple[Text, Text]]:
"""Yields tuple with source and destination artifact uris.
The destination artifact uris are located in the output_dir. The source
artifact uris are retrieved using execution ids.
Args:
metadata_connection: Instance of metadata.Metadata for I/O to MLMD.
execution_ids: List of execution ids of a pipeline run.
output_dir: Directory path where the pipeline outputs should be recorded.
Yields:
Iterable over tuples of source uri and destination uri.
"""
events = metadata_connection.store.get_events_by_execution_ids(execution_ids)
output_events = [
x for x in events if x.type == metadata_store_pb2.Event.OUTPUT
]
unique_artifact_ids = list({x.artifact_id for x in output_events})
for artifact in metadata_connection.store.get_artifacts_by_id(
unique_artifact_ids):
src_uri = artifact.uri
artifact_properties = artifact.custom_properties
component_id = artifact_properties['producer_component'].string_value
name = artifact_properties['name'].string_value
dest_uri = os.path.join(output_dir, component_id, name)
yield (src_uri, dest_uri)
def _get_execution_dict(
metadata_connection: metadata.Metadata
) -> Mapping[Text, List[metadata_store_pb2.Execution]]:
"""Returns a dictionary holding list of executions for all run_id in MLMD.
Args:
metadata_connection: Instance of metadata.Metadata for I/O to MLMD.
Returns:
A dictionary that holds list of executions for a run_id.
"""
execution_dict = collections.defaultdict(list)
for execution in metadata_connection.store.get_executions():
execution_run_id = execution.properties['run_id'].string_value
execution_dict[execution_run_id].append(execution)
return execution_dict
def _get_latest_executions(
metadata_connection: metadata.Metadata,
pipeline_name: Text) -> List[metadata_store_pb2.Execution]:
"""Fetches executions associated with the latest context.
Args:
metadata_connection: Instance of metadata.Metadata for I/O to MLMD.
    pipeline_name: Name of the pipeline to retrieve the latest executions for.
Returns:
List of executions for the latest run of a pipeline with the given
pipeline_name.
"""
pipeline_run_contexts = [
c for c in metadata_connection.store.get_contexts_by_type(
metadata._CONTEXT_TYPE_PIPELINE_RUN) # pylint: disable=protected-access
if c.properties['pipeline_name'].string_value == pipeline_name
]
latest_context = max(
pipeline_run_contexts, key=lambda c: c.last_update_time_since_epoch)
return metadata_connection.store.get_executions_by_context(latest_context.id)
def record_pipeline(output_dir: Text, metadata_db_uri: Optional[Text],
host: Optional[Text], port: Optional[int],
pipeline_name: Optional[Text],
run_id: Optional[Text]) -> None:
"""Record pipeline run with run_id to output_dir.
For the beam pipeline, metadata_db_uri is required. For KFP pipeline,
host and port should be specified. If run_id is not specified, then
pipeline_name ought to be specified in order to fetch the latest execution
for the specified pipeline.
Args:
output_dir: Directory path where the pipeline outputs should be recorded.
metadata_db_uri: Uri to metadata db.
host: Hostname of the metadata grpc server
port: Port number of the metadata grpc server.
pipeline_name: Pipeline name, which is required if run_id isn't specified.
run_id: Pipeline execution run_id.
Raises:
ValueError: In cases of invalid arguments:
- metadata_db_uri is None or host and/or port is None.
- run_id is None and pipeline_name is None.
FileNotFoundError: if the source artifact uri does not already exist.
"""
if host is not None and port is not None:
metadata_config = metadata_store_pb2.MetadataStoreClientConfig(
host=host, port=port)
elif metadata_db_uri is not None:
metadata_config = metadata.sqlite_metadata_connection_config(
metadata_db_uri)
else:
raise ValueError('For KFP, host and port are required. '
'For beam pipeline, metadata_db_uri is required.')
with metadata.Metadata(metadata_config) as metadata_connection:
if run_id is None:
if pipeline_name is None:
raise ValueError('If the run_id is not specified,'
' pipeline_name should be specified')
# fetch executions of the most recently updated execution context.
executions = _get_latest_executions(metadata_connection, pipeline_name)
else:
execution_dict = _get_execution_dict(metadata_connection)
if run_id in execution_dict:
executions = execution_dict[run_id]
else:
raise ValueError(
'run_id {} is not recorded in the MLMD metadata'.format(run_id))
execution_ids = [e.id for e in executions]
for src_uri, dest_uri in _get_paths(metadata_connection, execution_ids,
output_dir):
if not tf.io.gfile.exists(src_uri):
raise FileNotFoundError('{} does not exist'.format(src_uri))
io_utils.copy_dir(src_uri, dest_uri)
logging.info('Pipeline Recorded at %s', output_dir)
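# A minimal usage sketch (paths, pipeline_name and run_id below are hypothetical), following
# the record_pipeline docstring above: pass metadata_db_uri for a local Beam run, or host/port
# for a KFP deployment.
#   record_pipeline(
#       output_dir='/tmp/recorded_testdata',
#       metadata_db_uri='/tmp/tfx/metadata/my_pipeline/metadata.db',
#       host=None,
#       port=None,
#       pipeline_name='my_pipeline',
#       run_id=None)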
| 39.795031
| 82
| 0.74044
| 0
| 0
| 1,260
| 0.19666
| 0
| 0
| 0
| 0
| 2,973
| 0.464024
|
36e67ff06717bc841187da318c7c341f30def84e
| 16,034
|
py
|
Python
|
src/third_party/wiredtiger/lang/python/setup_pip.py
|
SunguckLee/real-mongodb
|
fef0e44fafc6d3709a84101327e7d2f54dd18d88
|
[
"Apache-2.0"
] | 4
|
2018-02-06T01:53:12.000Z
|
2018-02-20T01:47:36.000Z
|
src/third_party/wiredtiger/lang/python/setup_pip.py
|
SunguckLee/real-mongodb
|
fef0e44fafc6d3709a84101327e7d2f54dd18d88
|
[
"Apache-2.0"
] | null | null | null |
src/third_party/wiredtiger/lang/python/setup_pip.py
|
SunguckLee/real-mongodb
|
fef0e44fafc6d3709a84101327e7d2f54dd18d88
|
[
"Apache-2.0"
] | 3
|
2018-02-06T01:53:18.000Z
|
2021-07-28T09:48:15.000Z
|
#!/usr/bin/env python
#
# Public Domain 2014-2016 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# This script builds a Python source distribution that can be installed
# via pip install. This must be run in a git repository to determine the files
# to package. Also as a prerequisite, SWIG must be run as the generated files
# are part of the package. To create the distribution, in this directory, run
# "python setup_pip.py sdist", this creates a tar.gz file under ./dist .
from __future__ import print_function
import os, os.path, re, shutil, site, sys
from setuptools import setup, Distribution
from distutils.extension import Extension
import distutils.sysconfig
import distutils.ccompiler
from distutils.errors import CompileError, LinkError
import subprocess
from subprocess import call
import setuptools.command.install
import setuptools.command.build_ext
# msg --
# Print a message to stderr.
def msg(s):
print(os.path.basename(__file__) + ": " + s, file=sys.stderr)
# die --
# For failures, show a message and exit.
def die(s):
msg(s)
sys.exit(1)
# build_commands --
# Run a sequence of commands, and die if any fail.
def build_commands(commands, build_dir, build_env):
for command in commands:
callargs = [ 'sh', '-c', command ]
verbose_command = '"' + '" "'.join(callargs) + '"'
print('running: ' + verbose_command)
if call(callargs, cwd=build_dir, env=build_env) != 0:
die('build command failed: ' + verbose_command)
# check_needed_dependencies --
# Make a quick check of any needed library dependencies, and
# add to the library path and include path as needed. If a library
# is not found, it is not definitive.
def check_needed_dependencies(builtins, inc_paths, lib_paths):
library_dirs = get_library_dirs()
compiler = distutils.ccompiler.new_compiler()
distutils.sysconfig.customize_compiler(compiler)
compiler.set_library_dirs(library_dirs)
missing = []
for name, libname, instructions in builtins:
found = compiler.find_library_file(library_dirs, libname)
if found is None:
msg(libname + ": missing")
msg(instructions)
msg("after installing it, set LD_LIBRARY_PATH or DYLD_LIBRARY_PATH")
missing.append(libname)
else:
package_top = os.path.dirname(os.path.dirname(found))
inc_paths.append(os.path.join(package_top, 'include'))
lib_paths.append(os.path.join(package_top, 'lib'))
# XXX: we are not accounting for other directories that might be
# discoverable via /sbin/ldconfig. It might be better to write a tiny
# compile using -lsnappy, -lz...
#
#if len(missing) > 0:
# die("install packages for: " + str(missing))
# find_executable --
# Locate an executable in the PATH.
def find_executable(exename, path):
p = subprocess.Popen(['which', exename ], stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate('')
out = str(out) # needed for Python3
if out == '':
if err != '':
err = ': "' + err + '"'
die('"' + exename + '": not found in path' + err)
dirname = os.path.dirname(out)
if not dirname in path:
path.append(dirname)
# get_build_path --
# Create a PATH that can be used for installation. Apparently,
# installation commands are run with a restricted PATH, and
# autoreconf/aclocal will not normally be found.
def get_build_path():
build_paths = []
find_executable('autoreconf', build_paths)
find_executable('aclocal', build_paths)
build_path = os.environ['PATH'] + ':' + ':'.join(build_paths)
return build_path
# get_compile_flags --
# Get system specific compile flags. Return a triple: C preprocessor
# flags, C compilation flags and linker flags.
def get_compile_flags(inc_paths, lib_paths):
    # Suppress warnings building SWIG generated code
    cc = distutils.ccompiler.get_default_compiler()
    if sys.platform == 'win32' and cc == 'msvc':
cflags = ['/arch:SSE2', '/EHsc']
cppflags = []
ldflags = []
# Windows untested and incomplete, don't claim that it works.
die('Windows is not supported by this setup script')
else:
cflags = [ '-w', '-Wno-sign-conversion', '-std=c11' ]
cppflags = ['-I' + path for path in inc_paths]
cppflags.append('-DHAVE_CONFIG_H')
ldflags = ['-L' + path for path in lib_paths]
if sys.platform == 'darwin':
cflags.extend([ '-arch', 'x86_64' ])
return (cppflags, cflags, ldflags)
# get_sources_curdir --
# Get a list of sources from the current directory
def get_sources_curdir():
DEVNULL = open(os.devnull, 'w')
gitproc = subprocess.Popen(
['git', 'ls-tree', '-r', '--name-only', 'HEAD^{tree}'],
stdin=DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
sources = [line.rstrip() for line in gitproc.stdout.readlines()]
err = gitproc.stderr.read()
gitproc.wait()
subret = gitproc.returncode
if subret != 0 or err:
msg("git command to get sources returned " + str(subret) +
", error=" + str(err))
die("this command must be run in a git repository")
return sources
# get_wiredtiger_versions --
# Read the version information from the RELEASE_INFO file.
def get_wiredtiger_versions(wt_dir):
v = {}
for l in open(os.path.join(wt_dir, 'RELEASE_INFO')):
if re.match(r'WIREDTIGER_VERSION_(?:MAJOR|MINOR|PATCH)=', l):
exec(l, v)
wt_ver = '%d.%d' % (v['WIREDTIGER_VERSION_MAJOR'],
v['WIREDTIGER_VERSION_MINOR'])
wt_full_ver = wt_ver + '.%d' % (v['WIREDTIGER_VERSION_PATCH'])
return (wt_ver, wt_full_ver)
# get_library_dirs
# Build a plausible set of library directories.
def get_library_dirs():
dirs = []
dirs.append("/usr/local/lib")
dirs.append("/usr/local/lib64")
dirs.append("/lib/x86_64-linux-gnu")
dirs.append("/opt/local/lib")
dirs.append("/usr/lib")
dirs.append("/usr/lib64")
for path in ['LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH', 'LIBRARY_PATH']:
if path in os.environ:
dirs.extend(os.environ[path].split(':'))
dirs = list(set(filter(os.path.isdir, dirs)))
return dirs
# source_filter
# Make any needed changes to the sources list. Any entry that
# needs to be moved is returned in a dictionary.
def source_filter(sources):
result = []
movers = dict()
py_dir = os.path.join('lang', 'python')
pywt_dir = os.path.join(py_dir, 'wiredtiger')
pywt_prefix = pywt_dir + os.path.sep
for f in sources:
if not re.match(source_regex, f):
continue
src = f
dest = f
# move all lang/python files to the top level.
if dest.startswith(pywt_prefix):
dest = os.path.basename(dest)
if dest == 'pip_init.py':
dest = '__init__.py'
if dest != src:
movers[dest] = src
result.append(dest)
# Add SWIG generated files
result.append('wiredtiger.py')
movers['wiredtiger.py'] = os.path.join(pywt_dir, '__init__.py')
result.append(os.path.join(py_dir, 'wiredtiger_wrap.c'))
return result, movers
################################################################
# Do some initial setup and checks.
this_abs_script = os.path.abspath(__file__)
this_dir = os.path.dirname(this_abs_script)
pip_command = None
for arg in sys.argv[1:]:
if arg[0] != '-' and pip_command == None:
pip_command = arg
break
if this_dir.endswith(os.sep + os.path.join('lang', 'python')):
wt_dir = os.path.dirname(os.path.dirname(this_dir))
os.chdir(wt_dir)
elif os.path.isfile(os.path.join(this_dir, 'LICENSE')):
wt_dir = this_dir
else:
die('running from an unknown directory')
python3 = (sys.version_info[0] > 2)
if python3:
die('Python3 is not yet supported')
# Ensure that Extensions won't be built for 32 bit,
# that won't work with WiredTiger.
if sys.maxsize < 2**32:
die('need to be running on a 64 bit system, and have a 64 bit Python')
python_rel_dir = os.path.join('lang', 'python')
build_dir = os.path.join(wt_dir, 'build_posix')
makefile = os.path.join(build_dir, 'Makefile')
built_sentinal = os.path.join(build_dir, 'built.txt')
conf_make_dir = 'build_posix'
wt_swig_lib_name = os.path.join(python_rel_dir, '_wiredtiger.so')
################################################################
# Put together build options for the WiredTiger extension.
short_description = 'high performance, scalable, production quality, ' + \
'NoSQL, Open Source extensible platform for data management'
long_description = 'WiredTiger is a ' + short_description + '.\n\n' + \
open(os.path.join(wt_dir, 'README')).read()
wt_ver, wt_full_ver = get_wiredtiger_versions(wt_dir)
build_path = get_build_path()
# We only need a small set of directories to build a WT library,
# we also include any files at the top level.
source_regex = r'^(?:(?:api|build_posix|ext|lang/python|src|dist)/|[^/]*$)'
# The builtins that we include in this distribution.
builtins = [
# [ name, libname, instructions ]
[ 'snappy', 'snappy',
'Note: a suitable version of snappy can be found at\n' + \
' https://github.com/google/snappy/releases/download/' + \
'1.1.3/snappy-1.1.3.tar.gz\n' + \
'It can be installed via: yum install snappy snappy-devel' + \
'or via: apt-get install libsnappy-dev' ],
[ 'zlib', 'z',
'Need to install zlib\n' + \
'It can be installed via: apt-get install zlib1g' ]
]
builtin_names = [b[0] for b in builtins]
builtin_libraries = [b[1] for b in builtins]
# Here's the configure/make operations we perform before the python extension
# is linked.
configure_cmds = [
'./makemake --clean-and-make',
'./reconf',
# force building a position independent library; it will be linked
# into a single shared library with the SWIG interface code.
'CFLAGS="${CFLAGS:-} -fPIC -DPIC" ' + \
'../configure --enable-python --with-builtins=' + ','.join(builtin_names)
]
# build all the builtins, at the moment they are all compressors.
make_cmds = []
for name in builtin_names:
make_cmds.append('(cd ext/compressors/' + name + '/; make)')
make_cmds.append('make libwiredtiger.la')
inc_paths = [ os.path.join(build_dir, 'src', 'include'), build_dir, '.' ]
lib_paths = [ '.' ] # wiredtiger.so is moved into the top level directory
check_needed_dependencies(builtins, inc_paths, lib_paths)
cppflags, cflags, ldflags = get_compile_flags(inc_paths, lib_paths)
# If we are creating a source distribution, create a staging directory
# with just the right sources. Put the result in the python dist directory.
if pip_command == 'sdist':
sources, movers = source_filter(get_sources_curdir())
stage_dir = os.path.join(python_rel_dir, 'stage')
shutil.rmtree(stage_dir, True)
os.makedirs(stage_dir)
shutil.copy2(this_abs_script, os.path.join(stage_dir, 'setup.py'))
for f in sources:
d = os.path.join(stage_dir, os.path.dirname(f))
if not os.path.isdir(d):
os.makedirs(d)
if f in movers:
src = movers[f]
else:
src = f
# Symlinks are not followed in setup, we need to use real files.
shutil.copy2(src, os.path.join(stage_dir, f))
os.chdir(stage_dir)
sys.argv.append('--dist-dir=' + os.path.join('..', 'dist'))
else:
sources = [ os.path.join(python_rel_dir, 'wiredtiger_wrap.c') ]
wt_ext = Extension('_wiredtiger',
sources = sources,
extra_compile_args = cflags + cppflags,
extra_link_args = ldflags,
libraries = builtin_libraries,
extra_objects = [ os.path.join(build_dir, '.libs', 'libwiredtiger.a') ],
include_dirs = inc_paths,
library_dirs = lib_paths,
)
extensions = [ wt_ext ]
env = { "CFLAGS" : ' '.join(cflags),
"CPPFLAGS" : ' '.join(cppflags),
"LDFLAGS" : ' '.join(ldflags),
"PATH" : build_path }
class BinaryDistribution(Distribution):
def is_pure(self):
return False
class WTInstall(setuptools.command.install.install):
def run(self):
self.run_command("build_ext")
return setuptools.command.install.install.run(self)
class WTBuildExt(setuptools.command.build_ext.build_ext):
def __init__(self, *args, **kwargs):
setuptools.command.build_ext.build_ext.__init__(self, *args, **kwargs)
def run(self):
# only run this once
if not os.path.isfile(built_sentinal):
try:
os.remove(makefile)
except OSError:
pass
self.execute(
lambda: build_commands(configure_cmds, conf_make_dir, env), [],
'wiredtiger configure')
if not os.path.isfile(makefile):
die('configure failed, file does not exist: ' + makefile)
self.execute(
lambda: build_commands(make_cmds, conf_make_dir, env), [],
'wiredtiger make')
open(built_sentinal, 'a').close()
return setuptools.command.build_ext.build_ext.run(self)
setup(
name = 'wiredtiger',
version = wt_full_ver,
author = 'The WiredTiger Development Team, part of MongoDB',
author_email = 'info@wiredtiger.com',
description = short_description,
license='GPL2,GPL3,Commercial',
long_description = long_description,
url = 'http://source.wiredtiger.com/',
keywords = 'scalable NoSQL database datastore engine open source',
packages = ['wiredtiger'],
ext_package = 'wiredtiger',
ext_modules = extensions,
include_package_data = True,
distclass = BinaryDistribution,
package_dir = { 'wiredtiger' : '.' },
cmdclass = { 'install': WTInstall, 'build_ext': WTBuildExt },
package_data = {
'wiredtiger' : [ wt_swig_lib_name, '*.py' ]
},
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: C',
'Programming Language :: C++',
'Programming Language :: Python',
'Programming Language :: Java',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: SunOS/Solaris',
]
)
if pip_command == 'sdist':
shutil.rmtree(os.path.join(this_dir, 'stage'))
| 39.202934
| 81
| 0.641574
| 1,163
| 0.072533
| 0
| 0
| 0
| 0
| 0
| 0
| 7,103
| 0.442996
|
36e6a531b83457a4c48394e73a9fc94d96c25f64
| 77
|
py
|
Python
|
2022-02-24-ftx-rest-api-python/local_settings.py
|
georgehaan/analyzingalpha
|
f1f821e8d74d64addf410bfd205cb089ddf5517e
|
[
"Unlicense"
] | null | null | null |
2022-02-24-ftx-rest-api-python/local_settings.py
|
georgehaan/analyzingalpha
|
f1f821e8d74d64addf410bfd205cb089ddf5517e
|
[
"Unlicense"
] | null | null | null |
2022-02-24-ftx-rest-api-python/local_settings.py
|
georgehaan/analyzingalpha
|
f1f821e8d74d64addf410bfd205cb089ddf5517e
|
[
"Unlicense"
] | null | null | null |
ftxus = {
'api_key':'YOUR_API_KEY',
'api_secret':'YOUR_API_SECRET'
}
| 15.4
| 34
| 0.636364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 52
| 0.675325
|
36e87b1e11d644470443480a35f8b9e8b72438cd
| 4,387
|
py
|
Python
|
src/rechub/parameters.py
|
yusanshi/easy-rec
|
86db0bbd1eb0caf94c4b0ec4198bf49e64f65f24
|
[
"MIT"
] | null | null | null |
src/rechub/parameters.py
|
yusanshi/easy-rec
|
86db0bbd1eb0caf94c4b0ec4198bf49e64f65f24
|
[
"MIT"
] | null | null | null |
src/rechub/parameters.py
|
yusanshi/easy-rec
|
86db0bbd1eb0caf94c4b0ec4198bf49e64f65f24
|
[
"MIT"
] | null | null | null |
import argparse
from distutils.util import strtobool
def str2bool(x):
return bool(strtobool(x))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--num_epochs', type=int, default=1000)
parser.add_argument('--learning_rate', type=float, default=0.0005)
parser.add_argument('--batch_size', type=int, default=4096)
parser.add_argument('--num_workers', type=int, default=16)
parser.add_argument('--non_graph_embedding_dim', type=int, default=200)
parser.add_argument('--graph_embedding_dims',
type=int,
nargs='+',
default=[200, 128, 64])
parser.add_argument(
'--neighbors_sampling_quantile',
type=float,
default=0.9,
help=
'Set the number of sampled neighbors to the quantile of the numbers of neighbors'
)
parser.add_argument('--min_neighbors_sampled', type=int, default=4)
parser.add_argument('--max_neighbors_sampled', type=int, default=512)
parser.add_argument('--single_attribute_dim', type=int,
default=40) # TODO: support attributes
parser.add_argument('--attention_query_vector_dim', type=int, default=200)
parser.add_argument(
'--dnn_predictor_dims',
type=int,
nargs='+',
default=[-1, 128, 1],
help=
'You can set first dim as -1 to make it automatically fit the input vector'
)
parser.add_argument('--num_batches_show_loss', type=int, default=50)
parser.add_argument('--num_epochs_validate', type=int, default=5)
parser.add_argument('--early_stop_patience', type=int, default=20)
parser.add_argument('--num_attention_heads', type=int, default=8)
parser.add_argument('--save_checkpoint', type=str2bool, default=False)
parser.add_argument('--different_embeddings', type=str2bool, default=False)
parser.add_argument('--negative_sampling_ratio', type=int, default=4)
parser.add_argument(
'--model_name',
type=str,
default='GCN',
choices=[
# Non-graph
'NCF',
# Graph with single type of edge (we think it as homogeneous graph)
'GCN',
'GAT',
'LightGCN',
'NGCF',
# Graph with multiple types of edge (we think it as heterogeneous graph)
'HET-GCN',
'HET-GAT',
'HET-NGCF',
'HET-LightGCN',
# To be categorized
'GraphRec',
'DeepFM',
'DSSM',
'DiffNet',
'DiffNet++',
'DANSER'
])
parser.add_argument('--embedding_aggregator',
type=str,
default='concat',
choices=['concat', 'attn'])
parser.add_argument('--predictor',
type=str,
default='dnn',
choices=['dot', 'dnn'])
parser.add_argument('--dataset_path', type=str, required=True)
parser.add_argument('--metadata_path', type=str, required=True)
parser.add_argument('--log_path', type=str, default='./log/')
parser.add_argument('--tensorboard_runs_path', type=str, default='./runs/')
parser.add_argument('--checkpoint_path', type=str, default='./checkpoint/')
parser.add_argument('--edge_choice',
type=int,
nargs='+',
default=[],
help='Left empty to use all in metadata file')
parser.add_argument('--training_task_choice',
type=int,
nargs='+',
default=[],
help='Left empty to use all in metadata file')
parser.add_argument('--evaluation_task_choice',
type=int,
nargs='+',
default=[],
help='Left empty to use all in `training_task_choice`')
parser.add_argument('--task_loss_overwrite', type=str, nargs='+')
parser.add_argument('--task_weight_overwrite', type=float, nargs='+')
args, unknown = parser.parse_known_args()
if len(unknown) > 0:
print(
            'Warning: if you are not in testing mode, you may have passed some unrecognized parameters'
)
return args
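# A minimal usage sketch (hypothetical paths): parse_args() reads sys.argv, and only
# --dataset_path and --metadata_path are required.
if __name__ == '__main__':
    import sys
    sys.argv = ['parameters.py', '--dataset_path', './data/dataset.pkl',
                '--metadata_path', './data/metadata.json', '--model_name', 'HET-GCN']
    print(parse_args())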
| 38.823009
| 99
| 0.56713
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,472
| 0.335537
|
36e9553b230e4e00a0c8f9a0c28cdd825854c4a3
| 4,955
|
py
|
Python
|
course_difficulty.py
|
ewang26/dailytimedschedule
|
1d891828af67caab47ef6286051da7e84b980b2a
|
[
"MIT"
] | null | null | null |
course_difficulty.py
|
ewang26/dailytimedschedule
|
1d891828af67caab47ef6286051da7e84b980b2a
|
[
"MIT"
] | null | null | null |
course_difficulty.py
|
ewang26/dailytimedschedule
|
1d891828af67caab47ef6286051da7e84b980b2a
|
[
"MIT"
] | null | null | null |
# Koki Kapoor
# CSC 630
# Course Difficulty.py file
# have each homework assignment be ranked based on difficulty of the course and on difficulty of the assignment itself
# list_of_courses_and_difficulty only takes into consideration the difficulty of the course, not the assignment
from array import *
# install numpy in terminal with:
# dictionaries mapping difficulty level to their assigned descriptions
# commented this out to redefine the course difficulty and workload separately
"""
difficulty_levels = {
1:'Easy and quick',
2:'Easy but time-consuming',
3:'Medium',
4:'Hard material, quick work',
5:'Hard, tedious, and time-consuming'
}
"""
# difficulty_levels2 refers to the difficulty of the course's material, not how much time it takes
# ie, there can be a very time-consuming course that has easy material
difficulty_levels = {
1:'Easy',
2:'Easy-Medium',
3:'Medium',
4:'Medium-Hard',
5:'Hard'
}
#dictionary mapping the amount of time taken on a course's workload (which includes studying, tests, etc)
workload_levels = {
1:'1-1.9 hours',
2:'1.9-2.9 hours',
3:'2.9-3.9 hours',
4:'3.9-4.9 hours',
5:'4.9-5.9 hours',
6:'6+ hours'
}
def set_courses_and_difficulties():
# user input of course names
value_c = input("Please enter the names of all your courses with spaces in between each course name\n")
def get_courses():
# sets everything to upper case and removes surrounding whitespace, makes sure there is only one space between course names
courses = value_c.strip().upper()
return courses
format_courses = get_courses()
value_time = input("Please enter the amount of time (between 1 and 6 hours in whole numbers) that you spend completing work for each class every day.\n"
"The hours are as following:\n"
"\n".join([f'Level {level}: {timetaken_desc[level]}' for level in range(1,6)])+
f"\n\nReminder, your courses are: {format_courses}\n"
)
value_diff = input('\nPlease enter the difficulty of each course in the same order with spaces in between each ranking.\n' +
'The levels of difficulty are as following:\n' +
'\n'.join([f'Level {level}: {difficulty_desc[level]}' for level in range(1,6)])+
f'\n\nReminder, your courses are: {format_courses}\n')
def read_level_input(input_value):
input_vals = input_value.strip().split(' ') # strip whitespace from input value and split around spaces to create an array of strings
levels = [int(x) for x in input_vals] # cast to int
return levels
def string_to_array(s):
# defines a method that creates an array of strings, the strings consist of the content in between each spaces
return s.split(" ")
list_courses = string_to_array(get_courses())
    list_timetaken = read_level_input(value_time)
    list_difficulties = read_level_input(value_diff)
# make a joint list
course_info = dict()
for i,course in enumerate(list_courses):
course_info[course] = dict()
course_info[course]['efficiency'] = list_timetaken[i]
course_info[course]['difficulty'] = list_difficulties[i]
print(course_info)
# map course difficulty and time taken to a description
    list_difficulties_desc = [difficulty_levels[diff] for diff in list_difficulties]
    list_timetaken_desc = [workload_levels[timetaken] for timetaken in list_timetaken]
print(f'\nYour course list:\n{list_courses}\nTheir corresponding difficulties:\n{list_difficulties_desc}\nTheir corresponding time taken:\n{list_timetaken_desc}')
num_courses = len(list_courses)
# integer that represents the length of the courses array, isn't used as of now but is here in case you need it later
def coursecheck():
#checks that the courses the user entered are in line with what they want
check = input("Please check that these are the courses you're taking by responding 'yes' or 'no'\n")
if check.lower() in ['yes', 'y']:
print(f'\nYay! You are ready to move on.')
elif check.lower() in ['no', 'n']:
set_courses_and_difficulties()
else:
print(f'\nError. Please specify "yes" or "no".')
coursecheck()
if __name__ == "__main__":
set_courses_and_difficulties()
coursecheck()
# A refined way to obtain the "difficulty of an assignment in a numerical form
# The course difficulty can weigh heavier and then the assignment diffculty can be added
# The modified parameters of this method are difficulty_level (of the course material) and workload_level (how much time you need to spend on the course)
def get_difficulty_index(difficulty_level, workload_level):
# Through a 'joint list' implemented via a Python dictionary, `course_info`
# make the course difficulty weighed more than the homework efficiency
index = (difficulty_level * 2) + workload_level
return index
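# A short worked example of the weighting above: a 'Medium' course (difficulty level 3)
# with a '3.9-4.9 hours' workload (level 4) gives index = (3 * 2) + 4 = 10.
# print(get_difficulty_index(3, 4))  # -> 10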
| 36.703704
| 166
| 0.70333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,116
| 0.62886
|
36e98b5ce7e26ba1ac762413ca0565df029c2001
| 1,826
|
py
|
Python
|
src/esss_fix_format/hooks.py
|
nicoddemus/esss_fix_format
|
1f46e0d1c05cc88fd47be2f0b0f120d8265a759e
|
[
"MIT"
] | 20
|
2016-12-05T12:09:27.000Z
|
2021-11-23T21:57:59.000Z
|
src/esss_fix_format/hooks.py
|
nicoddemus/esss_fix_format
|
1f46e0d1c05cc88fd47be2f0b0f120d8265a759e
|
[
"MIT"
] | 43
|
2016-07-20T12:21:16.000Z
|
2022-03-14T20:31:07.000Z
|
src/esss_fix_format/hooks.py
|
nicoddemus/esss_fix_format
|
1f46e0d1c05cc88fd47be2f0b0f120d8265a759e
|
[
"MIT"
] | 8
|
2016-09-27T20:02:44.000Z
|
2021-04-16T14:58:08.000Z
|
import abc
import textwrap
class GitHook(metaclass=abc.ABCMeta):
"""
Base class to define a Git hook usable by `hooks` task.
"""
@abc.abstractmethod
def name(self):
"""
:rtype: unicode
:return: Name of hook.
"""
@abc.abstractmethod
def script(self):
"""
:rtype: unicode
:return: Script code. Omit the shebang, as it is added later by a post-process step when
hooks are installed in project.
"""
class FixFormatGitHook(GitHook):
"""
A hook that prevents developer from committing unless it respects formats expected by
our `fix-format` tool.
"""
def name(self):
return 'fix-format'
def script(self):
script = """\
if ! which fix-format >/dev/null 2>&1
then
echo "fix-format not found, install in an active environment with:"
echo " conda install esss_fix_format"
exit 1
else
git diff-index --diff-filter=ACM --name-only --cached HEAD | fix-format --check --stdin
returncode=$?
if [ "$returncode" != "0" ]
then
echo ""
echo "fix-format check failed (status=$returncode)! To fix, execute:"
echo " ff -c"
exit 1
fi
fi
"""
return textwrap.dedent(script)
def _add_hook(hook):
name = hook.name()
if name not in _HOOKS:
_HOOKS[name] = hook
else:
raise KeyError(f"A hook named '{name}' already exists")
# All hooks available by default
_HOOKS = {}
_add_hook(FixFormatGitHook())
def get_default_hook(name):
"""
:param unicode name: Name of a hook.
:rtype: GitHook
:return: A Git hook object.
"""
return _HOOKS[name]
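# A minimal usage sketch: fetch the registered hook by name and render its script body.
# As noted in GitHook.script, the shebang is added later when hooks are installed.
if __name__ == '__main__':
    hook = get_default_hook('fix-format')
    print(hook.name())
    print(hook.script())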
| 23.714286
| 99
| 0.557503
| 1,378
| 0.754655
| 0
| 0
| 348
| 0.190581
| 0
| 0
| 1,247
| 0.682913
|
36e9aa3443706da87ee4f539703a4f5d9195cf72
| 166
|
py
|
Python
|
Solutions/print all subset.py
|
Adityasriram0901/Python-Thunder
|
192920c48092ce1783d56c7b45cdd7e7a50246fa
|
[
"MIT"
] | 81
|
2020-09-25T15:02:11.000Z
|
2020-10-12T14:20:31.000Z
|
Solutions/print all subset.py
|
Adityasriram0901/Python-Thunder
|
192920c48092ce1783d56c7b45cdd7e7a50246fa
|
[
"MIT"
] | 196
|
2020-09-25T13:52:13.000Z
|
2020-10-12T20:20:00.000Z
|
Solutions/print all subset.py
|
Adityasriram0901/Python-Thunder
|
192920c48092ce1783d56c7b45cdd7e7a50246fa
|
[
"MIT"
] | 209
|
2020-09-25T16:15:46.000Z
|
2020-10-12T20:08:08.000Z
|
a = [1, 2, 3, 4]
# Recursively print every subset of the first n elements of a:
# each element is either left out of or added to the current subset.
def subset(a, n, current=()):
    if n == 0:
        print(list(current))
    else:
        subset(a, n - 1, current)
        subset(a, n - 1, current + (a[n - 1],))
subset(a, n=4)
| 13.833333
| 52
| 0.415663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
36eb37aac32d06e68b8f0f03ae15c8cd3b04fb1f
| 49
|
py
|
Python
|
trees/dasgupta/__init__.py
|
islamazhar/trees
|
502565c5bf02503c7bece09cddd93f9368da02c3
|
[
"MIT"
] | null | null | null |
trees/dasgupta/__init__.py
|
islamazhar/trees
|
502565c5bf02503c7bece09cddd93f9368da02c3
|
[
"MIT"
] | null | null | null |
trees/dasgupta/__init__.py
|
islamazhar/trees
|
502565c5bf02503c7bece09cddd93f9368da02c3
|
[
"MIT"
] | null | null | null |
from trees.dasgupta.costtree import DasguptaTree
| 24.5
| 48
| 0.877551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
36eb6ff512aad2f53a0ace07b5c62237d039ba4a
| 11,810
|
py
|
Python
|
examples/references/segmentation/pascal_voc2012/code/scripts/training.py
|
1nF0rmed/ignite
|
cfcd667e3cb9d67b67d928d12fa3ccdac05f7a3e
|
[
"BSD-3-Clause"
] | null | null | null |
examples/references/segmentation/pascal_voc2012/code/scripts/training.py
|
1nF0rmed/ignite
|
cfcd667e3cb9d67b67d928d12fa3ccdac05f7a3e
|
[
"BSD-3-Clause"
] | null | null | null |
examples/references/segmentation/pascal_voc2012/code/scripts/training.py
|
1nF0rmed/ignite
|
cfcd667e3cb9d67b67d928d12fa3ccdac05f7a3e
|
[
"BSD-3-Clause"
] | null | null | null |
# This a training script launched with py_config_runner
# It should obligatory contain `run(config, **kwargs)` method
import sys
from collections.abc import Mapping
from pathlib import Path
import torch
from apex import amp
from dataflow.datasets import VOCSegmentationOpencv
from py_config_runner.config_utils import TRAINVAL_CONFIG, assert_config, get_params
from py_config_runner.utils import set_seed
from utils import exp_tracking
from utils.handlers import predictions_gt_images_handler
import ignite
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.engine import Engine, Events, create_supervised_evaluator
from ignite.handlers import DiskSaver
from ignite.metrics import ConfusionMatrix, IoU, mIoU
from ignite.utils import setup_logger
# Adds "code" folder to python path
sys.path.insert(0, Path(__file__).parent.parent.as_posix())
def initialize(config):
model = config.model.to(config.device)
optimizer = config.optimizer
# Setup Nvidia/Apex AMP
model, optimizer = amp.initialize(model, optimizer, opt_level=getattr(config, "fp16_opt_level", "O2"), num_losses=1)
# Adapt model to dist conf
model = idist.auto_model(model)
criterion = config.criterion.to(config.device)
return model, optimizer, criterion
def get_save_handler(config):
if exp_tracking.has_clearml:
from ignite.contrib.handlers.clearml_logger import ClearMLSaver
return ClearMLSaver(dirname=config.output_path.as_posix())
return DiskSaver(config.output_path.as_posix())
def create_trainer(model, optimizer, criterion, train_sampler, config, logger):
prepare_batch = config.prepare_batch
device = config.device
# Setup trainer
accumulation_steps = getattr(config, "accumulation_steps", 1)
model_output_transform = getattr(config, "model_output_transform", lambda x: x)
def train_update_function(engine, batch):
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=True)
y_pred = model(x)
y_pred = model_output_transform(y_pred)
loss = criterion(y_pred, y)
if isinstance(loss, Mapping):
assert "supervised batch loss" in loss
loss_dict = loss
output = {k: v.item() for k, v in loss_dict.items()}
loss = loss_dict["supervised batch loss"] / accumulation_steps
else:
output = {"supervised batch loss": loss.item()}
with amp.scale_loss(loss, optimizer, loss_id=0) as scaled_loss:
scaled_loss.backward()
if engine.state.iteration % accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return output
output_names = getattr(config, "output_names", ["supervised batch loss",])
lr_scheduler = config.lr_scheduler
trainer = Engine(train_update_function)
trainer.logger = logger
to_save = {"model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler, "trainer": trainer, "amp": amp}
save_every_iters = getattr(config, "save_every_iters", 1000)
common.setup_common_training_handlers(
trainer,
train_sampler,
to_save=to_save,
save_every_iters=save_every_iters,
save_handler=get_save_handler(config),
lr_scheduler=lr_scheduler,
with_gpu_stats=exp_tracking.has_mlflow,
output_names=output_names,
with_pbars=False,
)
if idist.get_rank() == 0:
common.ProgressBar(persist=False).attach(trainer, metric_names="all")
return trainer
def create_evaluators(model, metrics, config):
model_output_transform = getattr(config, "model_output_transform", lambda x: x)
evaluator_args = dict(
model=model,
metrics=metrics,
device=config.device,
non_blocking=True,
prepare_batch=config.prepare_batch,
output_transform=lambda x, y, y_pred: (model_output_transform(y_pred), y,),
)
train_evaluator = create_supervised_evaluator(**evaluator_args)
evaluator = create_supervised_evaluator(**evaluator_args)
if idist.get_rank() == 0:
common.ProgressBar(desc="Evaluation (train)", persist=False).attach(train_evaluator)
common.ProgressBar(desc="Evaluation (val)", persist=False).attach(evaluator)
return evaluator, train_evaluator
def log_metrics(logger, epoch, elapsed, tag, metrics):
metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()])
logger.info(f"\nEpoch {epoch} - Evaluation time (seconds): {int(elapsed)} - {tag} metrics:\n {metrics_output}")
def log_basic_info(logger, config):
msg = f"\n- PyTorch version: {torch.__version__}"
msg += f"\n- Ignite version: {ignite.__version__}"
msg += f"\n- Cuda device name: {torch.cuda.get_device_name(idist.get_local_rank())}"
logger.info(msg)
if idist.get_world_size() > 1:
msg = "\nDistributed setting:"
msg += f"\tbackend: {idist.backend()}"
msg += f"\trank: {idist.get_rank()}"
msg += f"\tworld size: {idist.get_world_size()}"
logger.info(msg)
def training(local_rank, config, logger=None):
if not getattr(config, "use_fp16", True):
raise RuntimeError("This training script uses by default fp16 AMP")
torch.backends.cudnn.benchmark = True
set_seed(config.seed + local_rank)
train_loader, val_loader, train_eval_loader = config.train_loader, config.val_loader, config.train_eval_loader
# Setup model, optimizer, criterion
model, optimizer, criterion = initialize(config)
# Setup trainer for this specific task
trainer = create_trainer(model, optimizer, criterion, train_loader.sampler, config, logger)
# Setup evaluators
num_classes = config.num_classes
cm_metric = ConfusionMatrix(num_classes=num_classes)
val_metrics = {
"IoU": IoU(cm_metric),
"mIoU_bg": mIoU(cm_metric),
}
if hasattr(config, "val_metrics") and isinstance(config.val_metrics, dict):
val_metrics.update(config.val_metrics)
evaluator, train_evaluator = create_evaluators(model, val_metrics, config)
val_interval = getattr(config, "val_interval", 1)
@trainer.on(Events.EPOCH_COMPLETED(every=val_interval))
def run_validation():
epoch = trainer.state.epoch
state = train_evaluator.run(train_eval_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
state = evaluator.run(val_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
if config.num_epochs % val_interval != 0:
trainer.add_event_handler(Events.COMPLETED, run_validation)
if getattr(config, "start_by_validation", False):
trainer.add_event_handler(Events.STARTED, run_validation)
score_metric_name = "mIoU_bg"
if hasattr(config, "es_patience"):
common.add_early_stopping_by_val_score(config.es_patience, evaluator, trainer, metric_name=score_metric_name)
# Store 3 best models by validation accuracy:
common.gen_save_best_models_by_val_score(
save_handler=get_save_handler(config),
evaluator=evaluator,
models=model,
metric_name=score_metric_name,
n_saved=3,
trainer=trainer,
tag="val",
)
if idist.get_rank() == 0:
tb_logger = common.setup_tb_logging(
config.output_path.as_posix(),
trainer,
optimizer,
evaluators={"training": train_evaluator, "validation": evaluator},
)
if not exp_tracking.has_clearml:
exp_tracking_logger = exp_tracking.setup_logging(
trainer, optimizer, evaluators={"training": train_evaluator, "validation": evaluator}
)
# Log validation predictions as images
# We define a custom event filter to log less frequently the images (to reduce storage size)
# - we plot images with masks of the middle validation batch
# - once every 3 validations and
# - at the end of the training
def custom_event_filter(_, val_iteration):
c1 = val_iteration == len(val_loader) // 2
c2 = trainer.state.epoch % (getattr(config, "val_interval", 1) * 3) == 0
c2 |= trainer.state.epoch == config.num_epochs
return c1 and c2
tb_logger.attach(
evaluator,
log_handler=predictions_gt_images_handler(
img_denormalize_fn=config.img_denormalize, n_images=15, another_engine=trainer, prefix_tag="validation"
),
event_name=Events.ITERATION_COMPLETED(event_filter=custom_event_filter),
)
# Log confusion matrix to ClearML:
if exp_tracking.has_clearml:
@trainer.on(Events.COMPLETED)
def compute_and_log_cm():
cm = cm_metric.compute()
# CM: values are normalized such that diagonal values represent class recalls
cm = ConfusionMatrix.normalize(cm, "recall").cpu().numpy()
if idist.get_rank() == 0:
try:
from clearml import Task
except ImportError:
# Backwards-compatibility for legacy Trains SDK
from trains import Task
clearml_logger = Task.current_task().get_logger()
clearml_logger.report_confusion_matrix(
title="Final Confusion Matrix",
series="cm-preds-gt",
matrix=cm,
iteration=trainer.state.iteration,
xlabels=VOCSegmentationOpencv.target_names,
ylabels=VOCSegmentationOpencv.target_names,
)
trainer.run(train_loader, max_epochs=config.num_epochs)
if idist.get_rank() == 0:
tb_logger.close()
if not exp_tracking.has_clearml:
exp_tracking_logger.close()
def run(config, **kwargs):
"""This is the main method to run the training. As this training script is launched with `py_config_runner`
it should obligatory contain `run(config, **kwargs)` method.
"""
assert torch.cuda.is_available(), torch.cuda.is_available()
assert torch.backends.cudnn.enabled, "Nvidia/Amp requires cudnn backend to be enabled."
with idist.Parallel(backend="nccl") as parallel:
logger = setup_logger(name="Pascal-VOC12 Training", distributed_rank=idist.get_rank())
assert_config(config, TRAINVAL_CONFIG)
# The following attributes are automatically added by py_config_runner
assert hasattr(config, "config_filepath") and isinstance(config.config_filepath, Path)
assert hasattr(config, "script_filepath") and isinstance(config.script_filepath, Path)
if idist.get_rank() == 0 and exp_tracking.has_clearml:
try:
from clearml import Task
except ImportError:
# Backwards-compatibility for legacy Trains SDK
from trains import Task
task = Task.init("Pascal-VOC12 Training", config.config_filepath.stem)
task.connect_configuration(config.config_filepath.as_posix())
log_basic_info(logger, config)
config.output_path = Path(exp_tracking.get_output_path())
# dump python files to reproduce the run
exp_tracking.log_artifact(config.config_filepath.as_posix())
exp_tracking.log_artifact(config.script_filepath.as_posix())
exp_tracking.log_params(get_params(config, TRAINVAL_CONFIG))
try:
parallel.run(training, config, logger=logger)
except KeyboardInterrupt:
logger.info("Catched KeyboardInterrupt -> exit")
except Exception as e: # noqa
logger.exception("")
raise e
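# A hypothetical launch sketch (paths are placeholders), assuming py_config_runner exposes
# the `py_config_runner <script> <config>` command line referenced in run() above:
#   py_config_runner scripts/training.py configs/train/baseline.py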
| 35.896657
| 120
| 0.676545
| 0
| 0
| 0
| 0
| 1,346
| 0.113971
| 0
| 0
| 2,278
| 0.192887
|
36ec49281113de21af1d91215fc919058901c862
| 17,387
|
py
|
Python
|
src/instaBot.py
|
pabloqb2000/py-instabot
|
2bfdd51d588050d370d069db5d0352d29fd4560d
|
[
"Apache-2.0"
] | null | null | null |
src/instaBot.py
|
pabloqb2000/py-instabot
|
2bfdd51d588050d370d069db5d0352d29fd4560d
|
[
"Apache-2.0"
] | null | null | null |
src/instaBot.py
|
pabloqb2000/py-instabot
|
2bfdd51d588050d370d069db5d0352d29fd4560d
|
[
"Apache-2.0"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
from tqdm import tqdm
import random
from EmailSender import *
class InstagramBot:
# Creates object and starts the browser
def __init__(self, username, password):
        print("Hi, I'm your personal bot")
        print("I'm using account: @" + username)
self.username = username
self.password = password
self.driver = webdriver.Firefox()
self.followers = None
self.following = None
sleep(1)
    # Logs in to instagram.com
def login(self):
# Open web page
driver = self.driver
driver.get("https://www.instagram.com/")
sleep(4)
'''# Click login button
login_button = driver.find_element_by_xpath("//a[@href='/accounts/login/?source=auth_switcher']")
login_button.click()
sleep(3)'''
# Enter data
print("Trying to log in as: " + self.username)
user_name_elem = driver.find_element_by_xpath("//input[@name='username']")
user_name_elem.clear()
user_name_elem.send_keys(self.username)
passworword_elem = driver.find_element_by_xpath("//input[@name='password']")
passworword_elem.clear()
if len(self.password) > 1:
passworword_elem.send_keys(self.password)
passworword_elem.send_keys(Keys.RETURN)
sleep(8)
else:
sleep(20)
# Disable pop ups
for i in range(3):
try:
self.navigateToProfile()
break
except Exception:
pass
try:
not_download = driver.find_element_by_xpath("//a[@class='_3m3RQ _7XMpj']")
not_download.click()
sleep(4)
self.navigateToProfile()
break
except Exception:
pass
try:
not_now_button = driver.find_element_by_xpath("//button[@class='aOOlW HoLwm ']")
not_now_button.click()
sleep(4)
self.navigateToProfile()
break
except Exception:
pass
self.goToMain()
    # Sets the bot's own follower and following lists
def setFollowers(self):
driver = self.driver
self.goToProfile()
following, followers = self.getFollowLists(self.username)
self.following = following
self.followers = followers
    # Goes to the main page of Instagram
def goToMain(self):
driver = self.driver
driver.get("https://www.instagram.com/")
sleep(2)
    # Goes to the profile by clicking on the profile button
def navigateToProfile(self):
driver = self.driver
profile_link = driver.find_element_by_xpath('//a[@class="gmFkV"]')
profile_link.click()
sleep(2)
# Goes to the user profile page
def goToProfile(self):
self.lookForAccount(self.username)
# searches for the given account
def searchForAccount(self, account):
driver = self.driver
seach_box = driver.find_element_by_xpath("//input[@placeholder='Search']")
seach_box.clear()
seach_box.send_keys(account)
sleep(2)
seach_box.send_keys(Keys.ARROW_DOWN)
sleep(0.5)
for i in range(6):
seach_box.send_keys(Keys.ARROW_UP)
sleep(0.2)
sleep(1)
seach_box.send_keys(Keys.RETURN)
sleep(3)
# directly goes to the profile of the given account
def lookForAccount(self, account):
driver = self.driver
driver.get("https://www.instagram.com/"+account+"/")
sleep(3)
def followAccount(self, account):
driver = self.driver
self.lookForAccount(account)
follow_btn = driver.find_elements_by_xpath('//button[@class="_5f5mN jIbKX _6VtSN yZn4P "]')
follow_btn = follow_btn + driver.find_elements_by_xpath('//button[@class="BY3EC sqdOP L3NKy y3zKF "]')
if len(follow_btn) > 0:
follow_btn[0].click()
sleep(1)
    # searches the given hashtag
def searchHastag(self, htg):
self.searchForAccount(htg)
# NOT TESTED !!!!!
def followInScreen(self):
driver = self.driver
posts = self.getPostList(200)
for post in posts:
driver.get(post)
sleep(15)
try:
follow_button = driver.find_element_by_xpath("//button[@class='oW_lN sqdOP yWX7d y3zKF ']")
follow_button.click()
like_button = driver.find_element_by_xpath("//button[@class='dCJp8 afkep']")
like_button.click()
sleep(15)
except Exception:
print("exception")
# Returns the number of followers and follows of the current profile
def getFollowersNum(self):
driver = self.driver
spans = driver.find_elements_by_xpath("//span[@class='g47SY ']")
values = [self.get_text(s).replace(",", "").replace(".", "") for s in spans]
return int(values[1]), int(values[2])
# Returns the number of posts of the given account
def getPostNum(self, account):
driver = self.driver
self.lookForAccount(account)
spans = driver.find_elements_by_xpath("//span[@class='g47SY ']")
values = [self.get_text(s) for s in spans]
return int(values[0])
# Returns the HTML text inside an element
def get_text(self, el):
return self.driver.execute_script("""
var parent = arguments[0];
var child = parent.firstChild;
var ret = "";
while(child) {
if (child.nodeType === Node.TEXT_NODE)
ret += child.textContent;
child = child.nextSibling;
}
return ret;
""", el)
# Returns the following and the follower lists of a given account
def getFollowLists(self, account):
driver = self.driver
n_followers, n_following = self.getFollowersNum()
# Get following:
following_button = driver.find_element_by_xpath("//a[@href='/" + account + "/following/']")
following_button.click()
sleep(2)
for i in range(int(n_following / 8)):
last_follow = driver.find_elements_by_xpath("//a[@class='FPmhX notranslate _0imsa ']")[-1]
driver.execute_script("arguments[0].scrollIntoView(true);", last_follow)
sleep(1)
following_a = driver.find_elements_by_xpath("//a[@class='FPmhX notranslate _0imsa ']")
following = [f.get_property("title") for f in following_a]
self.lookForAccount(account)
#close_button = driver.find_element_by_xpath("/html/body/div[4]/div/div[1]/div/div[2]/button/svg")
#close_button.click()
sleep(3)
# Get followers
following_button = driver.find_element_by_xpath("//a[@href='/" + account + "/followers/']")
following_button.click()
sleep(2)
for i in range(int(n_followers / 8)):
last_follow = driver.find_elements_by_xpath("//a[@class='FPmhX notranslate _0imsa ']")[-1]
driver.execute_script("arguments[0].scrollIntoView(true);", last_follow)
sleep(1)
following_a = driver.find_elements_by_xpath("//a[@class='FPmhX notranslate _0imsa ']")
followers = [f.get_property("title") for f in following_a]
#close_button = driver.find_element_by_xpath("//span[@class='glyphsSpriteX__outline__24__grey_9 u-__7' and"
# " @aria-label='Cerrar']")
#close_button.click()
self.lookForAccount(account)
sleep(3)
return following, followers
# NOT TESTED !!!
def getPostList(self, n_posts=144):
driver = self.driver
for i in range(int(n_posts / 12)):
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
sleep(3)
posts_a = driver.find_elements_by_xpath("//div[@class='v1Nh3 kIKUG _bz0w']/a")
return [elem.get_attribute('href') for elem in posts_a if '.com/p/' in elem.get_attribute('href')]
# NOT TESTED !!!
def getCommenters(self, comment=[]):
driver = self.driver
commenters = driver.find_elements_by_xpath('//a[@class="FPmhX notranslate TlrDj"]')
comments = driver.find_elements_by_xpath('//div[@class="C4VMK"]/span')
comments = [self.get_text(comments[i]) for i in range(len(commenters)) if commenters[i].get_attribute("title") != self.username]
commenters = [c.get_attribute("title") for c in commenters if c.get_attribute("title") != self.username]
if comment != []:
commenters = [commenters[i] for i in range(len(comments)) if comments[i] in comment]
comments = [c for c in comments if c in comment]
return commenters, comments
# Check who isn't following back an account, if account == None => checks it for itself
def checkFollowersOf(self, account):
driver = self.driver
if account:
self.lookForAccount(account)
else:
self.goToProfile()
account = self.username
following, followers = self.getFollowLists(account)
print("People that don't follow " + account + " back: ")
for f in following:
if not f in followers:
print(f)
self.goToMain()
# Return the people that don't follow the given account back
# The given account should be followed by the bot
def getFekasOf(self, account):
driver = self.driver
self.lookForAccount(account)
following, followers = self.getFollowLists(account)
return [f for f in following if not f in followers]
# Return the people that follow the given account but the given account doesn't follow back
# The given account should be followed by the bot
def getCreepiesOf(self, account):
driver = self.driver
self.lookForAccount(account)
following, followers = self.getFollowLists(account)
return [f for f in followers if not f in following]
    # Likes all posts from a given account; use dislike to dislike them
def likeAll(self, account, dislike=False):
driver = self.driver
print("Liking all photos from: " + account)
self.lookForAccount(account)
n_posts = self.getPostNum(account)
posts_href = self.getPostList(n_posts)
        self.likeList(posts_href, dislike=dislike)
    # Likes all posts in the list; use dislike to dislike them
def likeList(self, list, pause=2, dislike=False):
for post in tqdm(list, desc="(Dis)Likes"):
self.like(post, dislike)
sleep(pause)
# likes the given post, use dislike option to dislike
def like(self, post, dislike=False):
driver = self.driver
if dislike: # Dislike xPath
#xPath = '//button[@class="wpO6b "]'
xPath = '//*[//*[name()="svg"] and @class="_8-yf5 " and @aria-label="Unlike" and @height="24" and @width="24"]' # and @height="24" and width="24"
else: # Like xPath
xPath = '//*[//*[name()="svg"] and @class="_8-yf5 " and @aria-label="Like" and @height="24" and @width="24"]' # and @height="24" and width="24"
driver.get(post)
sleep(3)
try:
like_button = lambda: driver.find_element_by_xpath(xPath).click()
like_button()
except Exception as e:
if dislike:
print("Didn't dislike ;(\n" + str(e))
else:
print("Didn't like ;(\n" + str(e))
sleep(2)
    # Unfollows the given account
def unfollow(self, account):
driver = self.driver
self.lookForAccount(account)
try:
unfollow_button = driver.find_element_by_xpath('//button[@class="_5f5mN -fzfL _6VtSN yZn4P "]')
unfollow_button.click()
sleep(1)
unfollow_button = driver.find_element_by_xpath('//button[@class="aOOlW -Cab_ "]')
unfollow_button.click()
sleep(1)
except Exception as e:
print("Couldn't unfollow " + account + "\n" + str(e))
# Follows given account
def follow(self, account):
driver = self.driver
self.lookForAccount(account)
try:
follow_button = driver.find_element_by_xpath('//button[@class="_5f5mN jIbKX _6VtSN yZn4P "]')
follow_button.click()
sleep(2)
self.goToMain()
except Exception as e:
print("Couldn't follow " + str(e))
    # Accepts all follow requests, optionally filtered by a list of usernames
def acceptFollows(self, filter=None):
driver = self.driver
activity_button = driver.find_element_by_xpath("//a[@class='_0ZPOP kIKUG ']")
activity_button.click()
sleep(2)
try:
accept_button = driver.find_element_by_xpath("//span[@class='BcJ68']")
accept_button.click()
sleep(2)
follow_list = driver.find_elements_by_xpath("//a[@class='FPmhX notranslate yrJyr']")
follow_list = [f.get_attribute("title") for f in follow_list]
button_list = driver.find_elements_by_xpath("//button[@class='sqdOP L3NKy y3zKF ']")
button_list = [b for b in button_list if self.get_text(b) == 'Confirm']
            if filter is not None:
                button_list = [button_list[i] for i in range(len(follow_list)) if follow_list[i] in filter]
for b in button_list:
b.click()
except Exception as e:
print("Didn't accept" + str(e))
self.lookForAccount(self.username)
self.goToMain()
# Unstable
def randomTag(self, photo, num):
driver = self.driver
driver.get(photo)
sleep(3)
comment_txtBox = driver.find_element_by_xpath('//textarea[@class="Ypffh"]')
#driver.execute_script("arguments[0].textContent = 'Hola carcola';", comment_txtBox)
sleep(4)
try:
comment_txtBox.send_keys(Keys.ENTER)
except Exception:
comment_txtBox.send_keys(Keys.ENTER)
#comment_txtBox.send_keys("caracola")
'''
# comment_txtBox.send_keys()
for n in range(num):
comment = "@" + random.choice("bcdfghjklmnpqrstvwxyz") + random.choice("aeiouy")
#comment_txtBox.send_keys(comment)
comment_txtBox.send_keys("hola")
sleep(2)
for _ in range(2):
comment_txtBox.send_keys(Keys.ENTER)
sleep(0.5)
comment_txtBox.send_keys(Keys.ENTER)
sleep(0.5)'''
# Open chat window from main menu
def chatMenu(self):
driver = self.driver
driver.get("https://www.instagram.com/direct/inbox/")
sleep(4)
    # From the profile menu, returns True if the bot has unread chats
def hasNewChats(self):
driver = self.driver
div = driver.find_elements_by_xpath('//div[@class="J_0ip Vpz-1 TKi86 "]')
return len(div) > 0
    # Returns the accounts with unread chats
def getNewChats(self):
driver = self.driver
chats = driver.find_elements_by_xpath('//div[@class="_7UhW9 xLCgt qyrsm KV-D4 fDxYl "]')
return [self.get_text(c) for c in chats]
# Open the chat to talk to a given account
def openChat(self, account):
driver = self.driver
# Unread chats
chats = driver.find_elements_by_xpath('//div[@class="_7UhW9 xLCgt qyrsm KV-D4 fDxYl "]')
# Read chats
chats = chats + driver.find_elements_by_xpath('//div[@class="_7UhW9 xLCgt qyrsm KV-D4 fDxYl "]/div/div/div')
matching = [c for c in chats if self.get_text(c) == account][-1]
matching.click()
sleep(1)
# Read the messages of the currently open chat
def read_msgs(self):
driver = self.driver
msgs = driver.find_elements_by_xpath('//div[@class=" Igw0E IwRSH YBx95 _4EzTm XfCBB g6RW6 "]/div/span')
msgs = [self.get_text(m) for m in msgs]
return msgs
# Send msg in the chat currently open
def sendMsg(self, msg="Hi"):
driver = self.driver
txtarea = driver.find_element_by_xpath('//textarea[@placeholder="Message..."]')
txtarea.click()
sleep(0.2)
txtarea.send_keys(msg)
txtarea.send_keys(Keys.RETURN)
# Closes browser
def closeBrowser(self):
self.driver.close()
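# Minimal usage sketch (added for illustration; not part of the original file).
# The credentials are placeholders, and the hard-coded XPath selectors in the class
# may break whenever Instagram changes its page markup.
if __name__ == "__main__":
    bot = InstagramBot("your_username", "your_password")  # hypothetical credentials
    bot.login()
    bot.setFollowers()
    print("Accounts that don't follow back:", bot.getFekasOf(bot.username))
    bot.closeBrowser()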
| 38.897092
| 256
| 0.572554
| 17,209
| 0.989762
| 0
| 0
| 0
| 0
| 0
| 0
| 5,565
| 0.320067
|
36edb3403cd5d8abc890118c85bd880dd47b74ce
| 198
|
py
|
Python
|
Python/03 - Strings/String Formatting.py
|
sohammanjrekar/HackerRank
|
1f5010133a1ac1e765e855a086053c97d9e958be
|
[
"MIT"
] | null | null | null |
Python/03 - Strings/String Formatting.py
|
sohammanjrekar/HackerRank
|
1f5010133a1ac1e765e855a086053c97d9e958be
|
[
"MIT"
] | null | null | null |
Python/03 - Strings/String Formatting.py
|
sohammanjrekar/HackerRank
|
1f5010133a1ac1e765e855a086053c97d9e958be
|
[
"MIT"
] | null | null | null |
def print_formatted(number):
    # Pad every column to the width of the binary representation of `number`.
    width = len(f"{number:b}")
    for i in range(1, number + 1):
        print(f"{i:{width}} {i:{width}o} {i:{width}X} {i:{width}b}")
| 33
| 69
| 0.545455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 88
| 0.444444
|
36ede9c0901ffceceb90ea9e2eb43efe24230727
| 813
|
py
|
Python
|
BattleCity-NES/main.py
|
iOsnaaente/Kata-train_Code
|
22cdf9d087bad879875c1f70029bda0771242c50
|
[
"MIT"
] | null | null | null |
BattleCity-NES/main.py
|
iOsnaaente/Kata-train_Code
|
22cdf9d087bad879875c1f70029bda0771242c50
|
[
"MIT"
] | null | null | null |
BattleCity-NES/main.py
|
iOsnaaente/Kata-train_Code
|
22cdf9d087bad879875c1f70029bda0771242c50
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from stages import Stages  # reads the stages (levels)
from code import tanks  # handles the player's tanks
from images import imagens  # game images
import pygame
import random
screen_Dimension=[32*20,32*20]
pygame.init()
screen = pygame.display.set_mode(screen_Dimension)
pygame.display.set_caption("My_Poor_NES_Battle_City")
clock = pygame.time.Clock()
Fase_1 = Stages.Stages(screen)
Fase_1.readStage(1)
Tank = tanks.PlayerTank(imagens.blueTank, [64,64], screen)
while True:
screen.fill([0,0,0])
for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            raise SystemExit  # stop the loop; drawing after pygame.quit() would fail
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                pygame.quit()
                raise SystemExit
Tank.move(event)
Fase_1.plotStage()
Tank.plot()
pygame.display.update()
clock.tick(60)
| 16.591837
| 62
| 0.719557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 111
| 0.136364
|
36ee554e3410e965f70042cc4e96c4361520515d
| 6,271
|
py
|
Python
|
n_queens.py
|
lkk7/n-queens-genetic-solver
|
d8b87b49970e58d4599618eb014c1b12e4f471fa
|
[
"MIT"
] | null | null | null |
n_queens.py
|
lkk7/n-queens-genetic-solver
|
d8b87b49970e58d4599618eb014c1b12e4f471fa
|
[
"MIT"
] | null | null | null |
n_queens.py
|
lkk7/n-queens-genetic-solver
|
d8b87b49970e58d4599618eb014c1b12e4f471fa
|
[
"MIT"
] | null | null | null |
from typing import Dict
from numba import njit
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['image.cmap'] = 'binary'
def read_parameters(filename: str) -> Dict[str, float]:
"""Read parameters from a file to a dictionary and return it."""
parameters = {}
with open(filename, "r") as file:
for line in file.readlines():
if line != '\n':
line_split = line.split()
try:
parameters[line_split[0]] = int(line_split[2])
except ValueError:
parameters[line_split[0]] = float(line_split[2])
if len(parameters) != 6:
raise RuntimeError("Incorrect list of parameters in " + filename)
return parameters
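# Illustrative note (added): each non-empty line of parameters.txt is "name = value";
# main() below reads pop_size, N, generations, n_best and plot_winner_genome, and
# read_parameters() expects six entries in total, e.g.
#   N = 8
#   pop_size = 100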
def random_population(population_size: int, board_size: int) -> np.ndarray:
"""Return a random population of solutions."""
return np.array([np.random.permutation(board_size)
for _ in range(population_size)], dtype=np.int32)
@njit
def fitness(population: np.ndarray) -> np.ndarray:
"""Return an array of fitnesses of a given population"""
fitness_arr = np.empty(population.shape[0], dtype=np.float32)
for i, genome in enumerate(population):
diags_1 = np.array([0 for n in range(2 * genome.size - 1)])
diags_2 = np.array([0 for n in range(2 * genome.size - 1)])
for j in range(genome.size):
diags_1[j - genome[j] + genome.size - 1] += 1
diags_2[j + genome[j]] += 1
colls_1 = diags_1 > 1
colls_2 = diags_2 > 1
diags_1[colls_1] = diags_1[colls_1] * (diags_1[colls_1] - 1) // 2
diags_1[~colls_1] = 0
diags_2[colls_2] = diags_2[colls_2] * (diags_2[colls_2] - 1) // 2
diags_2[~colls_2] = 0
fitness_arr[i] = 1 / (1 + np.sum(diags_1) + np.sum(diags_2))
return fitness_arr
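# Illustrative self-check (added; not part of the original script): the permutation
# [1, 3, 0, 2] is a valid 4-queens solution, so its fitness is exactly 1.0, while the
# identity permutation puts every queen on one diagonal and scores well below 1.
def _fitness_sanity_check() -> None:
    solved = np.array([[1, 3, 0, 2]], dtype=np.int32)
    diagonal = np.array([[0, 1, 2, 3]], dtype=np.int32)
    assert fitness(solved)[0] == 1.0
    assert fitness(diagonal)[0] < 1.0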
@njit
def selection(population: np.ndarray, n_best: int) -> np.ndarray:
"""Return an array of indices of individuals selected to mate.
n_best is the number of best individuals who will always be selected.
"""
fitnesses = fitness(population)
winners = np.empty((population.shape[0] // 2,), dtype=np.int32)
winners[0:n_best] = np.argsort(fitnesses)[-n_best:]
for i in range(n_best, fitnesses.shape[0] // 2):
pair = np.random.randint(0, fitnesses.shape[0], size=(2,))
if fitnesses[pair[0]] > fitnesses[pair[1]]:
winners[i] = pair[0]
else:
winners[i] = pair[1]
return winners
@njit
def crossover(population: np.ndarray, selected: np.ndarray):
"""Return a new population that results from crossover."""
N = population.shape[1]
new_population = np.empty_like(population)
for k in range(0, selected.shape[0]):
parents_ids = np.random.choice(selected, replace=False, size=2)
child_1 = np.empty_like(population[parents_ids[0]])
child_2 = np.empty_like(population[parents_ids[1]])
points = np.random.randint(0, N + 1, 2)
if points[0] != points[1]:
points = (np.min(points), np.max(points))
else:
if points[0] == N:
points = (points[0] - 1, points[0])
else:
points = (points[0], points[0] + 1)
cut_out = population[parents_ids[0]][points[0]:points[1]]
child_1[points[0]:points[1]] = cut_out
j = 0
for i in range(N):
if j == points[0]:
j = points[1]
if not np.any(cut_out == population[parents_ids[1]][i]):
child_1[j] = population[parents_ids[1]][i]
j += 1
cut_out = population[parents_ids[1]][points[0]:points[1]]
child_2[points[0]:points[1]] = cut_out
j = 0
for i in range(N):
if j == points[0]:
j = points[1]
if not np.any(cut_out == population[parents_ids[0]][i]):
child_2[j] = population[parents_ids[0]][i]
j += 1
new_population[2 * k, :] = child_1
new_population[2 * k + 1, :] = child_2
return new_population
@njit
def mutation(population: np.ndarray):
"""Perform mutation on a population."""
for i in range(population.shape[0]):
if np.random.random() > 0.7:
for _ in range(3):
points = np.random.randint(0, population.shape[1], 2)
tmp = population[i, points[0]]
population[i, points[0]] = population[i, points[1]]
population[i, points[1]] = tmp
def plot_genome_expression(genome: np.ndarray) -> None:
"""Plot a solution represented by the given genome."""
points = np.zeros((genome.shape[0], genome.shape[0]))
for i, g in enumerate(genome):
points[i, g] = 1
_, ax = plt.subplots(figsize=(10, 10))
ax.imshow(points, cmap='Purples')
ax.grid(True)
ax.set_xlim(-0.5, genome.shape[0] - 0.5)
ax.set_ylim(-0.5, genome.shape[0] - 0.5)
ax.set_xticks([i + 0.5 for i in range(genome.shape[0])])
ax.set_yticks([i + 0.5 for i in range(genome.shape[0])])
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.tick_params(axis='both', which='both', bottom=False, left=False)
plt.title("$N = {}$".format(genome.shape[0]), size=15)
plt.show()
def main() -> None:
parameters = read_parameters('parameters.txt')
population = random_population(parameters['pop_size'], parameters['N'])
generation_data = []
best_member_id = 0
winner_gen = parameters['generations']
for i in range(1, parameters['generations'] + 1):
selected = selection(population, parameters['n_best'])
population = crossover(population, selected)
mutation(population)
gen_fit = fitness(population)
best_member_id = np.argmax(gen_fit)
generation_data.append([i, gen_fit.mean(), gen_fit[best_member_id]])
if gen_fit[best_member_id] == 1.0:
print("\nWinner (gen. {}):\n{}".format(
i, str(population[best_member_id])))
winner_gen = i
break
if i % 50 == 0:
print("Gen", i)
if parameters['plot_winner_genome']:
plot_genome_expression(population[best_member_id])
if __name__ == "__main__":
main()
| 38.237805
| 76
| 0.591293
| 0
| 0
| 0
| 0
| 3,527
| 0.56243
| 0
| 0
| 676
| 0.107798
|
36f0a039978f0025fa6da35feb5807f99a23cd6a
| 1,362
|
py
|
Python
|
tests/plugins/test_ustreamtv.py
|
RomanKornev/streamlink
|
acdefee0822b9c10628b91a166f9abe084e44800
|
[
"BSD-2-Clause"
] | 2
|
2019-09-17T15:01:47.000Z
|
2019-09-21T16:26:50.000Z
|
tests/plugins/test_ustreamtv.py
|
RomanKornev/streamlink
|
acdefee0822b9c10628b91a166f9abe084e44800
|
[
"BSD-2-Clause"
] | 1
|
2020-06-02T02:36:30.000Z
|
2020-06-02T02:36:30.000Z
|
tests/plugins/test_ustreamtv.py
|
bumplzz69/streamlink
|
34abc43875d7663ebafa241573dece272e93d88b
|
[
"BSD-2-Clause"
] | 1
|
2020-08-12T08:27:22.000Z
|
2020-08-12T08:27:22.000Z
|
import unittest
from streamlink import Streamlink
try:
from unittest.mock import ANY, MagicMock, call
except ImportError:
from mock import ANY, MagicMock, call
from streamlink.plugins.ustreamtv import UStreamTV
class TestPluginUStreamTV(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
"http://www.ustream.tv/streamlink",
"http://www.ustream.tv/channel/id/1234",
"http://www.ustream.tv/embed/1234",
"http://www.ustream.tv/recorded/6543",
"http://www.ustream.tv/embed/recorded/6543",
]
for url in should_match:
self.assertTrue(UStreamTV.can_handle_url(url))
should_not_match = [
"https://www.youtube.com/v/aqz-KE-bpKQ",
]
for url in should_not_match:
self.assertFalse(UStreamTV.can_handle_url(url))
def test_arguments(self):
from streamlink_cli.main import setup_plugin_args
session = Streamlink()
parser = MagicMock()
plugin_parser = MagicMock()
parser.add_argument_group = MagicMock(return_value=plugin_parser)
session.plugins = {
'ustreamtv': UStreamTV
}
setup_plugin_args(session, parser)
plugin_parser.add_argument.assert_called_with('--ustream-password', metavar="PASSWORD", help=ANY)
| 30.954545
| 105
| 0.64978
| 1,138
| 0.835536
| 0
| 0
| 0
| 0
| 0
| 0
| 267
| 0.196035
|
36f1c4a0f6e35abb7375acc751edca8cda2db44e
| 304
|
py
|
Python
|
tests/test_xgboost.py
|
ak110/dl_allinone
|
976f0d65b20bcf9bfc00286608bcd957dd086209
|
[
"MIT"
] | 1
|
2019-02-07T03:48:19.000Z
|
2019-02-07T03:48:19.000Z
|
tests/test_xgboost.py
|
ak110/dl_allinone
|
976f0d65b20bcf9bfc00286608bcd957dd086209
|
[
"MIT"
] | 1
|
2019-03-26T03:48:15.000Z
|
2019-05-24T04:12:33.000Z
|
tests/test_xgboost.py
|
ak110/dl_allinone
|
976f0d65b20bcf9bfc00286608bcd957dd086209
|
[
"MIT"
] | null | null | null |
def test_run():
import sklearn.datasets
import xgboost
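    # Note (added): sklearn.datasets.load_boston was deprecated in scikit-learn 1.0
    # and removed in 1.2, so this test assumes an older scikit-learn release.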
data = sklearn.datasets.load_boston()
X, y = data.data, data.target # pylint: disable=no-member
xgb = xgboost.XGBRegressor(n_estimators=3)
xgb.fit(X[:100], y[:100])
assert xgb.predict(X[100:]).shape == (len(X[100:]),)
| 27.636364
| 62
| 0.648026
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.088816
|
36f2445925b38eafa6fa76d91317ba20cacff47f
| 1,241
|
py
|
Python
|
test/unit/object/test_collaboration_allowlist_entry.py
|
box/box-python-sdk
|
5c6766a17bac0315bede7a1f5909c912d194a793
|
[
"Apache-2.0"
] | 367
|
2015-02-10T05:55:45.000Z
|
2022-03-16T23:39:58.000Z
|
test/unit/object/test_collaboration_allowlist_entry.py
|
box/box-python-sdk
|
5c6766a17bac0315bede7a1f5909c912d194a793
|
[
"Apache-2.0"
] | 686
|
2015-02-10T01:21:28.000Z
|
2022-03-31T11:40:22.000Z
|
test/unit/object/test_collaboration_allowlist_entry.py
|
box/box-python-sdk
|
5c6766a17bac0315bede7a1f5909c912d194a793
|
[
"Apache-2.0"
] | 260
|
2015-02-16T17:35:06.000Z
|
2022-03-20T17:45:28.000Z
|
# coding: utf-8
from __future__ import unicode_literals, absolute_import
from boxsdk.config import API
def test_get(mock_box_session, test_collaboration_allowlist_entry):
entry_id = test_collaboration_allowlist_entry.object_id
expected_url = '{0}/collaboration_whitelist_entries/{1}'.format(API.BASE_API_URL, entry_id)
mock_entry = {
'type': 'collaboration_whitelist_entry',
'id': '98765',
'domain': 'example.com',
'direction': 'inbound'
}
mock_box_session.get.return_value.json.return_value = mock_entry
entry = test_collaboration_allowlist_entry.get()
mock_box_session.get.assert_called_once_with(expected_url, headers=None, params=None)
assert entry.id == mock_entry['id']
assert entry.domain == mock_entry['domain']
assert entry.direction == mock_entry['direction']
def test_delete(mock_box_session, test_collaboration_allowlist_entry):
entry_id = test_collaboration_allowlist_entry.object_id
expected_url = '{0}/collaboration_whitelist_entries/{1}'.format(API.BASE_API_URL, entry_id)
test_collaboration_allowlist_entry.delete()
mock_box_session.delete.assert_called_once_with(expected_url, expect_json_response=False, headers=None, params={})
| 42.793103
| 118
| 0.767929
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 209
| 0.168413
|
36f7aca45d40f82d8142db3d4804603a2675f264
| 1,463
|
py
|
Python
|
jumpy/setup.py
|
bharadwaj1098/brax
|
3108a0535b9b59725c97ef35732ed0378c0fd5cc
|
[
"Apache-2.0"
] | 1,162
|
2021-06-03T20:15:05.000Z
|
2022-03-31T19:53:06.000Z
|
jumpy/setup.py
|
bharadwaj1098/brax
|
3108a0535b9b59725c97ef35732ed0378c0fd5cc
|
[
"Apache-2.0"
] | 160
|
2021-06-05T02:32:39.000Z
|
2022-03-31T11:39:58.000Z
|
jumpy/setup.py
|
bharadwaj1098/brax
|
3108a0535b9b59725c97ef35732ed0378c0fd5cc
|
[
"Apache-2.0"
] | 117
|
2021-06-04T17:18:21.000Z
|
2022-03-30T18:04:48.000Z
|
# Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""setup.py for Jumpy.
Install for development:
  pip install -e .
"""
from setuptools import setup
setup(
name="brax-jumpy",
version="0.0.1",
description=("Common backend for JAX or numpy."),
author="Brax Authors",
author_email="no-reply@google.com",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="http://github.com/google/brax",
license="Apache 2.0",
py_modules=["jumpy"],
install_requires=[
"jax",
"jaxlib",
"numpy",
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
)
| 29.857143
| 74
| 0.673274
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,071
| 0.732057
|
36f7ebcb27998b0af7e58a152f1c6385a165aa9d
| 33,411
|
py
|
Python
|
simulation/Distance2.py
|
vivirodrigues/carrinheiros-heuristics
|
92c8c4a8384f8e3a86e9c53b41bcb2ab001de5f5
|
[
"MIT"
] | null | null | null |
simulation/Distance2.py
|
vivirodrigues/carrinheiros-heuristics
|
92c8c4a8384f8e3a86e9c53b41bcb2ab001de5f5
|
[
"MIT"
] | null | null | null |
simulation/Distance2.py
|
vivirodrigues/carrinheiros-heuristics
|
92c8c4a8384f8e3a86e9c53b41bcb2ab001de5f5
|
[
"MIT"
] | null | null | null |
import json
import scipy.stats
import matplotlib.pyplot as plt
import scipy.stats as st
from decimal import Decimal, ROUND_HALF_UP
from xml.dom import minidom
import numpy as np
def open_file(nameFile):
    try:
        f = open(nameFile + ".json", "r")
        dados = json.loads(f.read())
        f.close()
    except (OSError, ValueError):
        # missing or malformed file: fall back to 0, as in the original behaviour
        dados = 0
    return dados
def mean_confidence_interval(data, confidence=0.90):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
#return m, m - h, m + h
return m, h
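# Worked example (added for illustration): for the sample [1, 2, 3, 4, 5] the mean is
# 3.0 and the 95% half-width is about 1.96 (t(0.975, df=4) ~ 2.776 times SE ~ 0.707).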
files = [
'../data/results/m38.49999603681327_m12.962358080558504_m38.47398437502447_m12.932893255527242_0_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.500671812913836_m12.96339552158351_m38.47352508877093_m12.932765988234031_1_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50194412971296_m12.961982380453897_m38.472997875909336_m12.933973466644028_2_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.5014109499298_m12.960872502034725_m38.47423998586774_m12.935033565792027_3_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50102106363388_m12.962638092503209_m38.474525144844954_m12.932374557163948_4_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.49922134252434_m12.962995897766534_m38.47172032605714_m12.933032796134958_5_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.49989452416794_m12.961981434109553_m38.47288011285585_m12.932171368514155_6_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50237887905613_m12.960648819826947_m38.472913582758295_m12.934273386456828_7_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.5015370998344_m12.962186005531471_m38.47261478466609_m12.934002015361491_8_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50073006631474_m12.961333960783888_m38.4725327574897_m12.932373724953635_9_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50096584572687_m12.96121100042776_m38.47440076442133_m12.934017719276726_10_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50082829471482_m12.960720017172312_m38.47384043859295_m12.933596799909374_11_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.501118552381065_m12.962947784137462_m38.47426226643149_m12.932564078786635_12_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.502373456830234_m12.962333491657414_m38.47477812160141_m12.93271906374045_13_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50148403583942_m12.965290796965846_m38.471909395581456_m12.932729360653218_14_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.501890924160584_m12.961062102765782_m38.4732392389171_m12.933884816602236_15_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.5007597052321_m12.961099590741043_m38.473517022103756_m12.933269493665131_16_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50151426278066_m12.96224952417061_m38.473343947418165_m12.932595128870267_17_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50340379765633_m12.963068504924866_m38.473898022861405_m12.932939179700924_18_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.501402782516365_m12.962743981859667_m38.47361068224981_m12.929892203606808_19_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.500951062259055_m12.964628446152132_m38.47375669394401_m12.93455351878407_20_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.500486678608006_m12.963212145332431_m38.474758327361364_m12.933328833777356_21_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50234447884447_m12.961648999633914_m38.474661277554_m12.93489642987398_22_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50229159113205_m12.961490473565357_m38.474209563384555_m12.933428060221484_23_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.500568338650666_m12.963562146885746_m38.47357849097421_m12.93225101151055_24_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50115701483925_m12.9612635544437_m38.47509217365817_m12.933188948092502_25_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50186554346796_m12.961718758432754_m38.47355380440904_m12.934289622568668_26_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50165434807298_m12.96187628063375_m38.47332172286755_m12.933277161490693_27_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50177737556065_m12.962596650290932_m38.472904517360526_m12.933331456516722_28_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50009702898103_m12.96036292373261_m38.47412281703678_m12.934711892250165_29_30_length_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.500734794836475_m12.961295117029927_m38.473498428492356_m12.932937589096973_30_30_length_heuristic_SPFA_nearest_neighbor.xml'
]
files_i = [
#'../../data/results/m43.96267779776494_m19.944747838679202_m43.929659815391865_m19.905049264605925_0_distance_heuristic_SPFA_ci_distance',
'../data/results/m38.49999603681327_m12.962358080558504_m38.47398437502447_m12.932893255527242_0_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.500671812913836_m12.96339552158351_m38.47352508877093_m12.932765988234031_1_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50194412971296_m12.961982380453897_m38.472997875909336_m12.933973466644028_2_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.5014109499298_m12.960872502034725_m38.47423998586774_m12.935033565792027_3_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50102106363388_m12.962638092503209_m38.474525144844954_m12.932374557163948_4_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.49922134252434_m12.962995897766534_m38.47172032605714_m12.933032796134958_5_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.49989452416794_m12.961981434109553_m38.47288011285585_m12.932171368514155_6_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50237887905613_m12.960648819826947_m38.472913582758295_m12.934273386456828_7_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.5015370998344_m12.962186005531471_m38.47261478466609_m12.934002015361491_8_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50073006631474_m12.961333960783888_m38.4725327574897_m12.932373724953635_9_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50096584572687_m12.96121100042776_m38.47440076442133_m12.934017719276726_10_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50082829471482_m12.960720017172312_m38.47384043859295_m12.933596799909374_11_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.501118552381065_m12.962947784137462_m38.47426226643149_m12.932564078786635_12_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.502373456830234_m12.962333491657414_m38.47477812160141_m12.93271906374045_13_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50148403583942_m12.965290796965846_m38.471909395581456_m12.932729360653218_14_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.501890924160584_m12.961062102765782_m38.4732392389171_m12.933884816602236_15_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.5007597052321_m12.961099590741043_m38.473517022103756_m12.933269493665131_16_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50151426278066_m12.96224952417061_m38.473343947418165_m12.932595128870267_17_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50340379765633_m12.963068504924866_m38.473898022861405_m12.932939179700924_18_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.501402782516365_m12.962743981859667_m38.47361068224981_m12.929892203606808_19_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.500951062259055_m12.964628446152132_m38.47375669394401_m12.93455351878407_20_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.500486678608006_m12.963212145332431_m38.474758327361364_m12.933328833777356_21_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50234447884447_m12.961648999633914_m38.474661277554_m12.93489642987398_22_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50229159113205_m12.961490473565357_m38.474209563384555_m12.933428060221484_23_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.500568338650666_m12.963562146885746_m38.47357849097421_m12.93225101151055_24_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50115701483925_m12.9612635544437_m38.47509217365817_m12.933188948092502_25_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50186554346796_m12.961718758432754_m38.47355380440904_m12.934289622568668_26_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50165434807298_m12.96187628063375_m38.47332172286755_m12.933277161490693_27_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50177737556065_m12.962596650290932_m38.472904517360526_m12.933331456516722_28_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50009702898103_m12.96036292373261_m38.47412281703678_m12.934711892250165_29_30_length_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.500734794836475_m12.961295117029927_m38.473498428492356_m12.932937589096973_30_30_length_heuristic_SPFA_closest_insertion.xml'
]
files_d = [#'../../data/results/m43.96267779776494_m19.944747838679202_m43.929659815391865_m19.905049264605925_0_distance_heuristic_SPFA_fi_distance'
'../data/results/m38.49999603681327_m12.962358080558504_m38.47398437502447_m12.932893255527242_0_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.500671812913836_m12.96339552158351_m38.47352508877093_m12.932765988234031_1_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50194412971296_m12.961982380453897_m38.472997875909336_m12.933973466644028_2_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.5014109499298_m12.960872502034725_m38.47423998586774_m12.935033565792027_3_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50102106363388_m12.962638092503209_m38.474525144844954_m12.932374557163948_4_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.49922134252434_m12.962995897766534_m38.47172032605714_m12.933032796134958_5_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.49989452416794_m12.961981434109553_m38.47288011285585_m12.932171368514155_6_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50237887905613_m12.960648819826947_m38.472913582758295_m12.934273386456828_7_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.5015370998344_m12.962186005531471_m38.47261478466609_m12.934002015361491_8_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50073006631474_m12.961333960783888_m38.4725327574897_m12.932373724953635_9_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50096584572687_m12.96121100042776_m38.47440076442133_m12.934017719276726_10_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50082829471482_m12.960720017172312_m38.47384043859295_m12.933596799909374_11_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.501118552381065_m12.962947784137462_m38.47426226643149_m12.932564078786635_12_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.502373456830234_m12.962333491657414_m38.47477812160141_m12.93271906374045_13_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50148403583942_m12.965290796965846_m38.471909395581456_m12.932729360653218_14_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.501890924160584_m12.961062102765782_m38.4732392389171_m12.933884816602236_15_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.5007597052321_m12.961099590741043_m38.473517022103756_m12.933269493665131_16_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50151426278066_m12.96224952417061_m38.473343947418165_m12.932595128870267_17_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50340379765633_m12.963068504924866_m38.473898022861405_m12.932939179700924_18_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.501402782516365_m12.962743981859667_m38.47361068224981_m12.929892203606808_19_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.500951062259055_m12.964628446152132_m38.47375669394401_m12.93455351878407_20_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.500486678608006_m12.963212145332431_m38.474758327361364_m12.933328833777356_21_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50234447884447_m12.961648999633914_m38.474661277554_m12.93489642987398_22_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50229159113205_m12.961490473565357_m38.474209563384555_m12.933428060221484_23_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.500568338650666_m12.963562146885746_m38.47357849097421_m12.93225101151055_24_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50115701483925_m12.9612635544437_m38.47509217365817_m12.933188948092502_25_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50186554346796_m12.961718758432754_m38.47355380440904_m12.934289622568668_26_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50165434807298_m12.96187628063375_m38.47332172286755_m12.933277161490693_27_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50177737556065_m12.962596650290932_m38.472904517360526_m12.933331456516722_28_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50009702898103_m12.96036292373261_m38.47412281703678_m12.934711892250165_29_30_length_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.500734794836475_m12.961295117029927_m38.473498428492356_m12.932937589096973_30_30_length_heuristic_SPFA_further_insertion.xml'
]
files_b = [#'../../data/results/m43.957018117658315_m19.931545102455843_m43.931890481507786_m19.907162672548026_0_distance_heuristic_SPFA_nn'
'../data/results/m38.49999603681327_m12.962358080558504_m38.47398437502447_m12.932893255527242_0_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.500671812913836_m12.96339552158351_m38.47352508877093_m12.932765988234031_1_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50194412971296_m12.961982380453897_m38.472997875909336_m12.933973466644028_2_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.5014109499298_m12.960872502034725_m38.47423998586774_m12.935033565792027_3_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50102106363388_m12.962638092503209_m38.474525144844954_m12.932374557163948_4_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.49922134252434_m12.962995897766534_m38.47172032605714_m12.933032796134958_5_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.49989452416794_m12.961981434109553_m38.47288011285585_m12.932171368514155_6_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50237887905613_m12.960648819826947_m38.472913582758295_m12.934273386456828_7_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.5015370998344_m12.962186005531471_m38.47261478466609_m12.934002015361491_8_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50073006631474_m12.961333960783888_m38.4725327574897_m12.932373724953635_9_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50096584572687_m12.96121100042776_m38.47440076442133_m12.934017719276726_10_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50082829471482_m12.960720017172312_m38.47384043859295_m12.933596799909374_11_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.501118552381065_m12.962947784137462_m38.47426226643149_m12.932564078786635_12_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.502373456830234_m12.962333491657414_m38.47477812160141_m12.93271906374045_13_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50148403583942_m12.965290796965846_m38.471909395581456_m12.932729360653218_14_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.501890924160584_m12.961062102765782_m38.4732392389171_m12.933884816602236_15_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.5007597052321_m12.961099590741043_m38.473517022103756_m12.933269493665131_16_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50151426278066_m12.96224952417061_m38.473343947418165_m12.932595128870267_17_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50340379765633_m12.963068504924866_m38.473898022861405_m12.932939179700924_18_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.501402782516365_m12.962743981859667_m38.47361068224981_m12.929892203606808_19_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.500951062259055_m12.964628446152132_m38.47375669394401_m12.93455351878407_20_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.500486678608006_m12.963212145332431_m38.474758327361364_m12.933328833777356_21_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50234447884447_m12.961648999633914_m38.474661277554_m12.93489642987398_22_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50229159113205_m12.961490473565357_m38.474209563384555_m12.933428060221484_23_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.500568338650666_m12.963562146885746_m38.47357849097421_m12.93225101151055_24_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50115701483925_m12.9612635544437_m38.47509217365817_m12.933188948092502_25_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50186554346796_m12.961718758432754_m38.47355380440904_m12.934289622568668_26_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50165434807298_m12.96187628063375_m38.47332172286755_m12.933277161490693_27_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50177737556065_m12.962596650290932_m38.472904517360526_m12.933331456516722_28_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.50009702898103_m12.96036292373261_m38.47412281703678_m12.934711892250165_29_30_weight_heuristic_SPFA_nearest_neighbor.xml',
'../data/results/m38.500734794836475_m12.961295117029927_m38.473498428492356_m12.932937589096973_30_30_weight_heuristic_SPFA_nearest_neighbor.xml'
]
files_i_b = [#'../../data/results/m43.957018117658315_m19.931545102455843_m43.931890481507786_m19.907162672548026_0_distance_heuristic_SPFA_nn'
'../data/results/m38.49999603681327_m12.962358080558504_m38.47398437502447_m12.932893255527242_0_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.500671812913836_m12.96339552158351_m38.47352508877093_m12.932765988234031_1_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50194412971296_m12.961982380453897_m38.472997875909336_m12.933973466644028_2_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.5014109499298_m12.960872502034725_m38.47423998586774_m12.935033565792027_3_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50102106363388_m12.962638092503209_m38.474525144844954_m12.932374557163948_4_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.49922134252434_m12.962995897766534_m38.47172032605714_m12.933032796134958_5_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.49989452416794_m12.961981434109553_m38.47288011285585_m12.932171368514155_6_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50237887905613_m12.960648819826947_m38.472913582758295_m12.934273386456828_7_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.5015370998344_m12.962186005531471_m38.47261478466609_m12.934002015361491_8_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50073006631474_m12.961333960783888_m38.4725327574897_m12.932373724953635_9_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50096584572687_m12.96121100042776_m38.47440076442133_m12.934017719276726_10_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50082829471482_m12.960720017172312_m38.47384043859295_m12.933596799909374_11_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.501118552381065_m12.962947784137462_m38.47426226643149_m12.932564078786635_12_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.502373456830234_m12.962333491657414_m38.47477812160141_m12.93271906374045_13_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50148403583942_m12.965290796965846_m38.471909395581456_m12.932729360653218_14_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.501890924160584_m12.961062102765782_m38.4732392389171_m12.933884816602236_15_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.5007597052321_m12.961099590741043_m38.473517022103756_m12.933269493665131_16_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50151426278066_m12.96224952417061_m38.473343947418165_m12.932595128870267_17_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50340379765633_m12.963068504924866_m38.473898022861405_m12.932939179700924_18_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.501402782516365_m12.962743981859667_m38.47361068224981_m12.929892203606808_19_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.500951062259055_m12.964628446152132_m38.47375669394401_m12.93455351878407_20_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.500486678608006_m12.963212145332431_m38.474758327361364_m12.933328833777356_21_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50234447884447_m12.961648999633914_m38.474661277554_m12.93489642987398_22_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50229159113205_m12.961490473565357_m38.474209563384555_m12.933428060221484_23_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.500568338650666_m12.963562146885746_m38.47357849097421_m12.93225101151055_24_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50115701483925_m12.9612635544437_m38.47509217365817_m12.933188948092502_25_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50186554346796_m12.961718758432754_m38.47355380440904_m12.934289622568668_26_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50165434807298_m12.96187628063375_m38.47332172286755_m12.933277161490693_27_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50177737556065_m12.962596650290932_m38.472904517360526_m12.933331456516722_28_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.50009702898103_m12.96036292373261_m38.47412281703678_m12.934711892250165_29_30_weight_heuristic_SPFA_closest_insertion.xml',
'../data/results/m38.500734794836475_m12.961295117029927_m38.473498428492356_m12.932937589096973_30_30_weight_heuristic_SPFA_closest_insertion.xml'
]
files_d_b = [#'../../data/results/m43.957018117658315_m19.931545102455843_m43.931890481507786_m19.907162672548026_0_distance_heuristic_SPFA_nn'
'../data/results/m38.49999603681327_m12.962358080558504_m38.47398437502447_m12.932893255527242_0_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.500671812913836_m12.96339552158351_m38.47352508877093_m12.932765988234031_1_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50194412971296_m12.961982380453897_m38.472997875909336_m12.933973466644028_2_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.5014109499298_m12.960872502034725_m38.47423998586774_m12.935033565792027_3_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50102106363388_m12.962638092503209_m38.474525144844954_m12.932374557163948_4_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.49922134252434_m12.962995897766534_m38.47172032605714_m12.933032796134958_5_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.49989452416794_m12.961981434109553_m38.47288011285585_m12.932171368514155_6_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50237887905613_m12.960648819826947_m38.472913582758295_m12.934273386456828_7_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.5015370998344_m12.962186005531471_m38.47261478466609_m12.934002015361491_8_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50073006631474_m12.961333960783888_m38.4725327574897_m12.932373724953635_9_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50096584572687_m12.96121100042776_m38.47440076442133_m12.934017719276726_10_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50082829471482_m12.960720017172312_m38.47384043859295_m12.933596799909374_11_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.501118552381065_m12.962947784137462_m38.47426226643149_m12.932564078786635_12_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.502373456830234_m12.962333491657414_m38.47477812160141_m12.93271906374045_13_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50148403583942_m12.965290796965846_m38.471909395581456_m12.932729360653218_14_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.501890924160584_m12.961062102765782_m38.4732392389171_m12.933884816602236_15_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.5007597052321_m12.961099590741043_m38.473517022103756_m12.933269493665131_16_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50151426278066_m12.96224952417061_m38.473343947418165_m12.932595128870267_17_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50340379765633_m12.963068504924866_m38.473898022861405_m12.932939179700924_18_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.501402782516365_m12.962743981859667_m38.47361068224981_m12.929892203606808_19_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.500951062259055_m12.964628446152132_m38.47375669394401_m12.93455351878407_20_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.500486678608006_m12.963212145332431_m38.474758327361364_m12.933328833777356_21_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50234447884447_m12.961648999633914_m38.474661277554_m12.93489642987398_22_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50229159113205_m12.961490473565357_m38.474209563384555_m12.933428060221484_23_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.500568338650666_m12.963562146885746_m38.47357849097421_m12.93225101151055_24_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50115701483925_m12.9612635544437_m38.47509217365817_m12.933188948092502_25_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50186554346796_m12.961718758432754_m38.47355380440904_m12.934289622568668_26_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50165434807298_m12.96187628063375_m38.47332172286755_m12.933277161490693_27_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50177737556065_m12.962596650290932_m38.472904517360526_m12.933331456516722_28_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.50009702898103_m12.96036292373261_m38.47412281703678_m12.934711892250165_29_30_weight_heuristic_SPFA_further_insertion.xml',
'../data/results/m38.500734794836475_m12.961295117029927_m38.473498428492356_m12.932937589096973_30_30_weight_heuristic_SPFA_further_insertion.xml'
]
values_t = []
values_i = []
values_d = []
values_t_b = []
values_i_b = []
values_d_b = []
for a in range(len(files)):
file = minidom.parse(files[a])
tag = file.getElementsByTagName('tripinfo')
duration = [float(node.attributes['routeLength'].value) for node in tag]
values_t.append(duration[0] / 1000)
file = minidom.parse(files_i[a])
tag = file.getElementsByTagName('tripinfo')
duration = [float(node.attributes['routeLength'].value) for node in tag]
values_i.append(duration[0] / 1000)
# 1, 13
file = minidom.parse(files_d[a])
tag = file.getElementsByTagName('tripinfo')
duration = [float(node.attributes['routeLength'].value) for node in tag]
values_d.append(duration[0] / 1000)
file = minidom.parse(files_b[a])
tag = file.getElementsByTagName('tripinfo')
duration = [float(node.attributes['routeLength'].value) for node in tag]
values_t_b.append(duration[0] / 1000)
file = minidom.parse(files_i_b[a])
tag = file.getElementsByTagName('tripinfo')
duration = [float(node.attributes['routeLength'].value) for node in tag]
values_i_b.append(duration[0] / 1000)
file = minidom.parse(files_d_b[a])
tag = file.getElementsByTagName('tripinfo')
duration = [float(node.attributes['routeLength'].value) for node in tag]
values_d_b.append(duration[0] / 1000)
m, h = mean_confidence_interval(values_t, 0.95)
m1, h1 = mean_confidence_interval(values_i, 0.95)
m2, h2 = mean_confidence_interval(values_d, 0.95)
m_b, h_b = mean_confidence_interval(values_t_b, 0.95)
m1_b, h1_b = mean_confidence_interval(values_i_b, 0.95)
m2_b, h2_b = mean_confidence_interval(values_d_b, 0.95)
medias = [m, m1, m2]
erros = [h, h1, h2]
medias_b = [m_b, m1_b, m2_b]
erros_b = [h_b, h1_b, h2_b]
print("medias, SDP", medias)
print('Nearest Neighbor', 'Closest Insertion', 'Further Insertion')
print("medias, LWP", medias_b)
print("erros, SDP", erros)
print("erros, LWP", erros_b)
# define sample data
# data = values # [12, 12, 13, 13, 15, 16, 17, 22, 23, 25, 26, 27, 28, 28, 29]
# create 95% confidence interval for population mean weight
# print(st.t.interval(alpha=0.95, df=len(data) - 1, loc=np.mean(data), scale=st.sem(data)))
labels = ['Nearest Neighbor', 'Closest Insertion', 'Further Insertion']
x = np.arange(len(labels)) # the label locations
width = 0.25 # 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - width / 2, medias, width, yerr=erros, label='SDP', zorder=10)
r2 = ax.bar(x + width / 2, medias_b, width, yerr=erros_b, label='LWP', zorder=10)
# Add some text for labels, title and custom x-axis tick labels, etc.
# ax.set_ylabel('Potência média (W)', fontdict='bold')
plt.ylabel('Time [h]', fontweight="bold", fontsize=11)
plt.ylim(0, max(medias) + 2)
plt.grid(True, which="both", ls="-", linewidth=0.1, color='0.10', zorder=0)
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend(numpoints=1, loc="upper left", ncol=2, prop={'size': 10})
fig.tight_layout()
plt.show()
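The script above relies on a `mean_confidence_interval` helper defined earlier in the file; a minimal sketch of such a helper, assuming it returns the sample mean and the half-width of a Student-t interval via scipy.stats (as hinted by the commented-out `st.t.interval` line), could look like this:
```python
# Sketch only: the real helper may differ; assumed to return (mean, half-width).
import numpy as np
import scipy.stats as st

def mean_confidence_interval(data, confidence=0.95):
    a = np.asarray(data, dtype=float)
    n = len(a)
    m = np.mean(a)                                      # sample mean
    se = st.sem(a)                                      # standard error of the mean
    h = se * st.t.ppf((1.0 + confidence) / 2.0, n - 1)  # half-width of the t-interval
    return m, h
```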
| 103.12037
| 160
| 0.829427
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28,352
| 0.848532
|
36f815fa18399e9d17f81a9738794e259e786f45
| 9,134
|
py
|
Python
|
spatial_interpolators/radial_basis.py
|
tsutterley/spatial-interpolators
|
6949807dd3ee4cbc7cd9bd323dbf3304fbd19ca2
|
[
"MIT"
] | 18
|
2018-09-14T04:12:01.000Z
|
2021-08-03T11:14:45.000Z
|
spatial_interpolators/radial_basis.py
|
tsutterley/spatial-interpolators
|
6949807dd3ee4cbc7cd9bd323dbf3304fbd19ca2
|
[
"MIT"
] | 2
|
2021-07-08T16:17:10.000Z
|
2022-01-04T16:26:55.000Z
|
spatial_interpolators/radial_basis.py
|
tsutterley/spatial-interpolators
|
6949807dd3ee4cbc7cd9bd323dbf3304fbd19ca2
|
[
"MIT"
] | 3
|
2018-09-19T06:34:42.000Z
|
2019-10-03T12:22:23.000Z
|
#!/usr/bin/env python
u"""
radial_basis.py
Written by Tyler Sutterley (01/2022)
Interpolates data using radial basis functions
CALLING SEQUENCE:
ZI = radial_basis(xs, ys, zs, XI, YI, polynomial=0,
smooth=smooth, epsilon=epsilon, method='inverse')
INPUTS:
xs: scaled input X data
ys: scaled input Y data
zs: input data
XI: scaled grid X for output ZI
YI: scaled grid Y for output ZI
OUTPUTS:
ZI: interpolated data grid
OPTIONS:
smooth: smoothing weights
metric: distance metric to use (default euclidean)
epsilon: adjustable constant for distance functions
default is mean Euclidean distance
polynomial: polynomial order if augmenting radial basis functions
default None: no polynomials
method: radial basis function
multiquadric
inverse_multiquadric or inverse (default)
inverse_quadratic
gaussian
linear (first-order polyharmonic spline)
cubic (third-order polyharmonic spline)
quintic (fifth-order polyharmonic spline)
thin_plate: thin-plate spline
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python (https://numpy.org)
scipy: Scientific Tools for Python (https://docs.scipy.org/doc/)
REFERENCES:
R. L. Hardy, Multiquadric equations of topography and other irregular
surfaces, J. Geophys. Res., 76(8), 1905-1915, 1971.
M. Buhmann, "Radial Basis Functions", Cambridge Monographs on Applied and
Computational Mathematics, 2003.
UPDATE HISTORY:
Updated 01/2022: added function docstrings
Updated 07/2021: using scipy spatial distance routines
Updated 09/2017: using rcond=-1 in numpy least-squares algorithms
Updated 01/2017: epsilon in polyharmonic splines (linear, cubic, quintic)
Updated 08/2016: using format text within ValueError, edit constant vector
added low-order polynomial option (previously used default constant)
Updated 01/2016: new hierarchical_radial_basis function
that first reduces to points within distance. added cutoff option
Updated 10/2014: added third dimension (spherical)
Written 08/2014
"""
from __future__ import print_function, division
import numpy as np
import scipy.spatial
def radial_basis(xs, ys, zs, XI, YI, smooth=0.0, metric='euclidean',
epsilon=None, method='inverse', polynomial=None):
"""
Interpolates data using radial basis functions
Arguments
---------
xs: scaled input x-coordinates
ys: scaled input y-coordinates
zs: input data
XI: scaled output x-coordinates for data grid
YI: scaled output y-coordinates for data grid
Keyword arguments
-----------------
smooth: smoothing weights
metric: distance metric to use (default euclidean)
epsilon: adjustable constant for distance functions
method: radial basis function
- multiquadric
- inverse_multiquadric or inverse (default)
- inverse_quadratic
- gaussian
- linear (first-order polyharmonic spline)
- cubic (third-order polyharmonic spline)
- quintic (fifth-order polyharmonic spline)
- thin_plate: thin-plate spline
polynomial: polynomial order if augmenting radial basis functions
Returns
-------
ZI: interpolated data grid
"""
#-- remove singleton dimensions
xs = np.squeeze(xs)
ys = np.squeeze(ys)
zs = np.squeeze(zs)
XI = np.squeeze(XI)
YI = np.squeeze(YI)
#-- size of new matrix
if (np.ndim(XI) == 1):
nx = len(XI)
else:
nx,ny = np.shape(XI)
#-- Check to make sure sizes of input arguments are correct and consistent
if (len(zs) != len(xs)) | (len(zs) != len(ys)):
raise Exception('Length of X, Y, and Z must be equal')
if (np.shape(XI) != np.shape(YI)):
raise Exception('Size of XI and YI must be equal')
#-- create python dictionary of radial basis function formulas
radial_basis_functions = {}
radial_basis_functions['multiquadric'] = multiquadric
radial_basis_functions['inverse_multiquadric'] = inverse_multiquadric
radial_basis_functions['inverse'] = inverse_multiquadric
radial_basis_functions['inverse_quadratic'] = inverse_quadratic
radial_basis_functions['gaussian'] = gaussian
radial_basis_functions['linear'] = poly_spline1
radial_basis_functions['cubic'] = poly_spline3
radial_basis_functions['quintic'] = poly_spline5
radial_basis_functions['thin_plate'] = thin_plate
#-- check if formula name is listed
if method in radial_basis_functions.keys():
RBF = radial_basis_functions[method]
else:
raise ValueError("Method {0} not implemented".format(method))
#-- Creation of data distance matrix
#-- Data to Data
if (metric == 'brute'):
#-- use linear algebra to compute euclidean distances
Rd = distance_matrix(
np.array([xs, ys]),
np.array([xs, ys])
)
else:
#-- use scipy spatial distance routines
Rd = scipy.spatial.distance.cdist(
np.array([xs, ys]).T,
np.array([xs, ys]).T,
metric=metric)
#-- shape of distance matrix
N,M = np.shape(Rd)
#-- if epsilon is not specified
if epsilon is None:
#-- calculate norm with mean euclidean distance
uix,uiy = np.nonzero(np.tri(N,M=M,k=-1))
epsilon = np.mean(Rd[uix,uiy])
#-- possible augmentation of the PHI Matrix with polynomial Vectors
if polynomial is None:
#-- calculate radial basis function for data-to-data with smoothing
PHI = RBF(epsilon, Rd) + np.eye(N,M=M)*smooth
DMAT = zs.copy()
else:
#-- number of polynomial coefficients
nt = (polynomial**2 + 3*polynomial)//2 + 1
#-- calculate radial basis function for data-to-data with smoothing
PHI = np.zeros((N+nt,M+nt))
PHI[:N,:M] = RBF(epsilon, Rd) + np.eye(N,M=M)*smooth
#-- augmentation of PHI matrix with polynomials
POLY = polynomial_matrix(xs,ys,polynomial)
DMAT = np.concatenate(([zs,np.zeros((nt))]),axis=0)
#-- augment PHI matrix
for t in range(nt):
PHI[:N,M+t] = POLY[:,t]
PHI[N+t,:M] = POLY[:,t]
#-- Computation of the Weights
w = np.linalg.lstsq(PHI,DMAT[:,np.newaxis],rcond=-1)[0]
#-- Computation of distance Matrix
#-- Computation of distance Matrix (data to mesh points)
if (metric == 'brute'):
#-- use linear algebra to compute euclidean distances
Re = distance_matrix(
np.array([XI.flatten(),YI.flatten()]),
np.array([xs,ys])
)
else:
#-- use scipy spatial distance routines
Re = scipy.spatial.distance.cdist(
np.array([XI.flatten(),YI.flatten()]).T,
np.array([xs, ys]).T,
metric=metric)
#-- calculate radial basis function for data-to-mesh matrix
E = RBF(epsilon,Re)
#-- possible augmentation of the Evaluation Matrix with polynomial vectors
if polynomial is not None:
P = polynomial_matrix(XI.flatten(),YI.flatten(),polynomial)
E = np.concatenate(([E, P]),axis=1)
#-- calculate output interpolated array (or matrix)
if (np.ndim(XI) == 1):
ZI = np.squeeze(np.dot(E,w))
else:
ZI = np.zeros((nx,ny))
ZI[:,:] = np.dot(E,w).reshape(nx,ny)
#-- return the interpolated array (or matrix)
return ZI
#-- define radial basis function formulas
def multiquadric(epsilon, r):
#-- multiquadratic
f = np.sqrt((epsilon*r)**2 + 1.0)
return f
def inverse_multiquadric(epsilon, r):
#-- inverse multiquadratic
f = 1.0/np.sqrt((epsilon*r)**2 + 1.0)
return f
def inverse_quadratic(epsilon, r):
#-- inverse quadratic
f = 1.0/(1.0+(epsilon*r)**2)
return f
def gaussian(epsilon, r):
#-- gaussian
f = np.exp(-(epsilon*r)**2)
return f
def poly_spline1(epsilon, r):
#-- First-order polyharmonic spline
f = (epsilon*r)
return f
def poly_spline3(epsilon, r):
#-- Third-order polyharmonic spline
f = (epsilon*r)**3
return f
def poly_spline5(epsilon, r):
#-- Fifth-order polyharmonic spline
f = (epsilon*r)**5
return f
def thin_plate(epsilon, r):
#-- thin plate spline
f = r**2 * np.log(r)
#-- the spline is zero at zero
f[r == 0] = 0.0
return f
#-- calculate Euclidean distances between points as matrices
def distance_matrix(x,cntrs):
s,M = np.shape(x)
s,N = np.shape(cntrs)
D = np.zeros((M,N))
for d in range(s):
ii, = np.dot(d,np.ones((1,N))).astype(np.int)
jj, = np.dot(d,np.ones((1,M))).astype(np.int)
dx = x[ii,:].transpose() - cntrs[jj,:]
D += dx**2
D = np.sqrt(D)
return D
#-- calculate polynomial matrix to augment radial basis functions
def polynomial_matrix(x,y,order):
c = 0
M = len(x)
N = (order**2 + 3*order)//2 + 1
POLY = np.zeros((M,N))
for ii in range(order + 1):
for jj in range(ii + 1):
POLY[:,c] = (x**jj)*(y**(ii-jj))
c += 1
return POLY
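A minimal usage sketch of `radial_basis` on synthetic scattered data (not part of the original module; the sample size, grid resolution, and kernel choice here are arbitrary):
```python
import numpy as np

# scattered samples of a known surface
rng = np.random.default_rng(0)
xs = rng.uniform(-1.0, 1.0, 50)
ys = rng.uniform(-1.0, 1.0, 50)
zs = np.sin(np.pi * xs) * np.cos(np.pi * ys)

# regular output grid
XI, YI = np.meshgrid(np.linspace(-1, 1, 40), np.linspace(-1, 1, 40))

# interpolate with the default inverse multiquadric kernel
ZI = radial_basis(xs, ys, zs, XI, YI, smooth=0.001, method='inverse')
print(ZI.shape)  # (40, 40)
```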
| 33.214545
| 78
| 0.637837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,961
| 0.543136
|
36fae5ad374222c00d5bde1c50b8adc1fc9b19c3
| 465
|
py
|
Python
|
oldstuff/api1.py
|
miusuarioamigo/python-Le
|
dbb653255dab7d11b87f25eec94bcce63a86aa42
|
[
"MIT"
] | null | null | null |
oldstuff/api1.py
|
miusuarioamigo/python-Le
|
dbb653255dab7d11b87f25eec94bcce63a86aa42
|
[
"MIT"
] | null | null | null |
oldstuff/api1.py
|
miusuarioamigo/python-Le
|
dbb653255dab7d11b87f25eec94bcce63a86aa42
|
[
"MIT"
] | null | null | null |
from flask import Flask, jsonify, request
app = Flask(__name__)
@app.route('/', methods =['GET', 'POST'])
def index():
if (request.method == 'POST'):
some_json = request.get_json()
return jsonify({'you sent': some_json}),201
else:
return jsonify({"about" : "Hello World!"})
@app.route('/multi/<int:n1>', methods=['GET'])
def get_mul10(n1):
return jsonify({"result" : n1*10})
if __name__ == "__main__":
app.run(debug=True)
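A small client sketch for exercising the routes above (assumes the app is running locally on Flask's default port 5000):
```python
import requests

base = 'http://127.0.0.1:5000'
print(requests.get(f'{base}/').json())                        # {'about': 'Hello World!'}
print(requests.post(f'{base}/', json={'name': 'Le'}).json())  # {'you sent': {'name': 'Le'}}
print(requests.get(f'{base}/multi/7').json())                 # {'result': 70}
```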
| 25.833333
| 51
| 0.612903
| 0
| 0
| 0
| 0
| 345
| 0.741935
| 0
| 0
| 91
| 0.195699
|
36fb1e4b44269afa44164c5c335b64583671d7bf
| 5,129
|
py
|
Python
|
tests/mock/tests/settings.py
|
magicjoey/django-knowledge
|
ce6faa904a88e5d4f565763bc1d5cd07e6b5c5bd
|
[
"ISC"
] | 199
|
2015-01-22T05:07:30.000Z
|
2022-03-28T06:59:46.000Z
|
tests/mock/tests/settings.py
|
tzangms/django-knowledge
|
8238b1f4c1c6e12acb7f3fc327346776379a7a68
|
[
"0BSD"
] | 3
|
2015-10-20T09:48:58.000Z
|
2018-03-14T21:16:29.000Z
|
tests/mock/tests/settings.py
|
tzangms/django-knowledge
|
8238b1f4c1c6e12acb7f3fc327346776379a7a68
|
[
"0BSD"
] | 78
|
2015-02-09T02:23:16.000Z
|
2021-12-25T07:02:08.000Z
|
from mock.tests.base import TestCase
from django.test.client import Client
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from knowledge import settings
from knowledge.models import Question, Response
from knowledge.forms import QuestionForm, ResponseForm
class BasicSettingsTest(TestCase):
def test_ALLOW_ANONYMOUS(self):
self.assertFalse(settings.ALLOW_ANONYMOUS)
self.assertEqual(
None,
QuestionForm(self.anon)
)
self.assertEqual(
None,
ResponseForm(self.anon, self.question)
)
############# flip setting ##############
settings.ALLOW_ANONYMOUS = not settings.ALLOW_ANONYMOUS
############# flip setting ##############
self.assertNotEqual(
None,
QuestionForm(self.anon)
)
self.assertNotEqual(
None,
ResponseForm(self.anon, self.question)
)
form = QuestionForm(self.anon)
self.assertNotIn('status', form.fields.keys())
# missing the name/email...
QUESTION_POST = {
'title': 'This is a title friend!',
'body': 'This is the body friend!'
}
form = QuestionForm(self.anon, QUESTION_POST)
self.assertFalse(form.is_valid())
QUESTION_POST = {
'name': 'Test Guy',
'email': 'anonymous@example.com',
'title': 'This is a title friend!',
'body': 'This is the body friend!'
}
form = QuestionForm(self.anon, QUESTION_POST)
self.assertTrue(form.is_valid())
question = form.save()
# question has no user and is public by default
self.assertFalse(question.user)
self.assertEquals(question.name, 'Test Guy')
self.assertEquals(question.email, 'anonymous@example.com')
self.assertEquals(question.status, 'public')
############# flip setting ##############
settings.ALLOW_ANONYMOUS = not settings.ALLOW_ANONYMOUS
############# flip setting ##############
def test_AUTO_PUBLICIZE(self):
self.assertFalse(settings.AUTO_PUBLICIZE)
QUESTION_POST = {
'title': 'This is a title friend!',
'body': 'This is the body friend!',
'status': 'private'
}
question = QuestionForm(self.joe, QUESTION_POST).save()
self.assertEquals(question.status, 'private')
############# flip setting ##############
settings.AUTO_PUBLICIZE = not settings.AUTO_PUBLICIZE
############# flip setting ##############
question = QuestionForm(self.joe, QUESTION_POST).save()
self.assertEquals(question.status, 'public')
############# flip setting ##############
settings.AUTO_PUBLICIZE = not settings.AUTO_PUBLICIZE
############# flip setting ##############
def test_FREE_RESPONSE(self):
self.assertTrue(settings.FREE_RESPONSE)
# joe authored the question, it is private so any user can respond...
self.assertFalse(ResponseForm(self.anon, self.question))
self.assertTrue(ResponseForm(self.bob, self.question))
self.assertTrue(ResponseForm(self.joe, self.question))
self.assertTrue(ResponseForm(self.admin, self.question))
############# flip setting ##############
settings.FREE_RESPONSE = not settings.FREE_RESPONSE
############# flip setting ##############
# ...now bob can't respond!
self.assertFalse(ResponseForm(self.anon, self.question))
self.assertFalse(ResponseForm(self.bob, self.question))
self.assertTrue(ResponseForm(self.joe, self.question))
self.assertTrue(ResponseForm(self.admin, self.question))
############# flip setting ##############
settings.FREE_RESPONSE = not settings.FREE_RESPONSE
############# flip setting ##############
def test_SLUG_URLS(self):
self.assertTrue(settings.SLUG_URLS)
c = Client()
self.question.public()
question_url = reverse('knowledge_thread', args=[self.question.id, slugify(self.question.title)])
r = c.get(reverse('knowledge_thread', args=[self.question.id, 'a-big-long-slug']))
self.assertEquals(r.status_code, 301)
r = c.get(question_url)
self.assertEquals(r.status_code, 200)
############# flip setting ##############
settings.SLUG_URLS = not settings.SLUG_URLS
############# flip setting ##############
r = c.get(reverse('knowledge_thread', args=[self.question.id, 'a-big-long-slug']))
self.assertEquals(r.status_code, 301)
r = c.get(question_url)
self.assertEquals(r.status_code, 301)
r = c.get(reverse('knowledge_thread_no_slug', args=[self.question.id]))
self.assertEquals(r.status_code, 200)
############# flip setting ##############
settings.SLUG_URLS = not settings.SLUG_URLS
############# flip setting ##############
| 32.66879
| 105
| 0.580425
| 4,775
| 0.930981
| 0
| 0
| 0
| 0
| 0
| 0
| 1,261
| 0.245857
|
36fd537a07164889366087995d08455fc14bd19e
| 828
|
py
|
Python
|
Batch_sentiment/spark_hashtag.py
|
malli3131/SparkApps
|
b24763eaf6411cba3c22a4c070a45d6fe96dfa1d
|
[
"Apache-2.0"
] | 3
|
2018-01-17T05:51:10.000Z
|
2018-11-22T16:59:53.000Z
|
Batch_sentiment/spark_hashtag.py
|
malli3131/SparkApps
|
b24763eaf6411cba3c22a4c070a45d6fe96dfa1d
|
[
"Apache-2.0"
] | 2
|
2016-12-15T13:15:42.000Z
|
2016-12-15T13:19:19.000Z
|
Batch_sentiment/spark_hashtag.py
|
malli3131/SparkApps
|
b24763eaf6411cba3c22a4c070a45d6fe96dfa1d
|
[
"Apache-2.0"
] | 4
|
2018-02-12T06:37:04.000Z
|
2020-01-04T11:30:24.000Z
|
import re
import string
import sys
from pyspark import SparkContext
exclude = set(string.punctuation)
def get_hash_tag(word, rmPunc):
pattern = re.compile("^#(.*)")
m = pattern.match(word)
tag = None
if m:
match = m.groups()
for m_word in match:
tag = ''.join(letter for letter in m_word if letter not in rmPunc)
if tag is not None:
return tag
sc = SparkContext("local", "Finding Hash Tags")
rmPunc = sc.broadcast(exclude)
mydata = sc.textFile("hdfs://<hostname>:<port>/path/to/parsedata<first job output>")
wordsRDD = mydata.flatMap( lambda line : line.split("\t")[1].split(" "))
tagsRDD = wordsRDD.map( lambda word : get_hash_tag(word, rmPunc.value))
hashtagsRDD = tagsRDD.filter( lambda word : word is not None)
hashtagsRDD.saveAsTextFile("hdfs://<hostname>:<port>/path/to/hashtags")
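The Spark pipeline above boils down to the pure-Python `get_hash_tag` logic; a standalone restatement of that logic (no Spark required) behaves as follows:
```python
import re
import string

exclude = set(string.punctuation)

def get_hash_tag(word, rm_punc):
    # return the hashtag body with punctuation stripped, or None if `word` is not a hashtag
    m = re.match(r"^#(.*)", word)
    if m:
        return ''.join(c for c in m.group(1) if c not in rm_punc)
    return None

words = "RT #BigData rocks with #spark_2.0!".split(" ")
print([t for t in (get_hash_tag(w, exclude) for w in words) if t])
# ['BigData', 'spark20']
```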
| 30.666667
| 84
| 0.695652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 148
| 0.178744
|
7fc0ed53e23bdf7182409dab9a83d9dcb7cb0ae5
| 417
|
py
|
Python
|
backend/apps/risks/urls.py
|
intellisense/risks
|
e98b8c6e5694b895603f7ff1b3c04b6057aa1136
|
[
"MIT"
] | null | null | null |
backend/apps/risks/urls.py
|
intellisense/risks
|
e98b8c6e5694b895603f7ff1b3c04b6057aa1136
|
[
"MIT"
] | null | null | null |
backend/apps/risks/urls.py
|
intellisense/risks
|
e98b8c6e5694b895603f7ff1b3c04b6057aa1136
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
urlpatterns = [
url(r'^risks/$', views.RiskTypeList.as_view(), name='risks_list'),
url(r'^risks/(?P<pk>[0-9]+)/$', views.RiskTypeDetail.as_view(), name='risk_details'),
url(r'^fields/$', views.FieldTypes.as_view(), name='field_types'),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| 34.75
| 89
| 0.729017
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 88
| 0.211031
|
7fc0f798553336843920795f3c9cd1c0cfdb4288
| 534
|
py
|
Python
|
src/main.py
|
sguzman/Dbase_Channel_Grab
|
30a9e3abd72ed93cd3c7ea80d44b664a0a76d8af
|
[
"Unlicense"
] | null | null | null |
src/main.py
|
sguzman/Dbase_Channel_Grab
|
30a9e3abd72ed93cd3c7ea80d44b664a0a76d8af
|
[
"Unlicense"
] | null | null | null |
src/main.py
|
sguzman/Dbase_Channel_Grab
|
30a9e3abd72ed93cd3c7ea80d44b664a0a76d8af
|
[
"Unlicense"
] | null | null | null |
import json
import bs4
import requests
url_base = 'https://dbase.tube/chart/channels/subscribers/all?page=%s&spf=navigate'
max_page = 19084
html_doc = requests.get(url_base).text
for i in range(max_page):
url = url_base % i
hot_bod = requests.get(url).text
json_blob = json.loads(hot_bod)
html_body = json_blob['body']['spf_content']
soup = bs4.BeautifulSoup(html_body, 'html.parser')
for j in soup.findAll('a', class_='list__item'):
channel_raw = j['href']
print(channel_raw.split('/')[2])
| 28.105263
| 83
| 0.687266
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 128
| 0.2397
|
7fc1e0de66e5ca0c06e7e6fc0f89c827b64df6c5
| 315
|
py
|
Python
|
Leetcode/322. Coin Change/solution1.py
|
asanoviskhak/Outtalent
|
c500e8ad498f76d57eb87a9776a04af7bdda913d
|
[
"MIT"
] | 51
|
2020-07-12T21:27:47.000Z
|
2022-02-11T19:25:36.000Z
|
Leetcode/322. Coin Change/solution1.py
|
CrazySquirrel/Outtalent
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
[
"MIT"
] | null | null | null |
Leetcode/322. Coin Change/solution1.py
|
CrazySquirrel/Outtalent
|
8a10b23335d8e9f080e5c39715b38bcc2916ff00
|
[
"MIT"
] | 32
|
2020-07-27T13:54:24.000Z
|
2021-12-25T18:12:50.000Z
|
from math import inf
from typing import List
class Solution:
def coinChange(self, coins: List[int], amount: int) -> int:
dp = [inf] * (amount + 1)
dp[0] = 0
for coin in coins:
for x in range(coin, amount + 1):
dp[x] = min(dp[x], dp[x - coin] + 1)
return dp[amount] if dp[amount] != inf else -1
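A quick sanity check of the bottom-up DP (assumes the typing/math imports added above; expected answers follow directly from the recurrence):
```python
if __name__ == '__main__':
    s = Solution()
    print(s.coinChange([1, 2, 5], 11))  # 3  -> 5 + 5 + 1
    print(s.coinChange([2], 3))         # -1 -> amount 3 cannot be formed from 2s
```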
| 28.636364
| 63
| 0.492063
| 314
| 0.996825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
7fc44269a458fb1cbf6dc4894b2532e5211304c0
| 1,166
|
py
|
Python
|
kanban_board/admin.py
|
Zeerooth/django-kanban-board
|
d390635017199a90da666bba3a74cafc86838884
|
[
"BSD-3-Clause"
] | null | null | null |
kanban_board/admin.py
|
Zeerooth/django-kanban-board
|
d390635017199a90da666bba3a74cafc86838884
|
[
"BSD-3-Clause"
] | 2
|
2021-06-10T17:52:06.000Z
|
2021-09-22T18:00:26.000Z
|
kanban_board/admin.py
|
Zeerooth/django-kanban-board
|
d390635017199a90da666bba3a74cafc86838884
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from ordered_model.admin import OrderedStackedInline, OrderedInlineModelAdminMixin
from kanban_board.models import KanbanBoard, KanbanBoardState, Workflow, KanbanBoardElement
class KanbanBoardAdmin(admin.ModelAdmin):
list_display = ('name', 'workflow', 'element_count')
filter_horizontal = ('allowed_users', 'allowed_groups')
def element_count(self, obj):
return KanbanBoardElement.objects.filter(kanban_board_parent=obj).select_subclasses().count()
class KanbanBoardStateInline(OrderedStackedInline):
model = KanbanBoardState
fields = ('workflow', 'name', 'move_up_down_links', )
readonly_fields = ('workflow', 'move_up_down_links', )
extra = 0
ordering = ('order',)
class WorkflowAdmin(OrderedInlineModelAdminMixin, admin.ModelAdmin):
list_display = ('name', 'workflow_sequence')
inlines = (KanbanBoardStateInline, )
def workflow_sequence(self, obj):
return "->".join([str(x.name) for x in list(obj.kanbanboardstate_set.all())])
admin.site.register(KanbanBoard, KanbanBoardAdmin)
admin.site.register(KanbanBoardState)
admin.site.register(Workflow, WorkflowAdmin)
| 37.612903
| 101
| 0.762436
| 815
| 0.698971
| 0
| 0
| 0
| 0
| 0
| 0
| 164
| 0.140652
|
7fc4576c38452997b1f8bd1ddca0fc4d69cf33db
| 16,705
|
py
|
Python
|
certbot-nginx/certbot_nginx/tests/nginxparser_test.py
|
jcollie/certbot
|
1df778859b7ace699c02039b269abd426058a237
|
[
"Apache-2.0"
] | 4
|
2020-04-09T21:57:23.000Z
|
2020-04-11T13:26:54.000Z
|
certbot-nginx/certbot_nginx/tests/nginxparser_test.py
|
jcollie/certbot
|
1df778859b7ace699c02039b269abd426058a237
|
[
"Apache-2.0"
] | 32
|
2019-02-20T14:51:48.000Z
|
2019-02-27T10:11:34.000Z
|
certbot-nginx/certbot_nginx/tests/nginxparser_test.py
|
jcollie/certbot
|
1df778859b7ace699c02039b269abd426058a237
|
[
"Apache-2.0"
] | 1
|
2020-02-06T15:04:02.000Z
|
2020-02-06T15:04:02.000Z
|
"""Test for certbot_nginx.nginxparser."""
import copy
import operator
import tempfile
import unittest
from pyparsing import ParseException
from certbot_nginx.nginxparser import (
RawNginxParser, loads, load, dumps, dump, UnspacedList)
from certbot_nginx.tests import util
FIRST = operator.itemgetter(0)
class TestRawNginxParser(unittest.TestCase):
"""Test the raw low-level Nginx config parser."""
def test_assignments(self):
parsed = RawNginxParser.assignment.parseString('root /test;').asList()
self.assertEqual(parsed, ['root', ' ', '/test'])
parsed = RawNginxParser.assignment.parseString('root /test;foo bar;').asList()
self.assertEqual(parsed, ['root', ' ', '/test'], ['foo', ' ', 'bar'])
def test_blocks(self):
parsed = RawNginxParser.block.parseString('foo {}').asList()
self.assertEqual(parsed, [['foo', ' '], []])
parsed = RawNginxParser.block.parseString('location /foo{}').asList()
self.assertEqual(parsed, [['location', ' ', '/foo'], []])
parsed = RawNginxParser.block.parseString('foo { bar foo ; }').asList()
self.assertEqual(parsed, [['foo', ' '], [[' ', 'bar', ' ', 'foo', ' '], ' ']])
def test_nested_blocks(self):
parsed = RawNginxParser.block.parseString('foo { bar {} }').asList()
block, content = parsed
self.assertEqual(FIRST(content), [[' ', 'bar', ' '], []])
self.assertEqual(FIRST(block), 'foo')
def test_dump_as_string(self):
dumped = dumps(UnspacedList([
['user', ' ', 'www-data'],
[['\n', 'server', ' '], [
['\n ', 'listen', ' ', '80'],
['\n ', 'server_name', ' ', 'foo.com'],
['\n ', 'root', ' ', '/home/ubuntu/sites/foo/'],
[['\n\n ', 'location', ' ', '/status', ' '], [
['\n ', 'check_status', ''],
[['\n\n ', 'types', ' '],
[['\n ', 'image/jpeg', ' ', 'jpg']]],
]]
]]]))
self.assertEqual(dumped.split('\n'),
'user www-data;\n'
'server {\n'
' listen 80;\n'
' server_name foo.com;\n'
' root /home/ubuntu/sites/foo/;\n'
'\n'
' location /status {\n'
' check_status;\n'
'\n'
' types {\n'
' image/jpeg jpg;}}}'.split('\n'))
def test_parse_from_file(self):
with open(util.get_data_filename('foo.conf')) as handle:
parsed = util.filter_comments(load(handle))
self.assertEqual(
parsed,
[['user', 'www-data'],
[['http'],
[[['server'], [
['listen', '*:80', 'default_server', 'ssl'],
['server_name', '*.www.foo.com', '*.www.example.com'],
['root', '/home/ubuntu/sites/foo/'],
[['location', '/status'], [
[['types'], [['image/jpeg', 'jpg']]],
]],
[['location', '~', r'case_sensitive\.php$'], [
['index', 'index.php'],
['root', '/var/root'],
]],
[['location', '~*', r'case_insensitive\.php$'], []],
[['location', '=', r'exact_match\.php$'], []],
[['location', '^~', r'ignore_regex\.php$'], []]
]]]]]
)
def test_parse_from_file2(self):
with open(util.get_data_filename('edge_cases.conf')) as handle:
parsed = util.filter_comments(load(handle))
self.assertEqual(
parsed,
[[['server'], [['server_name', 'simple']]],
[['server'],
[['server_name', 'with.if'],
[['location', '~', '^/services/.+$'],
[[['if', '($request_filename', '~*', '\\.(ttf|woff)$)'],
[['add_header', 'Access-Control-Allow-Origin', '"*"']]]]]]],
[['server'],
[['server_name', 'with.complicated.headers'],
[['location', '~*', '\\.(?:gif|jpe?g|png)$'],
[['add_header', 'Pragma', 'public'],
['add_header',
'Cache-Control', '\'public, must-revalidate, proxy-revalidate\'',
'"test,;{}"', 'foo'],
['blah', '"hello;world"'],
['try_files', '$uri', '@rewrites']]]]]])
def test_parse_from_file3(self):
with open(util.get_data_filename('multiline_quotes.conf')) as handle:
parsed = util.filter_comments(load(handle))
self.assertEqual(
parsed,
[[['http'],
[[['server'],
[['listen', '*:443'],
[['location', '/'],
[['body_filter_by_lua',
'\'ngx.ctx.buffered = (ngx.ctx.buffered or "")'
' .. string.sub(ngx.arg[1], 1, 1000)\n'
' '
'if ngx.arg[2] then\n'
' '
'ngx.var.resp_body = ngx.ctx.buffered\n'
' end\'']]]]]]]])
def test_abort_on_parse_failure(self):
with open(util.get_data_filename('broken.conf')) as handle:
self.assertRaises(ParseException, load, handle)
def test_dump_as_file(self):
with open(util.get_data_filename('nginx.conf')) as handle:
parsed = load(handle)
parsed[-1][-1].append(UnspacedList([['server'],
[['listen', ' ', '443', ' ', 'ssl'],
['server_name', ' ', 'localhost'],
['ssl_certificate', ' ', 'cert.pem'],
['ssl_certificate_key', ' ', 'cert.key'],
['ssl_session_cache', ' ', 'shared:SSL:1m'],
['ssl_session_timeout', ' ', '5m'],
['ssl_ciphers', ' ', 'HIGH:!aNULL:!MD5'],
[['location', ' ', '/'],
[['root', ' ', 'html'],
['index', ' ', 'index.html', ' ', 'index.htm']]]]]))
with tempfile.TemporaryFile(mode='w+t') as f:
dump(parsed, f)
f.seek(0)
parsed_new = load(f)
self.assertEqual(parsed, parsed_new)
def test_comments(self):
with open(util.get_data_filename('minimalistic_comments.conf')) as handle:
parsed = load(handle)
with tempfile.TemporaryFile(mode='w+t') as f:
dump(parsed, f)
f.seek(0)
parsed_new = load(f)
self.assertEqual(parsed, parsed_new)
self.assertEqual(parsed_new, [
['#', " Use bar.conf when it's a full moon!"],
['include', 'foo.conf'],
['#', ' Kilroy was here'],
['check_status'],
[['server'],
[['#', ''],
['#', " Don't forget to open up your firewall!"],
['#', ''],
['listen', '1234'],
['#', ' listen 80;']]],
])
def test_issue_518(self):
parsed = loads('if ($http_accept ~* "webp") { set $webp "true"; }')
self.assertEqual(parsed, [
[['if', '($http_accept', '~*', '"webp")'],
[['set', '$webp', '"true"']]]
])
def test_comment_in_block(self):
parsed = loads("""http {
# server{
}""")
self.assertEqual(parsed, [
[['http'],
[['#', ' server{']]]
])
def test_access_log(self):
# see issue #3798
parsed = loads('access_log syslog:server=unix:/dev/log,facility=auth,'
'tag=nginx_post,severity=info custom;')
self.assertEqual(parsed, [
['access_log',
'syslog:server=unix:/dev/log,facility=auth,tag=nginx_post,severity=info',
'custom']
])
def test_add_header(self):
# see issue #3798
parsed = loads('add_header Cache-Control no-cache,no-store,must-revalidate,max-age=0;')
self.assertEqual(parsed, [
['add_header', 'Cache-Control', 'no-cache,no-store,must-revalidate,max-age=0']
])
def test_map_then_assignment_in_block(self):
# see issue #3798
test_str = """http {
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
"~Opera Mini" 1;
*.example.com 1;
}
one;
}"""
parsed = loads(test_str)
self.assertEqual(parsed, [
[['http'], [
[['map', '$http_upgrade', '$connection_upgrade'], [
['default', 'upgrade'],
["''", 'close'],
['"~Opera Mini"', '1'],
['*.example.com', '1']
]],
['one']
]]
])
def test_variable_name(self):
parsed = loads('try_files /typo3temp/tx_ncstaticfilecache/'
'$host${request_uri}index.html @nocache;')
self.assertEqual(parsed, [
['try_files',
'/typo3temp/tx_ncstaticfilecache/$host${request_uri}index.html',
'@nocache']
])
def test_weird_blocks(self):
test = r"""
if ($http_user_agent ~ MSIE) {
rewrite ^(.*)$ /msie/$1 break;
}
if ($http_cookie ~* "id=([^;]+)(?:;|$)") {
set $id $1;
}
if ($request_method = POST) {
return 405;
}
if ($request_method) {
return 403;
}
if ($args ~ post=140){
rewrite ^ http://example.com/;
}
location ~ ^/users/(.+\.(?:gif|jpe?g|png))$ {
alias /data/w3/images/$1;
}
proxy_set_header X-Origin-URI ${scheme}://${http_host}/$request_uri;
"""
parsed = loads(test)
self.assertEqual(parsed, [[['if', '($http_user_agent', '~', 'MSIE)'],
[['rewrite', '^(.*)$', '/msie/$1', 'break']]],
[['if', '($http_cookie', '~*', '"id=([^;]+)(?:;|$)")'], [['set', '$id', '$1']]],
[['if', '($request_method', '=', 'POST)'], [['return', '405']]],
[['if', '($request_method)'],
[['return', '403']]], [['if', '($args', '~', 'post=140)'],
[['rewrite', '^', 'http://example.com/']]],
[['location', '~', '^/users/(.+\\.(?:gif|jpe?g|png))$'],
[['alias', '/data/w3/images/$1']]],
['proxy_set_header', 'X-Origin-URI', '${scheme}://${http_host}/$request_uri']]
)
def test_edge_cases(self):
# quotes
parsed = loads(r'"hello\""; # blah "heh heh"')
self.assertEqual(parsed, [['"hello\\""'], ['#', ' blah "heh heh"']])
# if with comment
parsed = loads("""if ($http_cookie ~* "id=([^;]+)(?:;|$)") { # blah )
}""")
self.assertEqual(parsed, [[['if', '($http_cookie', '~*', '"id=([^;]+)(?:;|$)")'],
[['#', ' blah )']]]])
# end paren
test = """
one"test";
("two");
"test")red;
"test")"blue";
"test")"three;
(one"test")one;
one";
one"test;
one"test"one;
"""
parsed = loads(test)
self.assertEqual(parsed, [
['one"test"'],
['("two")'],
['"test")red'],
['"test")"blue"'],
['"test")"three'],
['(one"test")one'],
['one"'],
['one"test'],
['one"test"one']
])
self.assertRaises(ParseException, loads, r'"test"one;') # fails
self.assertRaises(ParseException, loads, r'"test;') # fails
# newlines
test = """
server_name foo.example.com bar.example.com \
baz.example.com qux.example.com;
server_name foo.example.com bar.example.com
baz.example.com qux.example.com;
"""
parsed = loads(test)
self.assertEqual(parsed, [
['server_name', 'foo.example.com', 'bar.example.com',
'baz.example.com', 'qux.example.com'],
['server_name', 'foo.example.com', 'bar.example.com',
'baz.example.com', 'qux.example.com']
])
# variable weirdness
parsed = loads("directive $var ${var} $ ${};")
self.assertEqual(parsed, [['directive', '$var', '${var}', '$', '${}']])
self.assertRaises(ParseException, loads, "server {server_name test.com};")
self.assertEqual(loads("blag${dfgdfg};"), [['blag${dfgdfg}']])
self.assertRaises(ParseException, loads, "blag${dfgdf{g};")
class TestUnspacedList(unittest.TestCase):
"""Test the UnspacedList data structure"""
def setUp(self):
self.a = ["\n ", "things", " ", "quirk"]
self.b = ["y", " "]
self.l = self.a[:]
self.l2 = self.b[:]
self.ul = UnspacedList(self.l)
self.ul2 = UnspacedList(self.l2)
def test_construction(self):
self.assertEqual(self.ul, ["things", "quirk"])
self.assertEqual(self.ul2, ["y"])
def test_append(self):
ul3 = copy.deepcopy(self.ul)
ul3.append("wise")
self.assertEqual(ul3, ["things", "quirk", "wise"])
self.assertEqual(ul3.spaced, self.a + ["wise"])
def test_add(self):
ul3 = self.ul + self.ul2
self.assertEqual(ul3, ["things", "quirk", "y"])
self.assertEqual(ul3.spaced, self.a + self.b)
self.assertEqual(self.ul.spaced, self.a)
ul3 = self.ul + self.l2
self.assertEqual(ul3, ["things", "quirk", "y"])
self.assertEqual(ul3.spaced, self.a + self.b)
def test_extend(self):
ul3 = copy.deepcopy(self.ul)
ul3.extend(self.ul2)
self.assertEqual(ul3, ["things", "quirk", "y"])
self.assertEqual(ul3.spaced, self.a + self.b)
self.assertEqual(self.ul.spaced, self.a)
def test_set(self):
ul3 = copy.deepcopy(self.ul)
ul3[0] = "zither"
l = ["\n ", "zather", "zest"]
ul3[1] = UnspacedList(l)
self.assertEqual(ul3, ["zither", ["zather", "zest"]])
self.assertEqual(ul3.spaced, [self.a[0], "zither", " ", l])
def test_get(self):
self.assertRaises(IndexError, self.ul2.__getitem__, 2)
self.assertRaises(IndexError, self.ul2.__getitem__, -3)
def test_insert(self):
x = UnspacedList(
[['\n ', 'listen', ' ', '69.50.225.155:9000'],
['\n ', 'listen', ' ', '127.0.0.1'],
['\n ', 'server_name', ' ', '.example.com'],
['\n ', 'server_name', ' ', 'example.*'], '\n',
['listen', ' ', '5001', ' ', 'ssl']])
x.insert(5, "FROGZ")
self.assertEqual(x,
[['listen', '69.50.225.155:9000'], ['listen', '127.0.0.1'],
['server_name', '.example.com'], ['server_name', 'example.*'],
['listen', '5001', 'ssl'], 'FROGZ'])
self.assertEqual(x.spaced,
[['\n ', 'listen', ' ', '69.50.225.155:9000'],
['\n ', 'listen', ' ', '127.0.0.1'],
['\n ', 'server_name', ' ', '.example.com'],
['\n ', 'server_name', ' ', 'example.*'], '\n',
['listen', ' ', '5001', ' ', 'ssl'],
'FROGZ'])
def test_rawlists(self):
ul3 = copy.deepcopy(self.ul)
ul3.insert(0, "some")
ul3.append("why")
ul3.extend(["did", "whether"])
del ul3[2]
self.assertEqual(ul3, ["some", "things", "why", "did", "whether"])
def test_is_dirty(self):
self.assertEqual(False, self.ul2.is_dirty())
ul3 = UnspacedList([])
ul3.append(self.ul)
self.assertEqual(False, self.ul.is_dirty())
self.assertEqual(True, ul3.is_dirty())
ul4 = UnspacedList([[1], [2, 3, 4]])
self.assertEqual(False, ul4.is_dirty())
ul4[1][2] = 5
self.assertEqual(True, ul4.is_dirty())
if __name__ == '__main__':
unittest.main() # pragma: no cover
| 37.879819
| 95
| 0.449386
| 16,319
| 0.976893
| 0
| 0
| 0
| 0
| 0
| 0
| 6,625
| 0.396588
|
7fc49c5390bfb96b900f097bb43b1a2528a313d1
| 6,522
|
py
|
Python
|
pysnmp-with-texts/Intel-Common-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/Intel-Common-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/Intel-Common-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module Intel-Common-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Intel-Common-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:54:14 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ModuleIdentity, ObjectIdentity, iso, Integer32, Bits, Counter64, Counter32, Gauge32, NotificationType, Unsigned32, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, TimeTicks, enterprises = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "ObjectIdentity", "iso", "Integer32", "Bits", "Counter64", "Counter32", "Gauge32", "NotificationType", "Unsigned32", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "TimeTicks", "enterprises")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
intel = MibIdentifier((1, 3, 6, 1, 4, 1, 343))
identifiers = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1))
products = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2))
experimental = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 3))
information_technology = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 4)).setLabel("information-technology")
sysProducts = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 5))
mib2ext = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 6))
hw = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 7))
wekiva = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 111))
systems = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1))
objects = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 2))
comm_methods = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 3)).setLabel("comm-methods")
pc_systems = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 1)).setLabel("pc-systems")
proxy_systems = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 2)).setLabel("proxy-systems")
hub_systems = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 3)).setLabel("hub-systems")
switch_systems = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 4)).setLabel("switch-systems")
local_proxy_1 = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 3, 1)).setLabel("local-proxy-1")
pc_novell_1 = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 3, 2)).setLabel("pc-novell-1")
express10_100Stack = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 3, 1)).setLabel("express10-100Stack")
express12TX = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 3, 2))
express24TX = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 3, 3))
expressReserved = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 3, 4))
expressBridge = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 3, 6))
express210_12 = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 3, 7)).setLabel("express210-12")
express210_24 = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 3, 8)).setLabel("express210-24")
express220_12 = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 3, 9)).setLabel("express220-12")
express220_24 = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 3, 10)).setLabel("express220-24")
express300Stack = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 3, 11))
express320_16 = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 3, 12)).setLabel("express320-16")
express320_24 = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 1, 1, 3, 13)).setLabel("express320-24")
pc_products = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 1)).setLabel("pc-products")
hub_products = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 2)).setLabel("hub-products")
proxy = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 3))
print_products = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 4)).setLabel("print-products")
network_products = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 5)).setLabel("network-products")
snmp_agents = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 6)).setLabel("snmp-agents")
nic_products = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 7)).setLabel("nic-products")
server_management = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 10)).setLabel("server-management")
switch_products = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 11)).setLabel("switch-products")
i2o = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 120))
express110 = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 2, 1))
netport_1 = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 4, 1)).setLabel("netport-1")
netport_2 = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 4, 2)).setLabel("netport-2")
netport_express = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 4, 3)).setLabel("netport-express")
lanDesk = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 5, 1))
ld_alarms = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 5, 1, 1)).setLabel("ld-alarms")
internetServer_2 = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 5, 2)).setLabel("internetServer-2")
iS_alarms = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 2, 5, 2, 1)).setLabel("iS-alarms")
mibBuilder.exportSymbols("Intel-Common-MIB", express220_24=express220_24, express110=express110, snmp_agents=snmp_agents, switch_systems=switch_systems, objects=objects, proxy=proxy, lanDesk=lanDesk, express12TX=express12TX, mib2ext=mib2ext, experimental=experimental, express210_24=express210_24, sysProducts=sysProducts, netport_1=netport_1, internetServer_2=internetServer_2, intel=intel, pc_novell_1=pc_novell_1, products=products, express320_24=express320_24, proxy_systems=proxy_systems, express320_16=express320_16, identifiers=identifiers, express300Stack=express300Stack, wekiva=wekiva, express10_100Stack=express10_100Stack, hub_systems=hub_systems, ld_alarms=ld_alarms, server_management=server_management, switch_products=switch_products, i2o=i2o, netport_express=netport_express, network_products=network_products, expressBridge=expressBridge, express220_12=express220_12, local_proxy_1=local_proxy_1, systems=systems, comm_methods=comm_methods, express210_12=express210_12, pc_products=pc_products, hub_products=hub_products, expressReserved=expressReserved, netport_2=netport_2, pc_systems=pc_systems, hw=hw, express24TX=express24TX, print_products=print_products, information_technology=information_technology, iS_alarms=iS_alarms, nic_products=nic_products)
| 103.52381
| 1,274
| 0.713891
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,313
| 0.201319
|
7fc5dfd088a228987587fd982a1eb94c9c4b2b71
| 4,430
|
py
|
Python
|
src/python/pants/jvm/resolve/coursier_setup.py
|
Eric-Arellano/pants
|
aaa9756bc4f2cc97bb97851a4295a0de85f374b1
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/jvm/resolve/coursier_setup.py
|
Eric-Arellano/pants
|
aaa9756bc4f2cc97bb97851a4295a0de85f374b1
|
[
"Apache-2.0"
] | 12
|
2022-01-06T23:20:22.000Z
|
2022-03-17T05:06:37.000Z
|
src/python/pants/jvm/resolve/coursier_setup.py
|
Eric-Arellano/pants
|
aaa9756bc4f2cc97bb97851a4295a0de85f374b1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import textwrap
from dataclasses import dataclass
from typing import ClassVar, Iterable
from pants.core.util_rules.external_tool import (
DownloadedExternalTool,
ExternalToolRequest,
TemplatedExternalTool,
)
from pants.engine.fs import CreateDigest, Digest, FileContent, MergeDigests
from pants.engine.platform import Platform
from pants.engine.rules import Get, MultiGet, collect_rules, rule
COURSIER_POST_PROCESSING_SCRIPT = textwrap.dedent(
"""\
import json
import sys
from pathlib import PurePath
from shutil import copyfile
report = json.load(open(sys.argv[1]))
classpath = set()
for dep in report['dependencies']:
file_path = PurePath(dep['file'])
classpath_dest = f"classpath/{file_path.name}"
if classpath_dest in classpath:
raise Exception(f"Found duplicate jar name {file_path.name}, which isn't currently supported")
classpath.add(classpath_dest)
copyfile(file_path, classpath_dest)
"""
)
COURSIER_WRAPPER_SCRIPT = textwrap.dedent(
"""\
set -eux
coursier_exe="$1"
shift
json_output_file="$1"
shift
"$coursier_exe" fetch --json-output-file="$json_output_file" "$@"
/bin/mkdir -p classpath
/usr/bin/python3 coursier_post_processing_script.py "$json_output_file"
"""
)
class CoursierBinary(TemplatedExternalTool):
options_scope = "coursier"
name = "coursier"
help = "A dependency resolver for the Maven ecosystem."
default_version = "v2.0.13"
default_known_versions = [
"v2.0.13|linux_arm64 |8d428bede2d9d0e48ffad8360d49de48bd0c2c3b0e54e82e3a7665019b65e4d0|58622664",
"v2.0.13|linux_x86_64|1ae089789cc4b0a4d296d6852b760d7f8bf72805267a6b7571e99b681d5e13b4|59652208",
"v2.0.13|macos_arm64 |d74b8fe4ffc2f4e9011d7151722fc8b5ffca8a72b3bc4188c61df3326228c4ef|57625024",
"v2.0.13|macos_x86_64|d74b8fe4ffc2f4e9011d7151722fc8b5ffca8a72b3bc4188c61df3326228c4ef|57625024",
]
default_url_template = (
"https://github.com/coursier/coursier/releases/download/{version}/cs-{platform}"
)
default_url_platform_mapping = {
"macos_arm64": "x86_64-apple-darwin",
"macos_x86_64": "x86_64-apple-darwin",
"linux_arm64": "aarch64-pc-linux",
"linux_x86_64": "x86_64-pc-linux",
}
@dataclass(frozen=True)
class Coursier:
"""The Coursier tool and various utilities, materialzed to a `Digest` and ready to use."""
coursier: DownloadedExternalTool
digest: Digest
wrapper_script: ClassVar[str] = "coursier_wrapper_script.sh"
post_processing_script: ClassVar[str] = "coursier_post_processing_script.py"
cache_name: ClassVar[str] = "coursier"
cache_dir: ClassVar[str] = ".cache"
def args(self, args: Iterable[str], *, wrapper: Iterable[str] = ()) -> tuple[str, ...]:
return tuple((*wrapper, self.coursier.exe, *args, "--cache", f"{self.cache_dir}"))
@property
def append_only_caches(self) -> dict[str, str]:
return {self.cache_name: self.cache_dir}
@rule
async def setup_coursier(coursier_binary: CoursierBinary) -> Coursier:
downloaded_coursier_get = Get(
DownloadedExternalTool, ExternalToolRequest, coursier_binary.get_request(Platform.current)
)
wrapper_scripts_digest_get = Get(
Digest,
CreateDigest(
[
FileContent(
Coursier.wrapper_script,
COURSIER_WRAPPER_SCRIPT.encode("utf-8"),
is_executable=True,
),
FileContent(
Coursier.post_processing_script,
COURSIER_POST_PROCESSING_SCRIPT.encode("utf-8"),
is_executable=True,
),
]
),
)
downloaded_coursier, wrapper_scripts_digest = await MultiGet(
downloaded_coursier_get, wrapper_scripts_digest_get
)
return Coursier(
coursier=downloaded_coursier,
digest=await Get(
Digest,
MergeDigests(
[
downloaded_coursier.digest,
wrapper_scripts_digest,
]
),
),
)
def rules():
return [*collect_rules()]
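An illustrative sketch of how `Coursier.args` composes an invocation; `FakeTool` is a hypothetical stand-in for `DownloadedExternalTool`, used only because the real object is produced by the engine:
```python
from dataclasses import dataclass

@dataclass(frozen=True)
class FakeTool:
    exe: str

fake = Coursier(coursier=FakeTool("./cs"), digest=None)
print(fake.args(
    ["--json-output-file=report.json", "org.example:artifact:1.2.3"],
    wrapper=["/bin/bash", Coursier.wrapper_script],
))
# ('/bin/bash', 'coursier_wrapper_script.sh', './cs',
#  '--json-output-file=report.json', 'org.example:artifact:1.2.3',
#  '--cache', '.cache')
print(fake.append_only_caches)  # {'coursier': '.cache'}
```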
| 31.41844
| 106
| 0.66614
| 1,693
| 0.382167
| 0
| 0
| 1,886
| 0.425734
| 1,160
| 0.261851
| 1,824
| 0.411738
|
7fc62c697596e38c94228733e0069508642f51ad
| 198
|
py
|
Python
|
app/emails/__init__.py
|
zollf/CITS3200
|
95fb7569dad325c057e441cd7265d3e85735c058
|
[
"CC0-1.0"
] | null | null | null |
app/emails/__init__.py
|
zollf/CITS3200
|
95fb7569dad325c057e441cd7265d3e85735c058
|
[
"CC0-1.0"
] | null | null | null |
app/emails/__init__.py
|
zollf/CITS3200
|
95fb7569dad325c057e441cd7265d3e85735c058
|
[
"CC0-1.0"
] | null | null | null |
from django.apps import AppConfig
class EmailAppConfig(AppConfig):
name = 'app.emails'
label = 'email_app'
verbose_name = 'Emails App'
default_app_config = 'app.emails.EmailAppConfig'
| 22
| 48
| 0.737374
| 112
| 0.565657
| 0
| 0
| 0
| 0
| 0
| 0
| 62
| 0.313131
|
7fc685dc97d5c6a0bef64129b54db775abc19da1
| 21,614
|
py
|
Python
|
polyaxon_schemas/layers/core.py
|
gzcf/polyaxon-schemas
|
a381280cd7535f64158d52f0a9eff2afec997d90
|
[
"MIT"
] | null | null | null |
polyaxon_schemas/layers/core.py
|
gzcf/polyaxon-schemas
|
a381280cd7535f64158d52f0a9eff2afec997d90
|
[
"MIT"
] | null | null | null |
polyaxon_schemas/layers/core.py
|
gzcf/polyaxon-schemas
|
a381280cd7535f64158d52f0a9eff2afec997d90
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from marshmallow import fields, post_dump, post_load, validate
from polyaxon_schemas.constraints import ConstraintSchema
from polyaxon_schemas.initializations import (
GlorotNormalInitializerConfig,
InitializerSchema,
ZerosInitializerConfig
)
from polyaxon_schemas.layers.base import BaseLayerConfig, BaseLayerSchema
from polyaxon_schemas.regularizations import RegularizerSchema
from polyaxon_schemas.utils import ACTIVATION_VALUES, DType, StrOrFct
class MaskingSchema(BaseLayerSchema):
mask_value = fields.Int()
class Meta:
ordered = True
@post_load
def make(self, data):
return MaskingConfig(**data)
@post_dump
def unmake(self, data):
return MaskingConfig.remove_reduced_attrs(data)
class MaskingConfig(BaseLayerConfig):
"""Masks a sequence by using a mask value to skip timesteps.
For each timestep in the input tensor (dimension #1 in the tensor),
if all values in the input tensor at that timestep
are equal to `mask_value`, then the timestep will be masked (skipped)
in all downstream layers (as long as they support masking).
If any downstream layer does not support masking yet receives such
an input mask, an exception will be raised.
Example:
Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,
to be fed to a LSTM layer.
You want to mask timestep #3 and #5 because you lack data for
these timesteps. You can:
- set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`
- insert a `Masking` layer with `mask_value=0.` before the LSTM layer:
```python
x = Masking(mask_value=0., input_shape=(timesteps, features))(x)
x = LSTM(32)(x)
```
Polyaxonfile usage:
```yaml
Masking:
mask_value: 0
```
"""
IDENTIFIER = 'Masking'
SCHEMA = MaskingSchema
def __init__(self, mask_value=0., **kwargs):
super(MaskingConfig, self).__init__(**kwargs)
self.mask_value = mask_value
class DropoutSchema(BaseLayerSchema):
rate = fields.Float(validate=validate.Range(0, 1))
noise_shape = fields.List(fields.Int(), default=None, missing=None)
seed = fields.Int(default=None, missing=None)
class Meta:
ordered = True
@post_load
def make(self, data):
return DropoutConfig(**data)
@post_dump
def unmake(self, data):
return DropoutConfig.remove_reduced_attrs(data)
class DropoutConfig(BaseLayerConfig):
"""Applies Dropout to the input.
Dropout consists in randomly setting
a fraction `rate` of input units to 0 at each update during training time,
which helps prevent overfitting.
Args:
rate: float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
Polyaxonfile usage:
```yaml
Dropout:
rate: 0.5
```
"""
IDENTIFIER = 'Dropout'
SCHEMA = DropoutSchema
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super(DropoutConfig, self).__init__(**kwargs)
self.rate = rate
self.noise_shape = noise_shape
self.seed = seed
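A minimal construction sketch for the config above (attribute access only; schema-based serialization is omitted here):
```python
config = DropoutConfig(rate=0.5, noise_shape=[1, 1, 64], seed=33)
print(config.rate, config.noise_shape, config.seed)  # 0.5 [1, 1, 64] 33
```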
class SpatialDropout1DSchema(DropoutSchema):
class Meta:
ordered = True
@post_load
def make(self, data):
return SpatialDropout1DConfig(**data)
@post_dump
def unmake(self, data):
return SpatialDropout1DConfig.remove_reduced_attrs(data)
class SpatialDropout1DConfig(DropoutConfig):
"""Spatial 1D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 1D feature maps instead of individual elements. If adjacent frames
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout1D will help promote independence
between feature maps and should be used instead.
Args:
rate: float between 0 and 1. Fraction of the input units to drop.
Input shape:
3D tensor with shape:
`(samples, timesteps, channels)`
Output shape:
Same as input
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
Polyaxonfile usage:
```yaml
SpatialDropout1D:
rate: 0.5
```
"""
IDENTIFIER = 'SpatialDropout1D'
SCHEMA = SpatialDropout1DSchema
class SpatialDropout2DSchema(DropoutSchema):
    data_format = fields.Str(default=None, missing=None,
                             validate=validate.OneOf(['channels_first', 'channels_last']))
class Meta:
ordered = True
@post_load
def make(self, data):
return SpatialDropout2DConfig(**data)
@post_dump
def unmake(self, data):
return SpatialDropout2DConfig.remove_reduced_attrs(data)
class SpatialDropout2DConfig(DropoutConfig):
"""Spatial 2D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 2D feature maps instead of individual elements. If adjacent pixels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout2D will help promote independence
between feature maps and should be used instead.
Args:
rate: float between 0 and 1. Fraction of the input units to drop.
data_format: 'channels_first' or 'channels_last'.
In 'channels_first' mode, the channels dimension
(the depth) is at index 1,
in 'channels_last' mode is it at index 3.
If you never set it, then it will be "channels_last".
Input shape:
4D tensor with shape:
`(samples, channels, rows, cols)` if data_format='channels_first'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if data_format='channels_last'.
Output shape:
Same as input
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
Polyaxonfile usage:
```yaml
SpatialDropout2D:
rate: 0.5
```
"""
IDENTIFIER = 'SpatialDropout2D'
SCHEMA = SpatialDropout2DSchema
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout2DConfig, self).__init__(rate, **kwargs)
self.data_format = data_format
class SpatialDropout3DSchema(DropoutSchema):
    data_format = fields.Str(default=None, missing=None,
                             validate=validate.OneOf(['channels_first', 'channels_last']))
class Meta:
ordered = True
@post_load
def make(self, data):
return SpatialDropout3DConfig(**data)
@post_dump
def unmake(self, data):
return SpatialDropout3DConfig.remove_reduced_attrs(data)
class SpatialDropout3DConfig(DropoutConfig):
"""Spatial 3D version of Dropout.
This version performs the same function as Dropout, however it drops
entire 3D feature maps instead of individual elements. If adjacent voxels
within feature maps are strongly correlated (as is normally the case in
early convolution layers) then regular dropout will not regularize the
activations and will otherwise just result in an effective learning rate
decrease. In this case, SpatialDropout3D will help promote independence
between feature maps and should be used instead.
Args:
rate: float between 0 and 1. Fraction of the input units to drop.
data_format: 'channels_first' or 'channels_last'.
In 'channels_first' mode, the channels dimension (the depth)
is at index 1, in 'channels_last' mode is it at index 4.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
`(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
or 5D tensor with shape:
`(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.
Output shape:
Same as input
References:
- [Efficient Object Localization Using Convolutional
Networks](https://arxiv.org/abs/1411.4280)
Polyaxonfile usage:
```yaml
SpatialDropout3D:
rate: 0.5
```
"""
IDENTIFIER = 'SpatialDropout3D'
SCHEMA = SpatialDropout3DSchema
def __init__(self, rate, data_format=None, **kwargs):
super(SpatialDropout3DConfig, self).__init__(rate, **kwargs)
self.data_format = data_format
class ActivationSchema(BaseLayerSchema):
activation = StrOrFct(allow_none=True, validate=validate.OneOf(ACTIVATION_VALUES))
class Meta:
ordered = True
@post_load
def make(self, data):
return ActivationConfig(**data)
@post_dump
def unmake(self, data):
return ActivationConfig.remove_reduced_attrs(data)
class ActivationConfig(BaseLayerConfig):
"""Applies an activation function to an output.
Args:
activation: name of activation function.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
Polyaxonfile usage:
```yaml
Activation:
activation: tanh
```
"""
IDENTIFIER = 'Activation'
SCHEMA = ActivationSchema
def __init__(self, activation, **kwargs):
super(ActivationConfig, self).__init__(**kwargs)
self.activation = activation
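# Illustrative sketch (not part of the original module): each Schema/Config pair
# above round-trips between plain dicts (e.g. parsed Polyaxonfile YAML) and typed
# config objects through the post_load/post_dump hooks. Assuming marshmallow 3-style
# load()/dump() semantics:
#
#   config = ActivationSchema().load({'activation': 'tanh'})
#   # -> ActivationConfig instance with config.activation == 'tanh'
#   payload = ActivationSchema().dump(config)
#   # -> plain dict again, with defaulted attrs stripped by remove_reduced_attrs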
class ReshapeSchema(BaseLayerSchema):
target_shape = fields.List(fields.Int())
class Meta:
ordered = True
@post_load
def make(self, data):
return ReshapeConfig(**data)
@post_dump
def unmake(self, data):
return ReshapeConfig.remove_reduced_attrs(data)
class ReshapeConfig(BaseLayerConfig):
"""Reshapes an output to a certain shape.
Args:
target_shape: target shape. Tuple of integers,
does not include the samples dimension (batch size).
Input shape:
Arbitrary, although all dimensions in the input shape must be fixed.
Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
`(batch_size,) + target_shape`
Example:
```python
# as first layer in a Sequential model
x = Reshape((3, 4))(x)
# now: x.output_shape == (None, 3, 4)
# note: `None` is the batch dimension
# also supports shape inference using `-1` as dimension
x = Reshape((-1, 2, 2))(x)
# now: x.output_shape == (None, 3, 2, 2)
```
Polyaxonfile usage:
```yaml
Reshape:
target_shape: [-1, 2, 2]
```
"""
IDENTIFIER = 'Reshape'
SCHEMA = ReshapeSchema
def __init__(self, target_shape, **kwargs):
super(ReshapeConfig, self).__init__(**kwargs)
self.target_shape = target_shape
class PermuteSchema(BaseLayerSchema):
dims = fields.List(fields.Int())
class Meta:
ordered = True
@post_load
def make(self, data):
return PermuteConfig(**data)
@post_dump
def unmake(self, data):
return PermuteConfig.remove_reduced_attrs(data)
class PermuteConfig(BaseLayerConfig):
"""Permutes the dimensions of the input according to a given pattern.
Useful for e.g. connecting RNNs and convnets together.
Args:
dims: Tuple of integers. Permutation pattern, does not include the
samples dimension. Indexing starts at 1.
For instance, `(2, 1)` permutes the first and second dimension
of the input.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same as the input shape, but with the dimensions re-ordered according
to the specified pattern.
Example:
```python
x = Permute((2, 1), input_shape=(10, 64))(x)
# now: x.output_shape == (None, 64, 10)
# note: `None` is the batch dimension
```
Polyaxonfile usage:
```yaml
Permute:
dims: [2, 1]
```
"""
IDENTIFIER = 'Permute'
SCHEMA = PermuteSchema
def __init__(self, dims, **kwargs):
super(PermuteConfig, self).__init__(**kwargs)
self.dims = dims
class FlattenSchema(BaseLayerSchema):
class Meta:
ordered = True
@post_load
def make(self, data):
return FlattenConfig(**data)
@post_dump
def unmake(self, data):
return FlattenConfig.remove_reduced_attrs(data)
class FlattenConfig(BaseLayerConfig):
"""Flattens the input. Does not affect the batch size.
Example:
```python
x = Convolution2D(64, 3, 3,
border_mode='same',
input_shape=(3, 32, 32))(x)
# now: x.output_shape == (None, 64, 32, 32)
x = Flatten()(x)
# now: x.output_shape == (None, 65536)
```
Polyaxonfile usage:
```yaml
Flatten:
```
"""
IDENTIFIER = 'Flatten'
SCHEMA = FlattenSchema
class RepeatVectorSchema(BaseLayerSchema):
n = fields.Int()
class Meta:
ordered = True
@post_load
def make(self, data):
return RepeatVectorConfig(**data)
@post_dump
def unmake(self, data):
return RepeatVectorConfig.remove_reduced_attrs(data)
class RepeatVectorConfig(BaseLayerConfig):
"""Repeats the input n times.
Example:
```python
x = Dense(32)(x)
# now: x.output_shape == (None, 32)
# note: `None` is the batch dimension
x = RepeatVector(3)(x)
# now: x.output_shape == (None, 3, 32)
```
Args:
n: integer, repetition factor.
Input shape:
2D tensor of shape `(num_samples, features)`.
Output shape:
3D tensor of shape `(num_samples, n, features)`.
Polyaxonfile usage:
```yaml
RepeatVector:
n: 32
```
"""
IDENTIFIER = 'RepeatVector'
SCHEMA = RepeatVectorSchema
def __init__(self, n, **kwargs):
super(RepeatVectorConfig, self).__init__(**kwargs)
self.n = n
# class LambdaSchema(BaseLayerSchema):
class DenseSchema(BaseLayerSchema):
units = fields.Int()
activation = StrOrFct(allow_none=True, validate=validate.OneOf(ACTIVATION_VALUES))
use_bias = fields.Bool(allow_none=True)
kernel_initializer = fields.Nested(InitializerSchema, allow_none=True)
bias_initializer = fields.Nested(InitializerSchema, allow_none=True)
kernel_regularizer = fields.Nested(RegularizerSchema, allow_none=True)
bias_regularizer = fields.Nested(RegularizerSchema, allow_none=True)
activity_regularizer = fields.Nested(RegularizerSchema, allow_none=True)
kernel_constraint = fields.Nested(ConstraintSchema, allow_none=True)
bias_constraint = fields.Nested(ConstraintSchema, allow_none=True)
class Meta:
ordered = True
@post_load
def make(self, data):
return DenseConfig(**data)
@post_dump
def unmake(self, data):
return DenseConfig.remove_reduced_attrs(data)
class DenseConfig(BaseLayerConfig):
"""Just your regular densely-connected NN layer.
`Dense` implements the operation:
`output = activation(dot(input, kernel) + bias)`
where `activation` is the element-wise activation function
passed as the `activation` argument, `kernel` is a weights matrix
created by the layer, and `bias` is a bias vector created by the layer
(only applicable if `use_bias` is `True`).
Note: if the input to the layer has a rank greater than 2, then
it is flattened prior to the initial dot product with `kernel`.
Example:
```python
# apply a Dense layer with 32 units to an input of shape (*, 16):
x = Dense(32)(x)
# the output arrays will have shape (*, 32)
```
Args:
units: Positive integer, dimensionality of the output space.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to
the `kernel` weights matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to
the output of the layer (its "activation").
kernel_constraint: Constraint function applied to
the `kernel` weights matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
nD tensor with shape: `(batch_size, ..., input_dim)`.
The most common situation would be
a 2D input with shape `(batch_size, input_dim)`.
Output shape:
nD tensor with shape: `(batch_size, ..., units)`.
For instance, for a 2D input with shape `(batch_size, input_dim)`,
the output would have shape `(batch_size, units)`.
Polyaxonfile usage:
```yaml
Dense:
units: 32
activation: sigmoid
```
"""
IDENTIFIER = 'Dense'
SCHEMA = DenseSchema
def __init__(self,
units,
activation=None,
use_bias=True,
kernel_initializer=GlorotNormalInitializerConfig(),
bias_initializer=ZerosInitializerConfig(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
super(DenseConfig, self).__init__(**kwargs)
self.units = units
self.activation = activation
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.activity_regularizer = activity_regularizer
self.kernel_constraint = kernel_constraint
self.bias_constraint = bias_constraint
class ActivityRegularizationSchema(BaseLayerSchema):
l1 = fields.Float(allow_none=True)
l2 = fields.Float(allow_none=True)
class Meta:
ordered = True
@post_load
def make(self, data):
return ActivityRegularizationConfig(**data)
@post_dump
def unmake(self, data):
return ActivityRegularizationConfig.remove_reduced_attrs(data)
class ActivityRegularizationConfig(BaseLayerConfig):
"""Layer that applies an update to the cost function based input activity.
Args:
l1: L1 regularization factor (positive float).
l2: L2 regularization factor (positive float).
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
Polyaxonfile usage:
```yaml
ActivityRegularization:
l1: 0.1
l2: 0.2
```
"""
IDENTIFIER = 'ActivityRegularization'
SCHEMA = ActivityRegularizationSchema
def __init__(self, l1=0., l2=0., **kwargs):
super(ActivityRegularizationConfig, self).__init__(**kwargs)
self.l1 = l1
self.l2 = l2
class CastSchema(BaseLayerSchema):
dtype = DType()
class Meta:
ordered = True
@post_load
def make(self, data):
return CastConfig(**data)
@post_dump
def unmake(self, data):
return CastConfig.remove_reduced_attrs(data)
class CastConfig(BaseLayerConfig):
"""Casts a tensor to a new type.
The operation casts `x` (in case of `Tensor`) or `x.values`
(in case of `SparseTensor`) to `dtype`.
For example:
```python
x = tf.constant([1.8, 2.2], dtype=tf.float32)
x = Cast(dtype=tf.int32)(x) # [1, 2], dtype=tf.int32
```
Args:
x: A `Tensor` or `SparseTensor`.
dtype: The destination type.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x`.
Raises:
TypeError: If `x` cannot be cast to the `dtype`.
Polyaxonfile usage:
```yaml
Cast:
dtype: float32
```
"""
IDENTIFIER = 'Cast'
SCHEMA = CastSchema
def __init__(self, dtype, **kwargs):
super(CastConfig, self).__init__(**kwargs)
self.dtype = dtype
avg_line_length: 28.364829 | max_line_length: 88 | alphanum_fraction: 0.659434 | count_classes: 20,942 | score_classes: 0.968909 | count_generators: 0 | score_generators: 0 | count_decorators: 2,261 | score_decorators: 0.104608 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 12,461 | score_documentation: 0.576524

hexsha: 7fc71d742b9583424424ab4953dff97d093bc116 | size: 5,556 | ext: py | lang: Python
max_stars_repo: glendaesutanto/xendit-python @ f9b131882ff7d045f2e2c6518933d1594efba3e6, path tests/unit/models/cardlesscredit/test_create_payment.py, licenses ["MIT"], stars 10 (2020-10-31T23:34:34.000Z to 2022-03-08T19:08:55.000Z)
max_issues_repo: glendaesutanto/xendit-python @ f9b131882ff7d045f2e2c6518933d1594efba3e6, path tests/unit/models/cardlesscredit/test_create_payment.py, licenses ["MIT"], issues 22 (2020-07-30T14:25:07.000Z to 2022-03-31T03:55:46.000Z)
max_forks_repo: glendaesutanto/xendit-python @ f9b131882ff7d045f2e2c6518933d1594efba3e6, path tests/unit/models/cardlesscredit/test_create_payment.py, licenses ["MIT"], forks 11 (2020-07-28T08:09:40.000Z to 2022-03-18T00:14:02.000Z)
import pytest
from ..model_base_test import ModelBaseTest
from tests.sampleresponse.cardless_credit import cardless_credit_payment_response
from xendit.models import CardlessCredit, CardlessCreditType
# fmt: off
class TestCreateCardlessCreditPayment(ModelBaseTest):
@pytest.fixture
def default_cardless_credit_data(self):
tested_class = CardlessCredit
class_name = "CardlessCredit"
method_name = "create_payment"
http_method_name = "post"
cardless_credit_items = []
cardless_credit_items.append(
CardlessCredit.helper_create_item(
id="item-123",
name="Phone Case",
price=200000,
type="Smartphone",
url="http://example.com/phone/phone_case",
quantity=2,
)
)
shipping_address = CardlessCredit.helper_create_shipping_address(
first_name="first name",
last_name="last name",
address="Jl Teknologi No. 12",
city="Jakarta",
postal_code="12345",
phone="081513114262",
country_code="IDN",
)
customer_details = CardlessCredit.helper_create_customer_details(
first_name="customer first name",
last_name="customer last name",
email="customer@email.com",
phone="0812332145",
)
args = ()
kwargs = {
"cardless_credit_type": CardlessCreditType.KREDIVO,
"external_id": "mock-id-123",
"amount": 10000,
"payment_type": "3_months",
"items": cardless_credit_items,
"customer_details": customer_details,
"shipping_address": shipping_address,
"redirect_url": "https://mock-my-shop.com/home",
"callback_url": "https://mock-my-shop.com/callback",
"x_idempotency_key": "test_idemp_123",
}
params = (args, kwargs)
url = "/cardless-credit"
expected_correct_result = cardless_credit_payment_response()
return (tested_class, class_name, method_name, http_method_name, url, params, expected_correct_result)
@pytest.fixture
def api_requestor_request_data(self, default_cardless_credit_data):
tested_class, class_name, method_name, http_method_name, url, params, _ = default_cardless_credit_data
headers = {"X-IDEMPOTENCY-KEY": "test_idemp_123"}
body = {
"cardless_credit_type": "KREDIVO",
"external_id": "mock-id-123",
"amount": 10000,
"payment_type": "3_months",
"items": [
{
"id": "item-123",
"name": "Phone Case",
"price": 200000,
"type": "Smartphone",
"url": "http://example.com/phone/phone_case",
"quantity": 2,
}
],
"customer_details": {
"first_name": "customer first name",
"last_name": "customer last name",
"email": "customer@email.com",
"phone": "0812332145",
},
"shipping_address": {
"first_name": "first name",
"last_name": "last name",
"address": "Jl Teknologi No. 12",
"city": "Jakarta",
"postal_code": "12345",
"phone": "081513114262",
"country_code": "IDN",
},
"redirect_url": "https://mock-my-shop.com/home",
"callback_url": "https://mock-my-shop.com/callback",
}
return (tested_class, class_name, method_name, http_method_name, url, params, headers, body)
@pytest.mark.parametrize("mock_correct_response", [cardless_credit_payment_response()], indirect=True)
def test_return_cardless_credit_payment_on_correct_params(
self, mocker, mock_correct_response, default_cardless_credit_data
):
self.run_success_return_test_on_xendit_instance(mocker, mock_correct_response, default_cardless_credit_data)
def test_raise_xendit_error_on_response_error(
self, mocker, mock_error_request_response, default_cardless_credit_data
):
self.run_raises_error_test_on_xendit_instance(mocker, mock_error_request_response, default_cardless_credit_data)
@pytest.mark.parametrize("mock_correct_response", [cardless_credit_payment_response()], indirect=True)
def test_return_cardless_credit_payment_on_correct_params_and_global_xendit(
self, mocker, mock_correct_response, default_cardless_credit_data
):
self.run_success_return_test_on_global_config(mocker, mock_correct_response, default_cardless_credit_data)
def test_raise_xendit_error_on_response_error_and_global_xendit(
self, mocker, mock_error_request_response, default_cardless_credit_data
):
self.run_raises_error_test_on_global_config(mocker, mock_error_request_response, default_cardless_credit_data)
@pytest.mark.parametrize("mock_correct_response", [cardless_credit_payment_response()], indirect=True)
def test_send_correct_request_to_api_requestor(self, mocker, mock_correct_response, api_requestor_request_data):
self.run_send_correct_request_to_api_requestor(mocker, mock_correct_response, api_requestor_request_data)
# fmt: on
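# Illustrative sketch (not part of the test suite): the call these tests mock,
# issued directly against the SDK. It assumes an API key has already been
# configured the way the base test's "global config" path implies; all argument
# values are copied from the fixtures above.
#
#   items = [CardlessCredit.helper_create_item(
#       id="item-123", name="Phone Case", price=200000, type="Smartphone",
#       url="http://example.com/phone/phone_case", quantity=2)]
#   shipping_address = CardlessCredit.helper_create_shipping_address(
#       first_name="first name", last_name="last name", address="Jl Teknologi No. 12",
#       city="Jakarta", postal_code="12345", phone="081513114262", country_code="IDN")
#   customer_details = CardlessCredit.helper_create_customer_details(
#       first_name="customer first name", last_name="customer last name",
#       email="customer@email.com", phone="0812332145")
#   payment = CardlessCredit.create_payment(
#       cardless_credit_type=CardlessCreditType.KREDIVO,
#       external_id="mock-id-123", amount=10000, payment_type="3_months",
#       items=items, customer_details=customer_details, shipping_address=shipping_address,
#       redirect_url="https://mock-my-shop.com/home",
#       callback_url="https://mock-my-shop.com/callback")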
avg_line_length: 44.095238 | max_line_length: 121 | alphanum_fraction: 0.62203 | count_classes: 5,320 | score_classes: 0.957523 | count_generators: 0 | score_generators: 0 | count_decorators: 4,683 | score_decorators: 0.842873 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,252 | score_documentation: 0.225342

hexsha: 7fc87ac068a828700f0e5927697f90ef933d4e60 | size: 293 | ext: py | lang: Python
max_stars_repo: dupontz/libcloud @ 419c69441ea10e7bbf37319e5e8d02e82e7e6b40, path docs/examples/http_proxy/constructor_argument.py, licenses ["Apache-2.0"], stars 4 (2017-11-14T17:24:12.000Z to 2020-10-30T01:46:02.000Z)
max_issues_repo: dupontz/libcloud @ 419c69441ea10e7bbf37319e5e8d02e82e7e6b40, path docs/examples/http_proxy/constructor_argument.py, licenses ["Apache-2.0"], issues 11 (2017-01-29T08:59:21.000Z to 2018-07-02T09:17:47.000Z)
max_forks_repo: dupontz/libcloud @ 419c69441ea10e7bbf37319e5e8d02e82e7e6b40, path docs/examples/http_proxy/constructor_argument.py, licenses ["Apache-2.0"], forks 4 (2016-04-04T08:01:48.000Z to 2018-06-06T08:04:36.000Z)
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
PROXY_URL_NO_AUTH_1 = 'http://<proxy hostname 1>:<proxy port 1>'
cls = get_driver(Provider.RACKSPACE)
driver = cls('username', 'api key', region='ord',
http_proxy=PROXY_URL_NO_AUTH_1)
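# Alternative sketch (an assumption, not part of the original example): libcloud
# also honours the standard proxy environment variables, so the same proxy can
# usually be configured without the constructor argument:
#
#   import os
#   os.environ['http_proxy'] = PROXY_URL_NO_AUTH_1
#   driver = cls('username', 'api key', region='ord')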
avg_line_length: 32.555556 | max_line_length: 64 | alphanum_fraction: 0.750853 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 66 | score_documentation: 0.225256

hexsha: 7fc8a85a68b8ccffabd8645da52a646787f3b6c2 | size: 2,576 | ext: py | lang: Python
max_stars_repo: jacswork/cakechat @ d46c3ef05be8adfeac5d48ff1cfcefb87ac1eb2e, path cakechat/dialog_model/factory.py, licenses ["Apache-2.0"], stars 1 (2020-03-20T18:38:47.000Z to 2020-03-20T18:38:47.000Z)
max_issues_repo: jacswork/cakechat @ d46c3ef05be8adfeac5d48ff1cfcefb87ac1eb2e, path cakechat/dialog_model/factory.py, licenses ["Apache-2.0"], issues 64 (2019-07-05T06:06:43.000Z to 2021-08-02T05:22:31.000Z)
max_forks_repo: Spark3757/chatbot @ 4e8eae70af2d5b68564d86b7ea0dbec956ae676f, path cakechat/dialog_model/factory.py, licenses ["Apache-2.0"], forks 1 (2020-12-04T15:25:45.000Z to 2020-12-04T15:25:45.000Z)
import os
from cakechat.config import BASE_CORPUS_NAME, S3_MODELS_BUCKET_NAME, S3_TOKENS_IDX_REMOTE_DIR, \
S3_NN_MODEL_REMOTE_DIR, S3_CONDITIONS_IDX_REMOTE_DIR
from cakechat.dialog_model.model import get_nn_model
from cakechat.utils.s3 import S3FileResolver
from cakechat.utils.text_processing import get_index_to_token_path, load_index_to_item, get_index_to_condition_path
def _get_index_to_token(fetch_from_s3):
index_to_token_path = get_index_to_token_path(BASE_CORPUS_NAME)
if fetch_from_s3:
tokens_idx_resolver = S3FileResolver(index_to_token_path, S3_MODELS_BUCKET_NAME, S3_TOKENS_IDX_REMOTE_DIR)
if not tokens_idx_resolver.resolve():
raise Exception('Can\'t get index_to_token because file does not exist at S3')
else:
if not os.path.exists(index_to_token_path):
raise Exception('Can\'t get index_to_token because file does not exist. '
'Run tools/download_model.py first to get all required files or construct it by yourself.')
return load_index_to_item(index_to_token_path)
def _get_index_to_condition(fetch_from_s3):
index_to_condition_path = get_index_to_condition_path(BASE_CORPUS_NAME)
if fetch_from_s3:
index_to_condition_resolver = S3FileResolver(index_to_condition_path, S3_MODELS_BUCKET_NAME,
S3_CONDITIONS_IDX_REMOTE_DIR)
if not index_to_condition_resolver.resolve():
raise Exception('Can\'t get index_to_condition because file does not exist at S3')
else:
if not os.path.exists(index_to_condition_path):
raise Exception('Can\'t get index_to_condition because file does not exist. '
'Run tools/download_model.py first to get all required files or construct it by yourself.')
return load_index_to_item(index_to_condition_path)
def get_trained_model(reverse=False, fetch_from_s3=True):
if fetch_from_s3:
resolver_factory = S3FileResolver.init_resolver(
bucket_name=S3_MODELS_BUCKET_NAME, remote_dir=S3_NN_MODEL_REMOTE_DIR)
else:
resolver_factory = None
nn_model, model_exists = get_nn_model(
_get_index_to_token(fetch_from_s3),
_get_index_to_condition(fetch_from_s3),
resolver_factory=resolver_factory,
is_reverse_model=reverse)
if not model_exists:
raise Exception('Can\'t get the model. '
'Run tools/download_model.py first to get all required files or train it by yourself.')
return nn_model
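# Illustrative usage sketch (not part of the original module). fetch_from_s3=False
# assumes the token/condition indexes and model weights are already present locally,
# e.g. fetched beforehand with tools/download_model.py as the error messages suggest:
#
#   nn_model = get_trained_model(fetch_from_s3=False)
#   reverse_model = get_trained_model(reverse=True, fetch_from_s3=False)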
avg_line_length: 45.192982 | max_line_length: 119 | alphanum_fraction: 0.733696 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 534 | score_documentation: 0.207298

hexsha: 7fc9f53a7aff684d5bb0d1b56fcc2703e86c8f57 | size: 532 | ext: py | lang: Python
max_stars_repo: Rohitm619/Softuni-Python-Basic @ 03c9d0b44f5652c99db3b0e42014dd5af50205a2, path WhileLoop/GraduationPt.2.py, licenses ["MIT"], stars 1 (2020-09-22T13:25:34.000Z to 2020-09-22T13:25:34.000Z)
max_issues_repo: Rohitm619/Softuni-Python-Basic @ 03c9d0b44f5652c99db3b0e42014dd5af50205a2, path WhileLoop/GraduationPt.2.py, licenses ["MIT"], issues null (no event dates)
max_forks_repo: Rohitm619/Softuni-Python-Basic @ 03c9d0b44f5652c99db3b0e42014dd5af50205a2, path WhileLoop/GraduationPt.2.py, licenses ["MIT"], forks 1 (2020-10-17T09:27:46.000Z to 2020-10-17T09:27:46.000Z)
name = input()
class_school = 1
sum_of_grades = 0
ejected = False
failed = 0
while True:
grade = float(input())
if grade >= 4.00:
sum_of_grades += grade
if class_school == 12:
break
class_school += 1
else:
failed += 1
if failed == 2:
ejected = True
break
if ejected:
print(f"{name} has been excluded at {class_school} grade")
else:
average = sum_of_grades / class_school
print(f"{name} graduated. Average grade: {average:.2f}")
avg_line_length: 19.703704 | max_line_length: 62 | alphanum_fraction: 0.575188 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 100 | score_documentation: 0.18797

hexsha: 7fc9fa1da3516cccfb91e93a1b16adc0a561f07f | size: 8,990 | ext: py | lang: Python
max_stars_repo: yaogood/NAS-tensorflow2 @ a3ed9bc3a2a973c8c54d2ea5b7344a31ed86c057, path NAO/train_cifar.py, licenses ["BSD-3-Clause"], stars null (no event dates)
max_issues_repo: yaogood/NAS-tensorflow2 @ a3ed9bc3a2a973c8c54d2ea5b7344a31ed86c057, path NAO/train_cifar.py, licenses ["BSD-3-Clause"], issues null (no event dates)
max_forks_repo: yaogood/NAS-tensorflow2 @ a3ed9bc3a2a973c8c54d2ea5b7344a31ed86c057, path NAO/train_cifar.py, licenses ["BSD-3-Clause"], forks null (no event dates)
import os
import sys
import glob
import time
import copy
import random
import numpy as np
import utils
import logging
import argparse
import tensorflow as tf
import tensorflow.keras as keras
from model import NASNetworkCIFAR
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# Basic model parameters.
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, default='train',
choices=['train', 'test'])
parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'cifar100'])
parser.add_argument('--model_dir', type=str, default='models')
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--eval_batch_size', type=int, default=32)
parser.add_argument('--epochs', type=int, default=600)
parser.add_argument('--cells', type=int, default=6)
parser.add_argument('--nodes', type=int, default=5)
parser.add_argument('--channels', type=int, default=36)
parser.add_argument('--cutout_size', type=int, default=8)
parser.add_argument('--grad_bound', type=float, default=10.0)
parser.add_argument('--initial_lr', type=float, default=0.025)
parser.add_argument('--keep_prob', type=float, default=0.6)
parser.add_argument('--drop_path_keep_prob', type=float, default=0.8)
parser.add_argument('--l2_reg', type=float, default=3e-4)
parser.add_argument('--arch', type=str, default=None)
parser.add_argument('--use_aux_head', action='store_true', default=False)
parser.add_argument('--seed', type=int, default=9)
parser.add_argument('--train_from_scratch', type=bool, default=False)
args = parser.parse_args()
utils.create_exp_dir(args.model_dir)
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=log_format, datefmt='%m/%d %I:%M:%S %p')
def train(train_ds, model, optimizer, global_step, criterion, classes=10):
objs = utils.AvgMeter()
top1 = utils.AvgMeter()
top5 = utils.AvgMeter()
for step, (input, labels) in enumerate(train_ds):
global_step.assign_add(1)
with tf.GradientTape() as tape:
logits, aux_logits = model(input, global_step, training=True)
loss = criterion(tf.one_hot(tf.squeeze(labels), depth=classes), logits)
if aux_logits is not None:
aux_loss = criterion(tf.one_hot(tf.squeeze(labels), depth=classes), aux_logits)
loss += 0.4 * aux_loss
reg_loss = args.l2_reg * tf.sqrt(
tf.reduce_sum([tf.reduce_sum(tf.square(x)) for x in model.trainable_variables]))
loss += reg_loss
gradients = tape.gradient(loss, model.trainable_variables)
if args.grad_bound != 0.0:
gradients, _ = tf.clip_by_global_norm(gradients, args.grad_bound)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
################################################################################################################
acc1, acc5 = utils.accuracy(tf.nn.softmax(logits, axis=-1), tf.squeeze(labels), topk=(1, 5))
batch_size = input.shape[0]
objs.update(loss.numpy(), batch_size)
top1.update(acc1, batch_size)
top5.update(acc5, batch_size)
if (step + 1) % 100 == 0:
print('train step {} loss {} top1 {} top5 {}'.format(step + 1, objs.avg, top1.avg, top5.avg))
logging.info('train step %03d loss %e top1 %f top5 %f', step + 1, objs.avg, top1.avg, top5.avg)
return top1.avg, objs.avg, global_step
def valid(valid_ds, model, criterion, classes=10):
objs = utils.AvgMeter()
top1 = utils.AvgMeter()
top5 = utils.AvgMeter()
for step, (input, labels) in enumerate(valid_ds):
logits, _ = model(input, training=False)
loss = criterion(tf.one_hot(tf.squeeze(labels), depth=classes), logits)
acc1, acc5 = utils.accuracy(tf.nn.softmax(logits, axis=-1), tf.squeeze(labels), topk=(1, 5))
batch_size = input.shape[0]
objs.update(loss.numpy(), batch_size)
top1.update(acc1, batch_size)
top5.update(acc5, batch_size)
if (step + 1) % 100 == 0:
print('valid step {} loss {} top1 {} top5 {}'.format(step + 1, objs.avg, top1.avg, top5.avg))
logging.info('valid step %03d %e %f %f', step + 1, objs.avg, top1.avg, top5.avg)
return top1.avg, objs.avg
def train_cifar10():
logging.info("Args = %s", args)
np.random.seed(args.seed)
tf.random.set_seed(args.seed)
global_step = tf.Variable(initial_value=0, trainable=False, dtype=tf.int32)
epoch = tf.Variable(initial_value=0, trainable=False, dtype=tf.int32)
best_acc_top1 = tf.Variable(initial_value=0.0, trainable=False, dtype=tf.float32)
################################################ model setup #######################################################
train_ds, test_ds = utils.load_cifar10(args.batch_size, args.cutout_size)
total_steps = int(np.ceil(50000 / args.batch_size)) * args.epochs
model = NASNetworkCIFAR(classes=10,
reduce_distance=args.cells,
num_nodes=args.nodes,
channels=args.channels,
keep_prob=args.keep_prob,
drop_path_keep_prob=args.drop_path_keep_prob,
use_aux_head=args.use_aux_head,
steps=total_steps,
arch=args.arch)
temp_ = tf.random.uniform((64,32,32,3), minval=0, maxval=1, dtype=tf.float32)
temp_ = model(temp_, step=1, training=True)
model.summary()
model_size = utils.count_parameters_in_MB(model)
print("param size = {} MB".format(model_size))
logging.info("param size = %fMB", model_size)
criterion = keras.losses.CategoricalCrossentropy(from_logits=True)
learning_rate = keras.experimental.CosineDecay(initial_learning_rate=args.initial_lr,
decay_steps=total_steps, alpha=0.0001)
# learning_rate = keras.optimizers.schedules.ExponentialDecay(
# initial_learning_rate=args.initial_lr, decay_steps=total_steps, decay_rate=0.99, staircase=False, name=None
# )
optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
########################################## restore checkpoint ######################################################
if args.train_from_scratch:
utils.clean_dir(args.model_dir)
checkpoint_path = os.path.join(args.model_dir, 'checkpoints')
ckpt = tf.train.Checkpoint(model=model,
optimizer=optimizer,
global_step=global_step,
epoch=epoch,
best_acc_top1=best_acc_top1)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=3)
# if a checkpoint exists, restore the latest checkpoint.
if ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
print('Latest checkpoint restored!!')
############################################# training process #####################################################
acc_train_result = []
loss_train_result = []
acc_test_result = []
loss_test_result = []
while epoch.numpy() < args.epochs:
print('epoch {} lr {}'.format(epoch.numpy(), optimizer._decayed_lr(tf.float32)))
train_acc, train_loss, step = train(train_ds, model, optimizer, global_step, criterion, classes=10)
test_acc, test_loss = valid(test_ds, model, criterion, classes=10)
acc_train_result.append(train_acc)
loss_train_result.append(train_loss)
acc_test_result.append(test_acc)
loss_test_result.append(test_loss)
logging.info('epoch %d lr %e', epoch.numpy(), optimizer._decayed_lr(tf.float32))
logging.info(acc_train_result)
logging.info(loss_train_result)
logging.info(acc_test_result)
logging.info(loss_test_result)
is_best = False
if test_acc > best_acc_top1:
best_acc_top1.assign(test_acc)
is_best = True
epoch.assign_add(1)
if (epoch.numpy() + 1) % 1 == 0:
ckpt_save_path = ckpt_manager.save()
print('Saving checkpoint for epoch {} at {}'.format(epoch.numpy() + 1, ckpt_save_path))
if is_best:
pass
utils.plot_single_list(acc_train_result, x_label='epochs', y_label='acc', file_name='acc_train')
utils.plot_single_list(loss_train_result, x_label='epochs', y_label='loss', file_name='loss_train')
utils.plot_single_list(acc_test_result, x_label='epochs', y_label='acc', file_name='acc_test')
utils.plot_single_list(loss_test_result, x_label='epochs', y_label='loss', file_name='loss_test')
if __name__ == '__main__':
import time
start_time = time.time()
train_cifar10()
print("--- %s seconds ---" % (time.time() - start_time))
avg_line_length: 44.068627 | max_line_length: 120 | alphanum_fraction: 0.628031 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,582 | score_documentation: 0.175973

hexsha: 7fca526c31f2627682c2720c9612c105d831e507 | size: 1,585 | ext: py | lang: Python
max_stars_repo: Spotflock/studio-sdk-python @ 4831819d2a69755777ff773091afc4330f8a91f6, path examples/regression.py, licenses ["MIT"], stars 8 (2019-03-25T17:21:27.000Z to 2019-03-26T10:34:30.000Z)
max_issues_repo: Spotflock/studio-sdk-python @ 4831819d2a69755777ff773091afc4330f8a91f6, path examples/regression.py, licenses ["MIT"], issues null (no event dates)
max_forks_repo: Spotflock/studio-sdk-python @ 4831819d2a69755777ff773091afc4330f8a91f6, path examples/regression.py, licenses ["MIT"], forks null (no event dates)
import studio
def main():
c = studio.StudioClient('xxx') # put your app key here.
# REGRESSION
test_data = ""
train_data = ""
test_file_store_response = c.store('../csv/housing_test.csv')
print(test_file_store_response)
test_data = test_file_store_response['fileUrl']
train_data_store_response = c.store('../csv/housing_train.csv')
print(train_data_store_response)
train_data = train_data_store_response['fileUrl']
train_response = c.train("weka", "regression", "Housing Price Model", "LinearRegression",
train_data, "SalePrice", 80,
["LotShape", "Street"], True) # this is the configuration.
print(train_response)
train_job_status_response = c.job_status(train_response['data']['jobId'])
print(train_job_status_response)
train_job_output_response = c.job_output(train_response['data']['jobId'])
print(train_job_output_response)
model = train_job_output_response['output']['modelUrl']
predict_response = c.predict("weka", "regression", test_data,
model)
print(predict_response)
predict_job_status_response = c.job_status(predict_response['data']['jobId'])
print(predict_job_status_response)
predict_job_output_response = c.job_output(predict_response['data']['jobId'])
print(predict_job_output_response)
pred_file = predict_job_output_response['output']['predFileUrl']
prediction_response = c.download(pred_file)
print(prediction_response.text)
if __name__ == '__main__':
main()
avg_line_length: 40.641026 | max_line_length: 93 | alphanum_fraction: 0.692744 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 347 | score_documentation: 0.218927

hexsha: 7fcb384cb9988d683d28c2f7b5a6810c88a449fa | size: 2,763 | ext: py | lang: Python
max_stars_repo: Aroksak/generative-dl @ 66b71860266d15736b66b0b17fff37c7e881b142, path VAE/models/vae_mnist.py, licenses ["MIT"], stars null (no event dates)
max_issues_repo: Aroksak/generative-dl @ 66b71860266d15736b66b0b17fff37c7e881b142, path VAE/models/vae_mnist.py, licenses ["MIT"], issues null (no event dates)
max_forks_repo: Aroksak/generative-dl @ 66b71860266d15736b66b0b17fff37c7e881b142, path VAE/models/vae_mnist.py, licenses ["MIT"], forks null (no event dates)
import torch
import torch.nn as nn
class Encoder(nn.Module):
def _conv_layer_factory(self, input_channels, output_channels, **kwargs):
return nn.Sequential(
nn.Conv2d(input_channels, output_channels, **kwargs),
nn.LeakyReLU(),
)
def __init__(self, input_channels=1, bottleneck_dim=2):
super().__init__()
self.conv_0 = self._conv_layer_factory(input_channels, 32, kernel_size=3, padding=1)
self.conv_1 = self._conv_layer_factory(32, 64, kernel_size=3, stride=2, padding=1)
self.conv_2 = self._conv_layer_factory(64, 64, kernel_size=3, stride=2, padding=1)
self.conv_3 = self._conv_layer_factory(64, 64, kernel_size=3, padding=1)
self.flatten = nn.Flatten()
self.mu = nn.Linear(7*7*64, bottleneck_dim)
self.log_var = nn.Linear(7*7*64, bottleneck_dim)
def forward(self, x):
x = self.conv_0(x)
x = self.conv_1(x)
x = self.conv_2(x)
x = self.conv_3(x)
x = self.flatten(x)
mu = self.mu(x)
log_var = self.log_var(x)
return mu, log_var
class Decoder(nn.Module):
def __init__(self, bottleneck_dim=2, output_channels=1):
super().__init__()
self.dense = nn.Linear(bottleneck_dim, 7*7*64)
self.convtran_0 = nn.ConvTranspose2d(64, 64, kernel_size=3, stride=1, padding=1)
self.relu = nn.LeakyReLU()
self.convtran_1 = nn.ConvTranspose2d(64, 64, kernel_size=3, stride=2, padding=1, output_padding=1)
self.convtran_2 = nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, output_padding=1)
self.convtran_3 = nn.ConvTranspose2d(32, output_channels, kernel_size=3, padding=1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.dense(x)
x = self.relu(x)
x = x.view(-1, 64, 7, 7)
x = self.convtran_0(x)
x = self.relu(x)
x = self.convtran_1(x)
x = self.relu(x)
x = self.convtran_2(x)
x = self.relu(x)
x = self.convtran_3(x)
x = self.sigmoid(x)
return x
class VariationalAutoEncoder(nn.Module):
def __init__(self, input_channels=1, bottleneck_dim=2, output_channels=1):
super().__init__()
self.encoder = Encoder(input_channels=input_channels, bottleneck_dim=bottleneck_dim)
self.decoder = Decoder(bottleneck_dim=bottleneck_dim, output_channels=output_channels)
def reparametrize(self, mu, log_var):
std = torch.exp(0.5*log_var)
eps = torch.randn_like(std)
return mu + eps*std
def forward(self, x):
mu, log_var = self.encoder(x)
x = self.reparametrize(mu, log_var)
x = self.decoder(x)
return mu, log_var, x
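
# Illustrative sketch (not part of the original module): one optimisation step with
# the usual VAE objective (reconstruction + KL divergence). The optimizer, batch
# shape and unweighted loss sum are assumptions, not taken from this file.
if __name__ == "__main__":
    model = VariationalAutoEncoder(input_channels=1, bottleneck_dim=2, output_channels=1)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    batch = torch.rand(16, 1, 28, 28)  # stand-in for an MNIST batch scaled to [0, 1]
    mu, log_var, recon = model(batch)
    # reconstruction term: pixel-wise binary cross-entropy against the input
    recon_loss = nn.functional.binary_cross_entropy(recon, batch, reduction="sum")
    # KL term: divergence of N(mu, sigma^2) from the standard normal prior
    kl_loss = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
    loss = recon_loss + kl_loss
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()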
avg_line_length: 32.127907 | max_line_length: 106 | alphanum_fraction: 0.627579 | count_classes: 2,719 | score_classes: 0.984075 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0

hexsha: 7fcd0efe44d52a8f5eb0ccaff5033e799faefab2 | size: 503 | ext: py | lang: Python
max_stars_repo: ccoffrin/py-json-examples @ c01bf6994e4480470939621ed0b4b7043b38819f, path json-read.py, licenses ["MIT"], stars null (no event dates)
max_issues_repo: ccoffrin/py-json-examples @ c01bf6994e4480470939621ed0b4b7043b38819f, path json-read.py, licenses ["MIT"], issues null (no event dates)
max_forks_repo: ccoffrin/py-json-examples @ c01bf6994e4480470939621ed0b4b7043b38819f, path json-read.py, licenses ["MIT"], forks null (no event dates)
#!/usr/bin/env python3
import json
data_json = {}
with open('data/json_00.json', 'r') as file:
data_json = json.load(file)
print(data_json)
print(data_json[0])
print(data_json[1])
print(data_json[2])
print(data_json[3])
print(data_json[4])
print(data_json[5])
print(data_json[6])
print(data_json[5][0])
print(data_json[5][1])
print(data_json[5][2])
print(data_json[5][3])
print(data_json[6])
print(data_json[6]["A"])
print(data_json[6]["B"])
print(data_json[6]["C"])
print(data_json[6]["D"])
avg_line_length: 16.766667 | max_line_length: 44 | alphanum_fraction: 0.691849 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 56 | score_documentation: 0.111332

hexsha: 7fce7a6d8d2ce871e7042ada46c6923907411052 | size: 257 | ext: py | lang: Python
max_stars_repo: uninassau-2020-2/proj-grupo5 @ ea7ca233004860a432f7301c72bde03fccce5f92, path api_python/app/models/classes_basicas/Empregado.py, licenses ["CC0-1.0"], stars null (no event dates)
max_issues_repo: uninassau-2020-2/proj-grupo5 @ ea7ca233004860a432f7301c72bde03fccce5f92, path api_python/app/models/classes_basicas/Empregado.py, licenses ["CC0-1.0"], issues null (no event dates)
max_forks_repo: uninassau-2020-2/proj-grupo5 @ ea7ca233004860a432f7301c72bde03fccce5f92, path api_python/app/models/classes_basicas/Empregado.py, licenses ["CC0-1.0"], forks null (no event dates)
from app.models.classes_basicas.Pessoa import Pessoa
class Empregado(Pessoa):
id_empregado = None
def getIdEmpregado(self):
return self.id_empregado
def setIdEmpregado(self, id_empregado):
self.id_empregado = id_empregado
avg_line_length: 23.363636 | max_line_length: 52 | alphanum_fraction: 0.723735 | count_classes: 203 | score_classes: 0.789883 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 0 | score_documentation: 0