hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b599bf6b4e303165310336665894d94e2aa9e6af | 2,717 | py | Python | tap/pd/cuburi.py | FloaterTS/teme-fmi | 624296d3b3341f1c18fb26768e361ce2e1faa68c | [
"MIT"
] | 54 | 2020-03-17T10:00:15.000Z | 2022-03-31T06:40:30.000Z | tap/pd/cuburi.py | florinalexandrunecula/teme-fmi | b4d7a416a5ca71b76d66b9407ad2b8ee2af9301e | [
"MIT"
] | null | null | null | tap/pd/cuburi.py | florinalexandrunecula/teme-fmi | b4d7a416a5ca71b76d66b9407ad2b8ee2af9301e | [
"MIT"
] | 59 | 2020-01-22T11:39:59.000Z | 2022-03-28T00:19:06.000Z | """
Se dă o listă de cuburi de latură l_i și culoare c_i.
Să se construiască un turn de înălțime maximă astfel încât
laturile cuburilor succesive sunt în ordine crescătoare și
culorile cuburilor alăturate sunt diferite.
Laturile _nu_ sunt distincte.
Să se găsească înălțimea maximă posibilă a unui turn și
numărul de moduri în care se poate obține acel turn.
Sortăm cuburile descrescător după latură.
Definim:
- H[i] = înălțimea maximă a unui turn care are la bază cubul i
- nr[i] = câte turnuri de înălțime H[i] se pot forma având la bază cubul
Inițializăm cuburile de latură minimă:
- înălțimea = înălțimea cubului
- numărul de turnuri = 1
Exemplu:
(9, 3) (8, 1) (8, 2) (7, 3) (7, 1) (6, 1) (5, 2) (3, 4) (3, 5)
38 29 29 21 15 14 8 3 3
4 2 2 2 2 2 2 1 1
Recurența este:
- nr[i] = sumă de nr[j] pentru j unde
H[i] = H[j] + latura cubului i,
și culoarea cubului i != culoarea cubului j
- H[i] = max(H[j] + latura lui i) pentru j unde
pot adăuga cubul i peste cubul j
"""
from typing import NamedTuple
cubes = []
with open('cuburi.txt') as fin:
n, _ = map(int, next(fin).split())
for _ in range(n):
line = next(fin)
length, color = map(int, line.split())
cubes.append(Cube(length, color))
cubes.sort()
max_heights = [cubes[i].length for i in range(n)]
max_counts = [1 for _ in range(n)]
preds = [-1 for _ in range(n)]
for i in range(n):
max_height = cubes[i].length
for j in range(i):
height = cubes[i].length + max_heights[j]
if cubes[i].color != cubes[j].color and cubes[i].length != cubes[j].length:
if height > max_height:
max_height = height
preds[i] = j
max_heights[i] = max_height
if max_height == cubes[i].length:
max_counts[i] = 1
else:
max_count = 0
for j in range(i):
if cubes[i].color != cubes[j].color and max_height == max_heights[j] + cubes[i].length:
max_count += max_counts[j]
max_counts[i] = max_count
max_height = 0
max_idx = -1
for idx, height in enumerate(max_heights):
if height > max_height:
max_height = height
max_idx = idx
current_idx = max_idx
print('Turn:')
while current_idx != -1:
print(cubes[current_idx])
current_idx = preds[current_idx]
print('Număr de turnuri:')
print(sum(max_counts[i] for i in range(n) if max_heights[i] == max_height))
| 26.637255 | 99 | 0.597718 | """
Se dă o listă de cuburi de latură l_i și culoare c_i.
Să se construiască un turn de înălțime maximă astfel încât
laturile cuburilor succesive sunt în ordine crescătoare și
culorile cuburilor alăturate sunt diferite.
Laturile _nu_ sunt distincte.
Să se găsească înălțimea maximă posibilă a unui turn și
numărul de moduri în care se poate obține acel turn.
Sortăm cuburile descrescător după latură.
Definim:
- H[i] = înălțimea maximă a unui turn care are la bază cubul i
- nr[i] = câte turnuri de înălțime H[i] se pot forma având la bază cubul
Inițializăm cuburile de latură minimă:
- înălțimea = înălțimea cubului
- numărul de turnuri = 1
Exemplu:
(9, 3) (8, 1) (8, 2) (7, 3) (7, 1) (6, 1) (5, 2) (3, 4) (3, 5)
38 29 29 21 15 14 8 3 3
4 2 2 2 2 2 2 1 1
Recurența este:
- nr[i] = sumă de nr[j] pentru j unde
H[i] = H[j] + latura cubului i,
și culoarea cubului i != culoarea cubului j
- H[i] = max(H[j] + latura lui i) pentru j unde
pot adăuga cubul i peste cubul j
"""
from typing import NamedTuple
class Cube(NamedTuple):
    """A cube with an integer side length and a color code.

    Sorting uses the NamedTuple default order: by ``length`` first,
    then by ``color``.
    """
    length: int
    color: int
    def __repr__(self):
        # Rendered as "<length> <color>" when a tower is printed.
        return '{} {}'.format(self.length, self.color)
cubes = []
with open('cuburi.txt') as fin:
    # First line: the number of cubes (the second value on the line is unused).
    n, _ = map(int, next(fin).split())
    for _ in range(n):
        line = next(fin)
        length, color = map(int, line.split())
        cubes.append(Cube(length, color))
# Sort by (length, color); the DP below only ever stacks cube i on a cube j
# with j < i, i.e. on a cube with smaller-or-equal side.
cubes.sort()
# max_heights[i]: tallest tower whose topmost-placed cube is cube i.
max_heights = [cubes[i].length for i in range(n)]
# max_counts[i]: number of distinct towers reaching max_heights[i] at cube i.
max_counts = [1 for _ in range(n)]
# preds[i]: index of the cube directly under cube i (-1 = tower starts here).
preds = [-1 for _ in range(n)]
for i in range(n):
    max_height = cubes[i].length
    for j in range(i):
        height = cubes[i].length + max_heights[j]
        # Cube i can extend a tower ending at j only if colors differ and the
        # side lengths are strictly different (sides are not distinct overall).
        if cubes[i].color != cubes[j].color and cubes[i].length != cubes[j].length:
            if height > max_height:
                max_height = height
                preds[i] = j
    max_heights[i] = max_height
    if max_height == cubes[i].length:
        max_counts[i] = 1
    else:
        # Sum the counts of every predecessor that achieves the optimum.
        max_count = 0
        for j in range(i):
            if cubes[i].color != cubes[j].color and max_height == max_heights[j] + cubes[i].length:
                max_count += max_counts[j]
        max_counts[i] = max_count
# Find the overall tallest tower.
max_height = 0
max_idx = -1
for idx, height in enumerate(max_heights):
    if height > max_height:
        max_height = height
        max_idx = idx
# Walk the predecessor chain to print one optimal tower.
current_idx = max_idx
print('Turn:')
while current_idx != -1:
    print(cubes[current_idx])
    current_idx = preds[current_idx]
print('Număr de turnuri:')
print(sum(max_counts[i] for i in range(n) if max_heights[i] == max_height))
| 43 | 60 | 23 |
e5d9b8492dc2f3c696460b02930b92d545ee10a6 | 129 | py | Python | AdventOfCode_01_1.py | Trapper007/A-beautiful-code-in-Python | 80c4209a7f74e5693b576fe636f667b7195e8b5f | [
"MIT"
] | 1 | 2019-03-02T19:57:25.000Z | 2019-03-02T19:57:25.000Z | AdventOfCode_01_1.py | Trapper007/A-beautiful-code-in-Python | 80c4209a7f74e5693b576fe636f667b7195e8b5f | [
"MIT"
] | null | null | null | AdventOfCode_01_1.py | Trapper007/A-beautiful-code-in-Python | 80c4209a7f74e5693b576fe636f667b7195e8b5f | [
"MIT"
] | null | null | null | zahlen = []
with open('AdventOfCode_01_1_Input.txt') as f:
for zeile in f:
zahlen.append(int(zeile))
print(sum(zahlen)) | 18.428571 | 46 | 0.682171 | zahlen = []
with open('AdventOfCode_01_1_Input.txt') as f:
for zeile in f:
zahlen.append(int(zeile))
print(sum(zahlen)) | 0 | 0 | 0 |
e6ec1b9847eeb2b8c307e32d2722d622f8abb8ce | 2,049 | py | Python | utils/scripts/systemd.py | ethpch/api.ethpch | af56354a7e8f5304a5c86dd752577da376f1f1ce | [
"MIT"
] | 2 | 2021-09-23T14:43:10.000Z | 2021-09-26T12:01:11.000Z | utils/scripts/systemd.py | ethpch/api.ethpch | af56354a7e8f5304a5c86dd752577da376f1f1ce | [
"MIT"
] | null | null | null | utils/scripts/systemd.py | ethpch/api.ethpch | af56354a7e8f5304a5c86dd752577da376f1f1ce | [
"MIT"
] | null | null | null | import platform
__all__ = ()
if platform.system() == 'Linux':
from pathlib import Path
from constants import ROOT_DIR, VENV_DIR, SYSTEMD_DIR
from utils.config import asgi_framework
from . import run_subprocess
__all__ = ('create_systemd_unit', 'enable_systemd_unit', 'start_service',
'disable_systemd_unit', 'stop_service', 'restart_service',
'service_running')
| 37.254545 | 77 | 0.583211 | import platform
__all__ = ()
if platform.system() == 'Linux':
from pathlib import Path
from constants import ROOT_DIR, VENV_DIR, SYSTEMD_DIR
from utils.config import asgi_framework
from . import run_subprocess
    def create_systemd_unit(venv: Path = VENV_DIR,
                            force_install: bool = False,
                            service_name: str = None):
        """Write a systemd unit file for the ASGI app and reload systemd.

        Args:
            venv: virtualenv whose interpreter runs ``main.py runserver``.
            force_install: overwrite an existing unit file when True.
            service_name: unit name; defaults to the configured ASGI framework.
        """
        exec_cmd = f'{venv / "bin/python"} {ROOT_DIR / "main.py"} runserver'
        # Minimal unit: restart on failure, "reload" by re-running the command.
        template = ('[Unit]\n'
                    f'Description={asgi_framework}\n'
                    '[Service]\n'
                    'TimeoutSec=3\n'
                    f'WorkingDirectory={ROOT_DIR}\n'
                    f'ExecStart={exec_cmd}\n'
                    'Restart=on-failure\n'
                    f'ExecReload={exec_cmd}\n'
                    'RestartSec=3\n'
                    '[Install]\n'
                    'WantedBy=multi-user.target')
        if not service_name:
            service_name = asgi_framework
        unit = SYSTEMD_DIR / f'{service_name}.service'
        # Only (re)write the unit when it is absent or explicitly forced,
        # then make systemd pick up the change.
        if unit.exists() is False or force_install is True:
            unit.write_text(template, encoding='utf-8')
            run_subprocess(['systemctl', 'daemon-reload'])
    def enable_systemd_unit():
        """Enable the unit so systemd starts the service at boot."""
        run_subprocess(['systemctl', 'enable', asgi_framework])
    def start_service():
        """Start the service now via the ``service`` command."""
        run_subprocess(['service', asgi_framework, 'start'])
    def disable_systemd_unit():
        """Stop systemd from starting the service at boot."""
        run_subprocess(['systemctl', 'disable', asgi_framework])
    def stop_service():
        """Stop the running service."""
        run_subprocess(['service', asgi_framework, 'stop'])
    def restart_service():
        """Restart the service."""
        run_subprocess(['service', asgi_framework, 'restart'])
    def service_running() -> bool:
        """Return True when ``service <name> status`` reports 'active (running)'."""
        cp = run_subprocess(['service', asgi_framework, 'status'])
        return True if 'active (running)' in cp.stdout else False
__all__ = ('create_systemd_unit', 'enable_systemd_unit', 'start_service',
'disable_systemd_unit', 'stop_service', 'restart_service',
'service_running')
| 1,445 | 0 | 189 |
1f00e8208fc0a114d819af9f6d097f48df76fc14 | 3,968 | py | Python | src/hotdog.py | hunterbly/TalkingBot | 683a043af91909728c39eb949d90af55be7c6475 | [
"Apache-2.0"
] | null | null | null | src/hotdog.py | hunterbly/TalkingBot | 683a043af91909728c39eb949d90af55be7c6475 | [
"Apache-2.0"
] | null | null | null | src/hotdog.py | hunterbly/TalkingBot | 683a043af91909728c39eb949d90af55be7c6475 | [
"Apache-2.0"
] | null | null | null | import requests
import inspect
import urllib
import pandas as pd
#####################################
### ###
### Define constant ###
### ###
#####################################
CONST_ENDPOINT = '206.189.149.240'
CONST_PORT = 4000
CONST_LIBRARY = 'HotDog'
def convert_dict_format(old_dict):
"""
Convert dictionary with key in underscore format to dot foramt.
And values to be quoted. Used for R param conversion
Args:
old_dict (dict): Old dictionary with underscore as key
Returns:
new_dict (dict): New dictionary with dot separated key and quoted values
Example:
old_dict = {'ref.date': '2020-01-10'}
new_dict = convert_dict_key(old_dict)
TODO:
1. Based on type of values, e.g. not quote bool
"""
new_keys = [k.replace('_', '.') for k in old_dict.keys()]
new_values = ["'{}'".format(str(v)) for v in old_dict.values()]
new_dict = dict(zip(new_keys, new_values))
return(new_dict)
def json_to_df(json):
""" json to dataframe with id column dropped """
try:
df = pd.read_json(json)
df.drop(columns=['id'], axis=1, inplace=True, errors='ignore') # drop id column if exists
# Convert datetime columns to date
# if 'date' in df.columns:
# df['date'] = df['date'].dt.date
except:
return(json) # Return error message from R
return(df)
@postit
def GetSignalPerformance(code, option_only=True):
"""
Get signal history performace
Args:
code (str): Stock code
option_only (bool): Specify whether the signal are for option only stocks. Default true
Returns:
df (Dataframe):
Example:
GetSignalPerformance(ref_date = '2020-01-10')
"""
func_name = inspect.stack()[0][3]
return(func_name)
@postit
def LoadHitSignal(ref_date, option_only=True):
"""
Load signal hit history in database.
Return all or option only signal with wide or long format
Args:
ref_date (str): Date in YYYY-MM-DD format, e.g. 2018-01-01
option_only (bool): Specify whether the signal are for option only stocks. Default true
Returns:
df.signal (Dataframe): Stock price dataframe with calculated signal in the input date only
Example:
LoadHitSignal(ref_date = '2020-01-10')
"""
func_name = inspect.stack()[0][3]
return(func_name)
@postit
def check_cronjob():
"""
Return the latest date of records in the cronjob tables
Args:
None
Returns:
df.res (Dataframe): Dataframe of latest date of cronjob tables
Example:
df.res = check_cronjob()
"""
func_name = inspect.stack()[0][3]
return(func_name)
| 25.113924 | 98 | 0.583669 | import requests
import inspect
import urllib
import pandas as pd
#####################################
### ###
### Define constant ###
### ###
#####################################
CONST_ENDPOINT = '206.189.149.240'
CONST_PORT = 4000
CONST_LIBRARY = 'HotDog'
def convert_dict_format(old_dict):
    """
    Convert a keyword-argument dict into R-style OpenCPU parameters.

    Underscores in keys become dots ('ref_date' -> 'ref.date') and every
    value is stringified and wrapped in single quotes.
    Args:
        old_dict (dict): parameters keyed with underscores
    Returns:
        dict: dot-separated keys mapped to quoted string values
    Example:
        convert_dict_format({'ref_date': '2020-01-10'})
        -> {'ref.date': "'2020-01-10'"}
    TODO:
        1. Quote based on the value's type (e.g. leave bool unquoted)
    """
    return {key.replace('_', '.'): "'{}'".format(str(value))
            for key, value in old_dict.items()}
def json_to_df(json):
    """Parse a JSON string into a DataFrame with the ``id`` column dropped.

    On parse failure the raw input is returned unchanged, so error text
    coming back from the R server is passed through to the caller.
    """
    try:
        df = pd.read_json(json)
    # Was a bare ``except:``, which also swallowed KeyboardInterrupt/SystemExit;
    # narrowed so only real parsing errors fall back to the raw input.
    except Exception:
        return(json)  # Return error message from R
    df.drop(columns=['id'], axis=1, inplace=True, errors='ignore')  # drop id column if exists
    return(df)
def testing():
    """Ad-hoc smoke test: POST a fixed load_hit_signal query to the server.

    Returns the raw UTF-8 encoded response body (bytes).
    """
    url = "http://206.189.149.240:4000/ocpu/library/HotDog/R/load_hit_signal/json"
    # %27 is a URL-encoded single quote: ref_date='2020-01-10'
    payload = 'ref_date=%272020-01-10%27&option_only=true'
    headers = {'Content-Type': 'application/x-www-form-urlencoded'}
    response = requests.request("POST", url, headers=headers, data = payload)
    res = response.text.encode('utf8')
    return(res)
def postit(method):
    """
    Decorator that turns a name-returning stub into an OpenCPU HTTP call.

    The wrapped function is executed only to obtain the name of the R
    routine to invoke; its keyword arguments are converted with
    convert_dict_format(), URL-encoded and POSTed to the server.  The
    response body is handed to json_to_df(), which yields a DataFrame on
    success or the raw server text on parse failure.
    """
    def posted(*args, **kw):
        r_func = method(*args, **kw)
        url = "http://{}:{}/ocpu/library/{}/R/{}/json".format(
            CONST_ENDPOINT, CONST_PORT, CONST_LIBRARY, r_func)
        body = urllib.parse.urlencode(convert_dict_format(kw))
        # The OpenCPU endpoint expects form-encoded parameters.
        response = requests.request(
            "POST", url,
            headers={'Content-Type': 'application/x-www-form-urlencoded'},
            data=body)
        return json_to_df(response.text.encode('utf8'))
    return posted
@postit
def GetSignalPerformance(code, option_only=True):
    """
    Get signal history performance for one stock.
    Args:
        code (str): Stock code
        option_only (bool): Specify whether the signal are for option only stocks. Default true
    Returns:
        df (Dataframe): performance records returned by the R backend
    Example:
        GetSignalPerformance(code='0005')
    """
    # The body only reports this function's own name; the @postit wrapper
    # uses it as the R routine to call and forwards the keyword arguments.
    func_name = inspect.stack()[0][3]
    return(func_name)
@postit
def LoadHitSignal(ref_date, option_only=True):
    """
    Load signal hit history in database.
    Return all or option only signal with wide or long format
    Args:
        ref_date (str): Date in YYYY-MM-DD format, e.g. 2018-01-01
        option_only (bool): Specify whether the signal are for option only stocks. Default true
    Returns:
        df.signal (Dataframe): Stock price dataframe with calculated signal in the input date only
    Example:
        LoadHitSignal(ref_date = '2020-01-10')
    """
    # Returns its own name; the @postit wrapper turns it into the HTTP call.
    func_name = inspect.stack()[0][3]
    return(func_name)
@postit
def check_cronjob():
    """
    Return the latest date of records in the cronjob tables
    Args:
        None
    Returns:
        df.res (Dataframe): Dataframe of latest date of cronjob tables
    Example:
        df.res = check_cronjob()
    """
    # Returns its own name; the @postit wrapper turns it into the HTTP call.
    func_name = inspect.stack()[0][3]
    return(func_name)
| 1,130 | 0 | 46 |
b3508e9e922b2c6098b3e3195029c91fade1a9d9 | 206 | py | Python | hdbo/febo/__init__.py | eric-vader/HD-BO-Additive-Models | 0d7e1d46194af2e3d402631caec6e7be9a50376a | [
"MIT"
] | 5 | 2021-03-25T02:58:01.000Z | 2022-02-19T12:58:52.000Z | hdbo/febo/__init__.py | eric-vader/HD-BO-Additive-Models | 0d7e1d46194af2e3d402631caec6e7be9a50376a | [
"MIT"
] | null | null | null | hdbo/febo/__init__.py | eric-vader/HD-BO-Additive-Models | 0d7e1d46194af2e3d402631caec6e7be9a50376a | [
"MIT"
] | 1 | 2020-12-27T07:58:46.000Z | 2020-12-27T07:58:46.000Z | # from . import algorithms
# from . import controller
# from . import environment
# from . import experiment
# from . import models
# from . import optimizers
# from . import plotting
# from . import utils
| 22.888889 | 27 | 0.728155 | # from . import algorithms
# from . import controller
# from . import environment
# from . import experiment
# from . import models
# from . import optimizers
# from . import plotting
# from . import utils
| 0 | 0 | 0 |
adffd45dea7a8f0964bbfdc2edb359531f3df8ae | 1,045 | py | Python | Submission_Assignment_3_CV/Source/Motion.py | rahuljain1310/Augmented-Reality-Application | 6a464151fc08af45197b35a68734bc613ed2a7db | [
"MIT"
] | null | null | null | Submission_Assignment_3_CV/Source/Motion.py | rahuljain1310/Augmented-Reality-Application | 6a464151fc08af45197b35a68734bc613ed2a7db | [
"MIT"
] | null | null | null | Submission_Assignment_3_CV/Source/Motion.py | rahuljain1310/Augmented-Reality-Application | 6a464151fc08af45197b35a68734bc613ed2a7db | [
"MIT"
] | null | null | null | import numpy as np
import math
# ls = np.array([[-1,2,1], [0,-3,2], [1,1,-4]])
# plane = getPlane(ls)
# incident = np.array([1,0,0])
# print(getReflectionFromPlane(plane,incident))
| 21.770833 | 49 | 0.638278 | import numpy as np
import math
def getCentroid(lp):
    """Return the centroid (column-wise mean) of four stacked points."""
    # Exactly four points are expected.
    assert lp.shape[0] == 4
    return np.array(lp).mean(axis=0)
def getPlane(ls):
    """
    Fit the plane through the first three points of ``ls``.

    Returns the normalized coefficients [a, b, c, d] of
    a*x + b*y + c*z + d = 0, where (a, b, c) is a unit normal.
    """
    p1, p2, p3 = ls[0], ls[1], ls[2]
    a, b, c = np.cross(p2 - p1, p3 - p1)
    d = -(a * p1[0] + b * p1[1] + c * p1[2])
    coeffs = np.array([a, b, c, d], dtype=float)
    return coeffs / math.sqrt(a * a + b * b + c * c)
def getYcoordinate(plane, x, z=0):
    """Solve the stored plane equation for y at the given x (and optional z)."""
    a, b, c, d = plane
    # NOTE(review): for a plane a*x + b*y + c*z + d = 0 the solution would be
    # -(d + a*x + c*z)/b; this uses +d — confirm the intended sign convention.
    return (d - a * x - c * z) / b
def getMotionStep(intialPoint, finalPoint, step):
    """
    Build a 4x4 homogeneous transform translating ``step`` units from
    ``intialPoint`` toward ``finalPoint`` (identity rotation).
    """
    direction = finalPoint - intialPoint
    distance = np.linalg.norm(direction)
    transform = np.identity(4)
    transform[:3, 3] = direction * (step / distance)
    return transform
def getFinalPoint(ls, x, z=0):
    """Homogeneous point [x, y, z, 1] with y solved from the plane of ``ls``."""
    y = getYcoordinate(getPlane(ls), x, z)
    return np.array([x, y, z, 1])
def getReflectionFromPlane(plane, incident):
    """Reflect ``incident`` about the plane's unit normal (first 3 coefficients)."""
    normal = plane[0:3]
    return incident - 2 * normal.dot(incident) * normal
# ls = np.array([[-1,2,1], [0,-3,2], [1,1,-4]])
# plane = getPlane(ls)
# incident = np.array([1,0,0])
# print(getReflectionFromPlane(plane,incident))
| 720 | 0 | 138 |
77346166654c3b420738931448268b0003c0a722 | 149 | py | Python | practical-penguins/trivia_tavern/trivia_builder/admin.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 40 | 2020-08-02T07:38:22.000Z | 2021-07-26T01:46:50.000Z | practical-penguins/trivia_tavern/trivia_builder/admin.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 134 | 2020-07-31T12:15:45.000Z | 2020-12-13T04:42:19.000Z | practical-penguins/trivia_tavern/trivia_builder/admin.py | Vthechamp22/summer-code-jam-2021 | 0a8bf1f22f6c73300891fd779da36efd8e1304c1 | [
"MIT"
] | 101 | 2020-07-31T12:00:47.000Z | 2021-11-01T09:06:58.000Z | from django.contrib import admin
from .models import TriviaQuiz, TriviaQuestion
admin.site.register(TriviaQuiz)
admin.site.register(TriviaQuestion)
| 24.833333 | 46 | 0.845638 | from django.contrib import admin
from .models import TriviaQuiz, TriviaQuestion
# Make both trivia models editable through the Django admin site.
admin.site.register(TriviaQuiz)
admin.site.register(TriviaQuestion)
| 0 | 0 | 0 |
86f1da08feee667aa219ef73ea2379de005b8544 | 3,731 | py | Python | week07/01.ContextManagers/test_silence_exception.py | HackBulgaria/Programming-101-Python-2020-Spring | 443446028df7fe78fcdd6c37dada0b5cd8ed3c93 | [
"MIT"
] | 30 | 2020-01-22T17:22:43.000Z | 2022-01-26T08:28:57.000Z | week07/01.ContextManagers/test_silence_exception.py | HackBulgaria/Programming-101-Python-2020-Spring | 443446028df7fe78fcdd6c37dada0b5cd8ed3c93 | [
"MIT"
] | 1 | 2020-01-21T19:50:47.000Z | 2020-03-18T16:18:31.000Z | week07/01.ContextManagers/test_silence_exception.py | HackBulgaria/Programming-101-Python-2020-Spring | 443446028df7fe78fcdd6c37dada0b5cd8ed3c93 | [
"MIT"
] | 7 | 2019-11-28T15:59:16.000Z | 2020-12-05T08:39:02.000Z | import unittest
from silence_exception import silence_exception, SilenceException
if __name__ == '__main__':
unittest.main()
| 33.918182 | 77 | 0.676762 | import unittest
from silence_exception import silence_exception, SilenceException
class SilenceExceptionTests(unittest.TestCase):
    """Tests for the ``silence_exception`` context manager.

    Covers suppressing the requested exception type (optionally only when
    its message matches) and re-raising anything that does not match.
    """
    def test_silences_passed_exception(self):
        exception = None
        try:
            with silence_exception(ValueError):
                raise ValueError('Testing.')
        except Exception as exc:
            exception = exc
        self.assertIsNone(exception)
    def test_not_silences_different_exception_from_passed_one(self):
        with self.assertRaises(ValueError):
            with silence_exception(TypeError):
                raise ValueError('Testing.')
    def test_not_silences_passed_exception_outside_context_manager(self):
        # The second raise sits after the with-block, so it must propagate.
        with self.assertRaises(ValueError, msg='Testing outside with-block'):
            with silence_exception(ValueError):
                raise ValueError('Testing inside with-block')
            raise ValueError('Testing outside with-block')
    def test_silences_passed_exception_with_correct_message(self):
        exception = None
        exc_message = 'Testing with msg argument.'
        try:
            with silence_exception(ValueError, msg=exc_message):
                raise ValueError(exc_message)
        except Exception as exc:
            exception = exc
        self.assertIsNone(exception)
    def test_not_silences_passed_exception_with_different_message(self):
        exc_message = 'Testing with msg argument.'
        with self.assertRaises(ValueError):
            with silence_exception(ValueError, msg=exc_message):
                raise ValueError(f'{exc_message} - different.')
    def test_not_silences_different_exception_with_same_message(self):
        exc_message = 'Testing with msg argument.'
        with self.assertRaises(TypeError):
            with silence_exception(ValueError, msg=exc_message):
                raise TypeError(exc_message)
class SilenceExceptionClassTests(unittest.TestCase):
    """Same scenarios as above, exercised against the ``SilenceException``
    class-based context manager instead of the function form.
    """
    def test_silences_passed_exception(self):
        exception = None
        try:
            with SilenceException(ValueError):
                raise ValueError('Testing.')
        except Exception as exc:
            exception = exc
        self.assertIsNone(exception)
    def test_not_silences_different_exception_from_passed_one(self):
        with self.assertRaises(ValueError):
            with SilenceException(TypeError):
                raise ValueError('Testing.')
    def test_not_silences_passed_exception_outside_context_manager(self):
        # The second raise sits after the with-block, so it must propagate.
        with self.assertRaises(ValueError, msg='Testing outside with-block'):
            with SilenceException(ValueError):
                raise ValueError('Testing inside with-block')
            raise ValueError('Testing outside with-block')
    def test_silences_passed_exception_with_correct_message(self):
        exception = None
        exc_message = 'Testing with msg argument.'
        try:
            with SilenceException(ValueError, msg=exc_message):
                raise ValueError(exc_message)
        except Exception as exc:
            exception = exc
        self.assertIsNone(exception)
    def test_not_silences_passed_exception_with_different_message(self):
        exc_message = 'Testing with msg argument.'
        with self.assertRaises(ValueError):
            with SilenceException(ValueError, msg=exc_message):
                raise ValueError(f'{exc_message} - different.')
    def test_not_silences_different_exception_with_same_message(self):
        exc_message = 'Testing with msg argument.'
        with self.assertRaises(TypeError):
            with SilenceException(ValueError, msg=exc_message):
                raise TypeError(exc_message)
if __name__ == '__main__':
unittest.main()
| 3,172 | 57 | 368 |
e6bfcd74311ee5ad897bc46787e524eb4046754e | 2,091 | py | Python | Dataset Cleaning and Exploration/merge_hospitals.py | ebasanez/samur.ai | 03a9af8bf2e6ab5a743f9b0069527ac8c0c59d6d | [
"MIT"
] | 1 | 2020-05-24T09:31:37.000Z | 2020-05-24T09:31:37.000Z | Dataset Cleaning and Exploration/merge_hospitals.py | ebasanez/samur.ai | 03a9af8bf2e6ab5a743f9b0069527ac8c0c59d6d | [
"MIT"
] | null | null | null | Dataset Cleaning and Exploration/merge_hospitals.py | ebasanez/samur.ai | 03a9af8bf2e6ab5a743f9b0069527ac8c0c59d6d | [
"MIT"
] | 1 | 2020-09-24T17:45:39.000Z | 2020-09-24T17:45:39.000Z | import sys, getopt
import utils
import pandas as pd
import DatasetPaths
import yaml
KEY = 'Hospital'
COLUMNS_TO_KEEP = ['Hospital','km0_x','km0_y']
# Execute only if script run standalone (not imported)
if __name__ == '__main__':
df_samur = pd.read_csv(DatasetPaths.SAMUR)
df_hospitals = pd.read_csv(DatasetPaths.HOSPITALS)
df = merge_hospitals(df_samur, df_hospitals)
print(df.head())
df.to_csv(DatasetPaths.SAMUR_MERGED.format('hospitals'),index = False);
df = assign_ambulances(df,df_hospitals,utils.NUMBER_OF_AMBULANCES)
# Transform to dictionary and save to yaml
df_dict = [{0:{'available_amb':0,'name':'NaN','loc':{'district_code':0,'x':0.0,'y':0.0}}}]
for index,r in df.iterrows():
df_dict.append({index+1:{'available_amb':r.Ambulances,'name':r.Hospital,'loc':{'district_code':r.district_code,'x':r.hospital_x,'y':r.hospital_y}}})
yaml_file = open(DatasetPaths.HOSPITALS_YAML,"w+",encoding='utf8')
yaml.dump(df_dict,yaml_file,allow_unicode = True)
| 41 | 150 | 0.727403 | import sys, getopt
import utils
import pandas as pd
import DatasetPaths
import yaml
KEY = 'Hospital'
COLUMNS_TO_KEEP = ['Hospital','km0_x','km0_y']
def merge_hospitals(df_samur, df_hospitals):
    """Join SAMUR incidents with canonical hospital names and coordinates.

    Matches the raw ``Hospital`` value against ``name_orig`` in the hospitals
    table, keeps the canonical name plus coordinates, drops two hospitals
    located outside Madrid, and sorts by the ``Solicitud`` column.
    Note: ``df_samur`` is modified in place (its column is renamed).
    """
    df_hospitals = df_hospitals[['name_orig','Hospital','hospital_x','hospital_y']]
    df_samur.rename(columns={'Hospital':'Hospital_old'}, inplace=True)
    df = pd.merge(df_samur, df_hospitals, left_on='Hospital_old', right_on='name_orig', how = 'outer')
    df.drop(columns=['Hospital_old','name_orig'],inplace=True)
    # Remove values for hospitals 'Alcalá de Henares (Ppe. de Asturias)', 'Getafe' because those are outside Madrid
    df = df[~df.Hospital.isin(['Alcalá de Henares (Ppe. de Asturias)','Getafe'])]
    df.sort_values(by = 'Solicitud',inplace = True);
    return df
def assign_ambulances(df_samur, df_hospitals, total_ambulances):
    """Distribute ``total_ambulances`` across hospitals by incident share.

    Counts incidents per hospital, drops hospitals without a valid district
    (``district_code`` == -1) and gives each remaining hospital a number of
    ambulances proportional to its share of incidents (rounded to int).
    """
    dfg = df_samur.groupby('Hospital').agg({'Hospital':'count'})
    dfg.rename(columns={'Hospital':'Total'}, inplace = True)
    df = pd.merge(dfg, df_hospitals, left_on='Hospital', right_on='Hospital')
    df = df[df['district_code'] != -1]
    df.reset_index(inplace = True, drop = True)
    total = df.Total.sum()
    df['Ambulances'] = round(df['Total'] / total * total_ambulances)
    df = df.astype({'Ambulances':'int32'})
    print(df)
    return df
# Execute only if script run standalone (not imported)
if __name__ == '__main__':
    df_samur = pd.read_csv(DatasetPaths.SAMUR)
    df_hospitals = pd.read_csv(DatasetPaths.HOSPITALS)
    # Attach hospital names/coordinates to the SAMUR incidents and persist.
    df = merge_hospitals(df_samur, df_hospitals)
    print(df.head())
    df.to_csv(DatasetPaths.SAMUR_MERGED.format('hospitals'),index = False);
    df = assign_ambulances(df,df_hospitals,utils.NUMBER_OF_AMBULANCES)
    # Transform to dictionary and save to yaml
    # Entry 0 is a placeholder record; real hospitals are keyed 1..n.
    df_dict = [{0:{'available_amb':0,'name':'NaN','loc':{'district_code':0,'x':0.0,'y':0.0}}}]
    for index,r in df.iterrows():
        df_dict.append({index+1:{'available_amb':r.Ambulances,'name':r.Hospital,'loc':{'district_code':r.district_code,'x':r.hospital_x,'y':r.hospital_y}}})
    # NOTE(review): the file handle is never closed; consider a `with` block.
    yaml_file = open(DatasetPaths.HOSPITALS_YAML,"w+",encoding='utf8')
    yaml.dump(df_dict,yaml_file,allow_unicode = True)
| 1,062 | 0 | 47 |
adabaf55653c8947d4820066a7a6aaeb8d99aef4 | 352 | py | Python | src/dlspringer/book.py | mzntaka0/dlspringer | 26f3abeb5d667c659ed5ea42209b2420a0fb57c9 | [
"Apache-2.0"
] | null | null | null | src/dlspringer/book.py | mzntaka0/dlspringer | 26f3abeb5d667c659ed5ea42209b2420a0fb57c9 | [
"Apache-2.0"
] | null | null | null | src/dlspringer/book.py | mzntaka0/dlspringer | 26f3abeb5d667c659ed5ea42209b2420a0fb57c9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
"""
import argparse
import os
import sys
from abc import abstractmethod, ABCMeta
from pathlib import Path
try:
from bpdb import set_trace
except ImportError:
from pdb import set_trace
class Book(object):
"""
Args:
"""
if __name__ == '__main__':
pass
| 12.571429 | 39 | 0.642045 | # -*- coding: utf-8 -*-
"""
"""
import argparse
import os
import sys
from abc import abstractmethod, ABCMeta
from pathlib import Path
try:
from bpdb import set_trace
except ImportError:
from pdb import set_trace
class Book(object):
    """
    Placeholder container for a book's data.

    Attributes:
        title: the book's title; ``None`` until one is assigned.
    """
    def __init__(self):
        # The original body was the bare expression ``self.title``, which
        # raised AttributeError on every construction; initialize it instead.
        self.title = None
if __name__ == '__main__':
pass
| 17 | 0 | 27 |
877b7dffb5d12ec4f0120b70b526b47ce8174e2c | 158 | py | Python | test/test_append_items.py | codeclimate-testing/falcon | c2d0b9da4d4cffd39cd489ffa886ee745d06f063 | [
"Apache-2.0"
] | 115 | 2015-01-18T13:28:05.000Z | 2022-03-01T23:45:44.000Z | test/test_append_items.py | codeclimate-testing/falcon | c2d0b9da4d4cffd39cd489ffa886ee745d06f063 | [
"Apache-2.0"
] | null | null | null | test/test_append_items.py | codeclimate-testing/falcon | c2d0b9da4d4cffd39cd489ffa886ee745d06f063 | [
"Apache-2.0"
] | 8 | 2015-02-12T04:08:42.000Z | 2018-09-11T20:55:29.000Z | from testing_helpers import wrap
@wrap
| 11.285714 | 33 | 0.670886 | from testing_helpers import wrap
@wrap
def append_items(n):
    """Build a list by appending the integers 0..n-1 one at a time."""
    collected = []
    for value in range(n):
        collected.append(value)
def test_append_items():
    # Exercise the (wrapped) append loop with a representative size.
    append_items(1000)
| 69 | 0 | 45 |
89c968fdafb3d01c4904adc70b10fc1f2119094e | 169 | py | Python | Python/Numpy/Dot and Cross/Solution.py | PawarAditi/HackerRank | fcd9d1450ee293372ce5f1d4a3b7284ecf472657 | [
"MIT"
] | 219 | 2018-06-17T19:47:22.000Z | 2022-03-27T15:28:56.000Z | Python/Numpy/Dot and Cross/Solution.py | PawarAditi/HackerRank | fcd9d1450ee293372ce5f1d4a3b7284ecf472657 | [
"MIT"
] | 2 | 2020-08-12T16:47:41.000Z | 2020-12-15T17:05:57.000Z | Python/Numpy/Dot and Cross/Solution.py | PawarAditi/HackerRank | fcd9d1450ee293372ce5f1d4a3b7284ecf472657 | [
"MIT"
] | 182 | 2018-12-12T21:36:50.000Z | 2022-03-26T17:49:51.000Z | import numpy
n = int(input())
a = numpy.array([input().split() for _ in range(n)], int)
b = numpy.array([input().split() for _ in range(n)], int)
print(numpy.dot(a, b)) | 28.166667 | 57 | 0.633136 | import numpy
n = int(input())
a = numpy.array([input().split() for _ in range(n)], int)
b = numpy.array([input().split() for _ in range(n)], int)
print(numpy.dot(a, b)) | 0 | 0 | 0 |
bd13471cb732162104c2580d35ff827541472aba | 675 | py | Python | scripts/freeboard_ros.py | AutonomyLab/freeboard_ros | 3353351c8ab8210485255d4b3e654e4d015f1205 | [
"MIT"
] | 10 | 2015-10-04T14:20:22.000Z | 2021-09-30T17:37:54.000Z | scripts/freeboard_ros.py | AutonomyLab/freeboard_ros | 3353351c8ab8210485255d4b3e654e4d015f1205 | [
"MIT"
] | 2 | 2015-10-27T16:18:51.000Z | 2015-10-28T00:22:48.000Z | scripts/freeboard_ros.py | AutonomyLab/freeboard_ros | 3353351c8ab8210485255d4b3e654e4d015f1205 | [
"MIT"
] | 5 | 2016-07-25T22:46:11.000Z | 2021-09-30T17:37:57.000Z | #!/usr/bin/env python
from bottle import route, run, static_file
import os
import rospy
@route('/freeboard/<filename:path>')
@route('/freeboard/')
if __name__ == "__main__":
main()
| 21.774194 | 79 | 0.681481 | #!/usr/bin/env python
from bottle import route, run, static_file
import os
import rospy
@route('/freeboard/<filename:path>')
def send_static(filename):
    """Serve a static asset from the bundled freeboard/ directory."""
    # Resolve the freeboard assets directory relative to this script.
    script_dir = os.path.dirname(os.path.realpath(__file__)) + '/../freeboard/'
    # TODO: remove this debug output (Python 2 print statement).
    print script_dir
    return static_file(filename, root=script_dir)
@route('/freeboard/')
def freeboard():
    """Serve the dashboard entry page."""
    return send_static('index.html')
def main():
    """Start the web server using host/port/debug from the ROS parameter server."""
    rospy.init_node("freeboard_ros_node")
    port = int(rospy.get_param("~port", 3274))
    debug = rospy.get_param("~debug", False)
    host = rospy.get_param("~host", "localhost")
    run(host=host, port=port, debug=debug)
if __name__ == "__main__":
main()
| 417 | 0 | 67 |
f497809383431e609b607f0c7002a333dfe87bd1 | 12,702 | py | Python | car.py | chenyx09/Automated_crossroad | a98876cb09aedfc652bef4ccf0021158753173f7 | [
"MIT"
] | null | null | null | car.py | chenyx09/Automated_crossroad | a98876cb09aedfc652bef4ccf0021158753173f7 | [
"MIT"
] | null | null | null | car.py | chenyx09/Automated_crossroad | a98876cb09aedfc652bef4ccf0021158753173f7 | [
"MIT"
] | 1 | 2021-01-22T03:35:24.000Z | 2021-01-22T03:35:24.000Z | #!/usr/local/bin/python
import pdb
import sys,os,platform,matplotlib
#
# import matplotlib.animation as animation
# import matplotlib.pyplot as plt
import sys
import params
sys.path.append("..")
import scipy.io
import numpy as np
from scipy.integrate import odeint
from numpy import cos, sin, tan, arctan2, sqrt, sign, diag,arctan
from numpy.linalg import norm
current_path = os.path.dirname(os.path.abspath(__file__))
from PIL import Image
from math import pi
from scipy.optimize import newton_krylov, fsolve, anderson, broyden1, broyden2
car_colors = {'blue', 'gray', 'white', 'yellow', 'brown',
'white1','green', 'white_cross', 'cyan', 'red1', 'orange'}
#car_colors = {'blue', 'gray', 'black', 'white', 'yellow', 'brown', 'white1','green', 'white_cross', 'cyan', 'red1', 'orange', 'white2'}
car_figs = dict()
for color in car_colors:
car_figs[color] = current_path + '/imglib/cars/' + color + '_car.png'
class KinematicCar():
'''Kinematic car class
'''
def state_dot(self, state,time, acc,steer):
"""
This function defines the system dynamics
Inputs
acc: acceleration input
steer: steering input
"""
# if already at maximum speed, can't no longer accelerate
if state[2] >= self._vmax and acc>0:
v_dot = 0
elif state[2]<=0 and acc<-1e-3:
v_dot = -state[2]
else:
v_dot = np.clip(acc, self.acc_range[0], self.acc_range[1])
theta_dot = state[2] / self._length * tan(np.clip(steer, self.steer_range[0], self.steer_range[1]))
x_dot = state[2] * cos(state[3])
y_dot = state[2] * sin(state[3])
dstate = [x_dot, y_dot, v_dot, theta_dot ]
return dstate
def next(self, inputs, dt):
"""
next is a function that updates the current position of the car when inputs are applied for a duration of dt
Inputs:
inputs: acceleration and steering inputs
dt: integration time
Outputs:
None - the states of the car will get updated
"""
acc, steer = inputs
# take only the real part of the solution
if dt>0.1:
self.state = odeint(self.state_dot, self.state, t=(0, dt), args=(acc,steer))[1]
else:
self.state = self.state + np.array(self.state_dot(self.state,0,acc,steer))*dt
if self.segment==1:
self.wait_time += dt
def find_corner_coordinates(x_state_center_before, y_state_center_before, x_desired, y_desired, theta, square_fig):
"""
This function takes an image and an angle then computes
the coordinates of the corner (observe that vertical axis here is flipped).
If we'd like to put the point specfied by (x_state_center_before, y_state_center_before) at (x_desired, y_desired),
this function returns the coordinates of the lower left corner of the new image
"""
w, h = square_fig.size
theta = -theta
if abs(w - h) > 1:
print('Warning: Figure has to be square! Otherwise, clipping or unexpected behavior may occur')
# warnings.warn("Warning: Figure has to be square! Otherwise, clipping or unexpected behavior may occur")
R = np.array([[cos(theta), sin(theta)], [-sin(theta), cos(theta)]])
x_corner_center_before, y_corner_center_before = -w/2., -h/2. # lower left corner before rotation
x_corner_center_after, y_corner_center_after = -w/2., -h/2. # doesn't change since figure size remains unchanged
x_state_center_after, y_state_center_after = R.dot(np.array([[x_state_center_before], [y_state_center_before]])) # relative coordinates after rotation by theta
x_state_corner_after = x_state_center_after - x_corner_center_after
y_state_corner_after = y_state_center_after - y_corner_center_after
# x_corner_unknown + x_state_corner_after = x_desired
x_corner_unknown = int(x_desired - x_state_center_after + x_corner_center_after)
# y_corner_unknown + y_state_corner_after = y_desired
y_corner_unknown = int(y_desired - y_state_center_after + y_corner_center_after)
return x_corner_unknown, y_corner_unknown
offset = [-1.3,0.0]
# TESTING
# x0 = np.array([params.X1+1,0,0,pi/2-0.1])
# veh = KinematicCar(x0)
# veh_set = [veh]
# intersection_fig = current_path + '/imglib/intersection_stop1.png'
# intersection = Image.open(intersection_fig)
# background = Image.open(intersection_fig)
# fig = plt.figure()
# ax = fig.add_axes([0,0,1,1]) # get rid of white border
# plt.axis('off')
# ts = 0.05
# def animate(frame_idx,veh_set): # update animation by dt
# global background
# ax.clear()
# for veh in veh_set:
# u = turning_con(veh.state,'N','L',veh._length)
# veh.next(u,ts)
# draw_cars(veh_set, background)
# the_intersection = [ax.imshow(background, origin="lower")]
# background.close()
# background = Image.open(intersection_fig)
# return the_intersection
# ani = animation.FuncAnimation(fig, animate, fargs=(veh_set,),frames=int(5/ts), interval=ts*1000, blit=True, repeat=False)
# plt.show()
# pdb.set_trace()
| 37.358824 | 163 | 0.57424 | #!/usr/local/bin/python
import pdb
import sys,os,platform,matplotlib
#
# import matplotlib.animation as animation
# import matplotlib.pyplot as plt
import sys
import params
sys.path.append("..")
import scipy.io
import numpy as np
from scipy.integrate import odeint
from numpy import cos, sin, tan, arctan2, sqrt, sign, diag,arctan
from numpy.linalg import norm
current_path = os.path.dirname(os.path.abspath(__file__))
from PIL import Image
from math import pi
from scipy.optimize import newton_krylov, fsolve, anderson, broyden1, broyden2
car_colors = {'blue', 'gray', 'white', 'yellow', 'brown',
'white1','green', 'white_cross', 'cyan', 'red1', 'orange'}
#car_colors = {'blue', 'gray', 'black', 'white', 'yellow', 'brown', 'white1','green', 'white_cross', 'cyan', 'red1', 'orange', 'white2'}
car_figs = dict()
for color in car_colors:
car_figs[color] = current_path + '/imglib/cars/' + color + '_car.png'
class KinematicCar():
    '''Kinematic car class

    State vector is [x, y, v, theta]: rear-axle position (x, y), forward
    speed v, and heading angle theta in radians (world frame).
    '''
    def __init__(self,
                 init_state=[0, 0, 0, 0],
                 segment = None,
                 dir = None,
                 goal = None,
                 length = 3, # length of vehicle in pixels
                 acc_max = 9.81*0.4, # maximum acceleration of vehicle
                 acc_min = -9.81*0.8, # maximum deceleration of vehicle
                 steer_max = 0.8, # maximum steering input in radians
                 steer_min = -0.8, # minimum steering input in radians
                 vmax = 30, # maximum velocity
                 color = 'blue'):
        # `color` must be one of the sprites listed in the module-level
        # car_colors set; the matching image is loaded below.
        if color not in car_colors:
            raise Exception("This car color doesn't exist!")
        self._length = length
        self._vmax = vmax
        # Input saturation bounds used by state_dot when integrating.
        self.acc_range = (acc_min, acc_max)
        self.steer_range = (steer_min, steer_max)
        # Accumulated waiting time; only incremented while segment == 1
        # (see next()) — presumably the "queued at the intersection" phase;
        # TODO confirm segment semantics against the caller.
        self.wait_time = 0
        self.state = np.array(init_state, dtype='float')
        self.color = color
        # self.new_unpause = False
        # self.new_pause = False
        # extended state required for Bastian's primitive computation
        self.fig = Image.open(car_figs[color])
        self.segment = segment
        self.dir = dir
        self.goal = goal
        self.crossing_traj = None
        self.baseline_time = None
        self.contract_time = None
    def state_dot(self, state,time, acc,steer):
        """
        This function defines the system dynamics (kinematic bicycle model).
        Returns [x_dot, y_dot, v_dot, theta_dot] for the given state.
        Inputs
        acc: acceleration input
        steer: steering input
        """
        # if already at maximum speed, can't no longer accelerate
        if state[2] >= self._vmax and acc>0:
            v_dot = 0
        elif state[2]<=0 and acc<-1e-3:
            # Prevent the car from reversing: cancel the current speed.
            v_dot = -state[2]
        else:
            v_dot = np.clip(acc, self.acc_range[0], self.acc_range[1])
        # Yaw rate of a bicycle model: v / L * tan(steer), with steer clamped.
        theta_dot = state[2] / self._length * tan(np.clip(steer, self.steer_range[0], self.steer_range[1]))
        x_dot = state[2] * cos(state[3])
        y_dot = state[2] * sin(state[3])
        dstate = [x_dot, y_dot, v_dot, theta_dot ]
        return dstate
    def next(self, inputs, dt):
        """
        next is a function that updates the current position of the car when inputs are applied for a duration of dt
        Inputs:
        inputs: acceleration and steering inputs
        dt: integration time
        Outputs:
        None - the states of the car will get updated
        """
        acc, steer = inputs
        # For long steps use the ODE solver; for short steps a single
        # forward-Euler update is accurate enough and much cheaper.
        if dt>0.1:
            self.state = odeint(self.state_dot, self.state, t=(0, dt), args=(acc,steer))[1]
        else:
            self.state = self.state + np.array(self.state_dot(self.state,0,acc,steer))*dt
        if self.segment==1:
            self.wait_time += dt
def find_corner_coordinates(x_state_center_before, y_state_center_before, x_desired, y_desired, theta, square_fig):
    """
    Compute where the lower-left corner of *square_fig* must be placed so
    that the reference point (x_state_center_before, y_state_center_before)
    — given relative to the figure's center — ends up at
    (x_desired, y_desired) after the figure is rotated by *theta*.
    The image's vertical axis is flipped, hence the negated angle.
    Returns integer (x_corner, y_corner) pixel coordinates.
    """
    width, height = square_fig.size
    if abs(width - height) > 1:
        print('Warning: Figure has to be square! Otherwise, clipping or unexpected behavior may occur')
        # warnings.warn(...) would be the non-printing alternative here.
    # Rotate the reference point by -theta (image coordinates are y-flipped).
    angle = -theta
    rotation = np.array([[cos(angle), sin(angle)], [-sin(angle), cos(angle)]])
    x_after, y_after = rotation.dot(np.array([[x_state_center_before], [y_state_center_before]])).ravel()
    # The corner sits at (-w/2, -h/2) relative to the (size-preserving)
    # rotated figure's center, so solve corner + (point - corner) = desired.
    corner_x = int(x_desired - x_after + (-width / 2.))
    corner_y = int(y_desired - y_after + (-height / 2.))
    return corner_x, corner_y
offset = [-1.3,0.0]
def draw_cars(vehicles, background):
    """Paste each vehicle's (rotated, scaled) sprite onto *background* in place."""
    for vehicle in vehicles:
        x, y, v, theta = vehicle.state
        # Shift from the rear-axle state point by the sprite offset (rotated
        # into the world frame), then convert world units to map pixels.
        x=params.map_scale_factor*(x+offset[0]*cos(theta)-offset[1]*sin(theta))
        y=params.map_scale_factor*(y+offset[0]*sin(theta)+offset[1]*cos(theta))
        # convert angle to degrees and positive counter-clockwise
        theta_d = -theta/np.pi * 180
        vehicle_fig = vehicle.fig
        w_orig, h_orig = vehicle_fig.size
        # expand=False keeps the canvas size fixed during rotation, which can
        # crop the corners — acceptable because the sprite is square.
        vehicle_fig = vehicle_fig.rotate(theta_d, expand = False)
        scaled_vehicle_fig_size = tuple([int(params.car_scale_factor * i) for i in vehicle_fig.size])
        # rescale car
        vehicle_fig = vehicle_fig.resize(scaled_vehicle_fig_size, Image.ANTIALIAS)
        # at (full scale) the relative coordinates of the center of the rear axle w.r.t. the center of the figure is center_to_axle_dist
        x_corner, y_corner = find_corner_coordinates(-params.car_scale_factor * params.center_to_axle_dist, 0, x, y, theta, vehicle_fig)
        # Third argument uses the sprite itself as the alpha mask.
        background.paste(vehicle_fig, (x_corner, y_corner), vehicle_fig)
def straight_con(x,dir,acc_range,steer_range,xf=None):
    """Lane-keeping controller for driving straight in direction *dir*.

    x is the state [x, y, v, theta]; dir is one of 'N','S','E','W'.
    Returns [acc, steer]. If a lead state *xf* is given, the acceleration
    is additionally upper-bounded by a barrier-style braking-distance
    constraint so the car can always stop behind the lead vehicle
    (NOTE(review): looks like a control-barrier-function condition with
    gain alpha — confirm against the accompanying derivation).
    """
    alpha = 3
    amin,amax = acc_range
    # Pick the desired heading and the signed lateral offset from the lane
    # center line for each compass direction.
    if dir == 'N':
        des_theta = pi/2
        x_des = params.X1
        delta_y = -x[0]+x_des
    elif dir =='S':
        des_theta = -pi/2
        x_des = params.X0
        delta_y = x[0]-x_des
    elif dir =='E':
        des_theta = 0
        y_des = params.Y0
        delta_y = x[1]-y_des
    elif dir=='W':
        des_theta = -pi
        y_des = params.Y1
        delta_y = y_des-x[1]
    # Wrap the heading error into (-pi, pi].
    delta_theta = x[3]-des_theta
    while delta_theta>pi:
        delta_theta-=2*pi
    while delta_theta<-pi:
        delta_theta+=2*pi
    # Controller gains and cruise speed.
    Kv = 1
    Ky = 1
    Ktheta = 5
    vdes = 5
    acc = -Kv*(x[2]-vdes)
    if xf is None:
        acc = np.clip(acc,amin,amax)
    else:
        # h >= 0 encodes "gap plus braking-distance margin to the lead car";
        # the gap coordinate depends on the travel direction.
        if dir=='N':
            h = xf[1]-x[1]+(np.sign(xf[2])*xf[2]**2-np.sign(x[2])*x[2]**2)/2/(-amin)
        elif dir=='S':
            h = x[1]-xf[1]+(np.sign(xf[2])*xf[2]**2-np.sign(x[2])*x[2]**2)/2/(-amin)
        elif dir=='E':
            h = xf[0]-x[0]+(np.sign(xf[2])*xf[2]**2-np.sign(x[2])*x[2]**2)/2/(-amin)
        elif dir=='W':
            h = x[0]-xf[0]+(np.sign(xf[2])*xf[2]**2-np.sign(x[2])*x[2]**2)/2/(-amin)
        Lfh = xf[2]-x[2]
        # Keep Lgh strictly negative to avoid dividing by zero at v = 0.
        Lgh = min(x[2]/amin,-1e-3)
        accmax = (-alpha*h-Lfh)/Lgh
        accmax = max(accmax,amin)
        acc = np.clip(acc,amin,accmax)
    # Steering: proportional on lateral and heading error, scaled down at
    # higher speed (the +0.5 keeps the denominator away from zero).
    steer = np.clip((-Ky*delta_y-Ktheta*delta_theta)/(abs(x[2]+0.5)),steer_range[0],steer_range[1])
    u = [acc,steer]
    return u
def turning_con(x,dir1,dir2,L,acc_range,steer_range):
    """Controller for turning through the intersection.

    x is the state [x, y, v, theta]; dir1 in {'N','S','E','W'} is the
    approach direction, dir2 in {'L','R'} the turn direction, and L the
    vehicle wheelbase. Returns [acc, steer].

    Each (dir1, dir2) pair defines a circular arc of radius RL (left) or
    RR (right) around a fixed pivot point; before reaching the pivot the
    car tracks the straight approach lane, after it the car tracks the
    arc with feed-forward steering steer0 = atan(L/R) plus feedback on
    the radial and heading errors.
    """
    RL = params.RL
    RR = params.RR
    if dir1 =='N':
        if dir2 =='L':
            pivot = np.array([params.X1-RL,params.Y1-RL])
            if x[1]<pivot[1]:
                # Still on the straight approach: track the lane center.
                des_theta = pi/2
                delta_y = -x[0]+params.X1
                steer0 = 0
            else:
                # On the arc: heading is tangent to the circle, error is the
                # deviation of the distance-to-pivot from the turn radius.
                des_theta = arctan2(x[1]-pivot[1],x[0]-pivot[0])+pi/2
                delta_y = RL-norm(x[0:2]-pivot)
                steer0 = arctan(L/RL)
        elif dir2 =='R':
            pivot = np.array([params.X1+RR,params.Y0-RR])
            if x[1]<pivot[1]:
                des_theta = pi/2
                delta_y = -x[0]+params.X1
                steer0 = 0
            else:
                des_theta = arctan2(x[1]-pivot[1],x[0]-pivot[0])-pi/2
                delta_y = norm(x[0:2]-pivot)-RR
                steer0 = -arctan(L/RR)
    elif dir1 =='S':
        if dir2 =='L':
            pivot = np.array([params.X0+RL,params.Y0+RL])
            if x[1]>pivot[1]:
                des_theta = -pi/2
                delta_y = x[0]-params.X0
                steer0 = 0
            else:
                des_theta = arctan2(x[1]-pivot[1],x[0]-pivot[0])+pi/2
                delta_y = RL-norm(x[0:2]-pivot)
                steer0 = arctan(L/RL)
        elif dir2 =='R':
            pivot = np.array([params.X0-RR,params.Y1+RR])
            if x[1]>pivot[1]:
                des_theta = -pi/2
                delta_y = x[0]-params.X0
                steer0 = 0
            else:
                des_theta = arctan2(x[1]-pivot[1],x[0]-pivot[0])-pi/2
                delta_y = norm(x[0:2]-pivot)-RR
                steer0 = -arctan(L/RR)
    elif dir1 == 'E':
        if dir2 =='L':
            pivot = np.array([params.X1-RL,params.Y0+RL])
            if x[0]<pivot[0]:
                des_theta = 0
                delta_y = x[1]-params.Y0
                steer0 = 0
            else:
                des_theta = arctan2(x[1]-pivot[1],x[0]-pivot[0])+pi/2
                delta_y = RL-norm(x[0:2]-pivot)
                steer0 = arctan(L/RL)
        elif dir2 =='R':
            pivot = np.array([params.X0-RR,params.Y0-RR])
            if x[0]<pivot[0]:
                des_theta = 0
                delta_y = x[1]-params.Y0
                steer0 = 0
            else:
                des_theta = arctan2(x[1]-pivot[1],x[0]-pivot[0])-pi/2
                delta_y = norm(x[0:2]-pivot)-RR
                steer0 = -arctan(L/RR)
    elif dir1 == 'W':
        if dir2 =='L':
            pivot = np.array([params.X0+RL,params.Y1-RL])
            if x[0]>pivot[0]:
                des_theta = -pi
                delta_y = params.Y1-x[1]
                steer0 = 0
            else:
                des_theta = arctan2(x[1]-pivot[1],x[0]-pivot[0])+pi/2
                delta_y = RL-norm(x[0:2]-pivot)
                steer0 = arctan(L/RL)
        elif dir2 =='R':
            pivot = np.array([params.X1+RR,params.Y1+RR])
            if x[0]>pivot[0]:
                des_theta = -pi
                delta_y = params.Y1-x[1]
                steer0 = 0
            else:
                des_theta = arctan2(x[1]-pivot[1],x[0]-pivot[0])-pi/2
                delta_y = norm(x[0:2]-pivot)-RR
                steer0 = -arctan(L/RR)
    # Wrap the heading error into (-pi, pi].
    delta_theta = x[3]-des_theta
    while delta_theta>pi:
        delta_theta-=2*pi
    while delta_theta<-pi:
        delta_theta+=2*pi
    # Controller gains and cruise speed (same values as straight_con).
    Kv = 1
    Ky = 1
    Ktheta = 5
    vdes = 5
    acc = np.clip(-Kv*(x[2]-vdes),acc_range[0],acc_range[1])
    steer = np.clip(steer0+(-Ky*delta_y-Ktheta*delta_theta)/(abs(x[2]+0.5)),steer_range[0],steer_range[1])
    u = [acc,steer]
    return u
# TESTING
# x0 = np.array([params.X1+1,0,0,pi/2-0.1])
# veh = KinematicCar(x0)
# veh_set = [veh]
# intersection_fig = current_path + '/imglib/intersection_stop1.png'
# intersection = Image.open(intersection_fig)
# background = Image.open(intersection_fig)
# fig = plt.figure()
# ax = fig.add_axes([0,0,1,1]) # get rid of white border
# plt.axis('off')
# ts = 0.05
# def animate(frame_idx,veh_set): # update animation by dt
# global background
# ax.clear()
# for veh in veh_set:
# u = turning_con(veh.state,'N','L',veh._length)
# veh.next(u,ts)
# draw_cars(veh_set, background)
# the_intersection = [ax.imshow(background, origin="lower")]
# background.close()
# background = Image.open(intersection_fig)
# return the_intersection
# ani = animation.FuncAnimation(fig, animate, fargs=(veh_set,),frames=int(5/ts), interval=ts*1000, blit=True, repeat=False)
# plt.show()
# pdb.set_trace()
| 7,520 | 0 | 93 |
3c46de8a118812cd829857d4381ac9993e19aed1 | 6,879 | py | Python | src/pywebapp/www/handlers.py | WalsonTung/pywebapp | 24c3eaab3a2ea5c61a9b872a9f55782552d52891 | [
"Apache-2.0"
] | null | null | null | src/pywebapp/www/handlers.py | WalsonTung/pywebapp | 24c3eaab3a2ea5c61a9b872a9f55782552d52891 | [
"Apache-2.0"
] | null | null | null | src/pywebapp/www/handlers.py | WalsonTung/pywebapp | 24c3eaab3a2ea5c61a9b872a9f55782552d52891 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Walson Tung'
'url handlers'
import re,time,json,logging,hashlib,base64,asyncio
import markdown2
from aiohttp import web
from coreweb import get,post
from apis import *
from models import User,Comment,Blog,next_id
from config import configs
COOKIE_NAME = 'awesession'
_COOKIE_KEY = configs.session.secret
def user2cookie(user,max_age):
'''
Generate cookie str by user.
:param user:
:param max_age:
:return:
'''
#build cookie string by:id-expires-sha1
expires = str(time.time() + max_age)
s = '%s-%s-%s-%s' % (user.id,user.passwd,expires,_COOKIE_KEY)
L = [user.id,expires,hashlib.sha1(s.encode('utf-8')).hexdigest()]
return '-'.join(L)
async def cookie2user(cookie_str):
'''
Parse cookie and load user if cookie is valid.
:param cookie_str:
:return:
'''
if not cookie_str:
return None
try:
L = cookie_str.split('-')
if len(L) != 3:
return None
uid,expires,sha1 = L
if float(expires) < time.time():
return None
user = await User.find(uid)
if user is None:
return None
s = '%s-%s-%s-%s' % (uid,user.passwd,expires,_COOKIE_KEY)
if sha1 != hashlib.sha1(s.encode('utf-8')).hexdigest():
logging.info('invalid sha1')
return None
user.passwd = '******'
return user
except Exception as e:
logging.exception(e)
return None
@get('/')
@get('/blog/{id}')
@get('/register')
@get('/signin')
@post('/api/authenticate')
@get('/signout')
@get('/manage/blogs')
@get('/manage/blogs/create')
_RE_EMAIL = re.compile(r'^[a-z0-9\.\-\_]+\@[a-z0-9\-\_]+(\.[a-z0-9\-\_]+){1,4}$')
_RE_SHA1 = re.compile(r'^[0-9a-f]{40}$')
@post('/api/users')
@get('/api/blogs')
@get('/api/blogs/{id}')
@post('/api/blogs') | 30.303965 | 159 | 0.624364 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Walson Tung'
'url handlers'
import re,time,json,logging,hashlib,base64,asyncio
import markdown2
from aiohttp import web
from coreweb import get,post
from apis import *
from models import User,Comment,Blog,next_id
from config import configs
COOKIE_NAME = 'awesession'
_COOKIE_KEY = configs.session.secret
def check_admin(request):
    """Raise APIPermissionError unless the request carries an admin user."""
    user = request.__user__
    if user is None or not user.admin:
        raise APIPermissionError
def get_page_index(page_str):
    """Parse a page-number query parameter into a valid 1-based page index.

    Returns 1 when the value is missing (None), non-numeric, or < 1.
    """
    try:
        p = int(page_str)
    except (TypeError, ValueError):
        # TypeError covers page_str=None (absent query parameter), which the
        # original version let escape as an unhandled exception.
        p = 1
    return p if p >= 1 else 1
def user2cookie(user, max_age):
    '''
    Serialize *user* into a signed session cookie valid for *max_age* seconds.

    Cookie layout is "<uid>-<expires>-<sha1>"; the digest covers the uid,
    the stored password hash, the expiry timestamp and the server secret,
    so the cookie cannot be forged or extended client-side.
    '''
    expires = str(time.time() + max_age)
    signed = '%s-%s-%s-%s' % (user.id, user.passwd, expires, _COOKIE_KEY)
    digest = hashlib.sha1(signed.encode('utf-8')).hexdigest()
    return '-'.join([user.id, expires, digest])
def text2html(text):
    """Convert plain text into minimal HTML: each non-blank line becomes a
    <p> element with the characters &, < and > escaped as HTML entities
    (the replace chain here had degenerated into no-ops; this restores
    real entity escaping so user content cannot inject markup)."""
    paragraphs = []
    for line in text.split('\n'):
        if line.strip() == '':
            continue
        # Escape '&' first so the entities produced below are not re-escaped.
        escaped = line.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
        paragraphs.append('<p>%s</p>' % escaped)
    return ''.join(paragraphs)
async def cookie2user(cookie_str):
    '''
    Parse a session cookie and load the user if the cookie is valid.

    Expects the "<uid>-<expires>-<sha1>" layout produced by user2cookie.
    Returns the User (with passwd masked) on success, None on any failure:
    missing/malformed cookie, expired timestamp, unknown user, or signature
    mismatch.
    '''
    if not cookie_str:
        return None
    try:
        L = cookie_str.split('-')
        if len(L) != 3:
            return None
        uid,expires,sha1 = L
        # Reject expired cookies before touching the database.
        if float(expires) < time.time():
            return None
        user = await User.find(uid)
        if user is None:
            return None
        # Recompute the signature with the server secret and compare.
        s = '%s-%s-%s-%s' % (uid,user.passwd,expires,_COOKIE_KEY)
        if sha1 != hashlib.sha1(s.encode('utf-8')).hexdigest():
            logging.info('invalid sha1')
            return None
        # Never leak the stored password hash to callers/templates.
        user.passwd = '******'
        return user
    except Exception as e:
        # Any parsing/DB error is treated as "not authenticated".
        logging.exception(e)
        return None
@get('/')
async def index(request):
    # Hard-coded placeholder posts for the landing page; not read from the
    # database (development scaffolding).
    summary = 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.'
    blogs = [
        Blog(id='1',name='Test Blog',summary=summary,created_at=time.time() - 120),
        Blog(id='2',name='Something New',summary=summary,created_at=time.time() - 3600),
        Blog(id='3',name='Learn Swift',summary=summary,created_at=time.time() - 7200)
    ]
    # '__template__' tells the response middleware which template to render.
    return {
        '__template__':'blogs.html',
        'blogs':blogs
    }
@get('/blog/{id}')
async def get_blog(id):
    """Render a single blog post with its comments."""
    blog = await Blog.find(id)
    comments = await Comment.findAll('blog_id=?',[id],orderBy='created_at desc ')
    # Comments are plain text: escape them into simple <p> markup.
    for c in comments:
        c.html_content = text2html(c.content)
    # The post body itself is authored in Markdown.
    blog.html_content = markdown2.markdown(blog.content)
    return {
        '__template__':'blog.html',
        'blog':blog,
        'comments':comments
    }
@get('/register')
def register():
    """Render the user-registration page."""
    return {'__template__': 'register.html'}
@get('/signin')
def signin():
    """Render the sign-in page."""
    return {'__template__': 'signin.html'}
@post('/api/authenticate')
async def authenticate(*,email,passwd):
    """Verify email/password credentials and set the session cookie.

    The client sends passwd already hashed once; the stored value is
    sha1("<uid>:<client-hash>"), recomputed and compared below.
    Raises APIValueError on any invalid credential.
    """
    if not email:
        raise APIValueError('email','Invalid email')
    if not passwd:
        raise APIValueError('passwd','Invalid password.')
    users = await User.findAll('email=?',[email])
    if len(users) == 0:
        raise APIValueError('email','Email not exist')
    user = users[0]
    #check passwd: stored hash is sha1 over "uid:passwd"
    sha1 = hashlib.sha1()
    sha1.update(user.id.encode('utf-8'))
    sha1.update(b':')
    sha1.update(passwd.encode('utf-8'))
    if user.passwd != sha1.hexdigest():
        raise APIValueError('passwd','Invalid password')
    #authenticate ok,set cookie (valid for 24h):
    r = web.Response()
    r.set_cookie(COOKIE_NAME,user2cookie(user,86400),max_age=86400,httponly=True)
    # Mask the password hash before echoing the user object back as JSON.
    user.passwd = '******'
    r.content_type = 'application/json'
    r.body = json.dumps(user,ensure_ascii=False).encode('utf-8')
    return r
@get('/signout')
def signout(request):
    """Log the user out and redirect back to the referring page (or '/')."""
    back_to = request.headers.get('Referer') or '/'
    response = web.HTTPFound(back_to)
    # Expiring the cookie immediately (max_age=0) invalidates the session.
    response.set_cookie(COOKIE_NAME, '-deleted-', max_age=0, httponly=True)
    logging.info('user signed out')
    return response
@get('/manage/blogs')
def manage_blogs(*, page='1'):
    """Render the blog-management listing for the requested page."""
    return {
        '__template__': 'manage_blogs.html',
        'page_index': get_page_index(page),
    }
@get('/manage/blogs/create')
def manage_create_blog():
    """Render the blog editor for a new post (empty id; saves via POST /api/blogs)."""
    return {
        '__template__': 'manage_blog_edit.html',
        'id': '',
        'action': '/api/blogs',
    }
_RE_EMAIL = re.compile(r'^[a-z0-9\.\-\_]+\@[a-z0-9\-\_]+(\.[a-z0-9\-\_]+){1,4}$')
_RE_SHA1 = re.compile(r'^[0-9a-f]{40}$')
@post('/api/users')
async def api_register_user(*,email,name,passwd):
    """Create a new user account and log it in via the session cookie.

    `passwd` must already be the 40-hex-char client-side hash (_RE_SHA1);
    the server stores sha1("<uid>:<client-hash>"). Raises APIValueError on
    bad input and APIError if the email is taken.
    """
    if not name or not name.strip():
        raise APIValueError('name')
    if not email or not _RE_EMAIL.match(email):
        raise APIValueError('email')
    if not passwd or not _RE_SHA1.match(passwd):
        raise APIValueError('passwd')
    users = await User.findAll('email=?',[email])
    if len(users) > 0:
        # NOTE: 'regiester' typo kept — it is an API error code clients may match on.
        raise APIError('regiester:failed','email','Email is already in use.')
    uid = next_id()
    # Salt the stored hash with the freshly generated uid.
    sha1_passwd = '%s:%s' % (uid,passwd)
    user = User(id = uid,name=name.strip(),email=email,
                passwd=hashlib.sha1(sha1_passwd.encode('utf-8')).hexdigest(),
                image='http://www.gravatar.com/avatar/%s?d=mm&s=120' % hashlib.md5(email.encode('utf-8')).hexdigest())
    await user.save()
    #make session cookie (auto-login for 24h)
    r = web.Response()
    r.set_cookie(COOKIE_NAME,user2cookie(user,86400),max_age=86400,httponly=True)
    # Mask the password hash before echoing the user object back as JSON.
    user.passwd = '******'
    r.content_type = 'application/json'
    r.body = json.dumps(user,ensure_ascii=False).encode('utf-8')
    return r
@get('/api/blogs')
async def api_blogs(*,page='1'):
    """Return one page of blog posts (newest first) plus pagination info."""
    page_index = get_page_index(page)
    num = await Blog.findNumber('count(id)')
    p = Page(num,page_index)
    if num == 0:
        # No posts at all: skip the second query entirely.
        return dict(page=p,blogs=())
    blogs = await Blog.findAll(orderBy='created_at desc',limit=(p.offset,p.limit))
    return dict(page=p,blogs=blogs)
@get('/api/blogs/{id}')
async def api_get_blog(*, id):
    """Return the blog post with the given id as JSON."""
    return await Blog.find(id)
@post('/api/blogs')
async def api_create_blog(request,*,name,summary,content):
    """Create a new blog post (admin only) and return the saved object."""
    # Only administrators may publish; raises APIPermissionError otherwise.
    check_admin(request)
    if not name or not name.strip():
        raise APIValueError('name','name cannot be empty.')
    if not summary or not summary.strip():
        raise APIValueError('summary','summary cannot be empty.')
    if not content or not content.strip():
        raise APIValueError('content','content cannot be empty')
    # Author fields are denormalized from the authenticated request user.
    blog = Blog(user_id=request.__user__.id,user_name = request.__user__.name,user_image = request.__user__.image,
                name = name.strip(),summary = summary.strip(),content = content.strip())
    await blog.save()
    return blog
9ab0ade4973a44bb3c5e278f5743f46b0957da3f | 53 | py | Python | src/fruit_fly_net/__init__.py | Ramos-Ramos/fruit-fly-net | 2eb300aff1395455d54150e41d2adf5ba1424886 | [
"MIT"
] | 2 | 2021-11-10T02:43:58.000Z | 2021-11-10T02:44:13.000Z | src/fruit_fly_net/__init__.py | Ramos-Ramos/fruit-fly-net | 2eb300aff1395455d54150e41d2adf5ba1424886 | [
"MIT"
] | null | null | null | src/fruit_fly_net/__init__.py | Ramos-Ramos/fruit-fly-net | 2eb300aff1395455d54150e41d2adf5ba1424886 | [
"MIT"
] | null | null | null | from .fruit_fly_net import FruitFlyNet, bio_hash_loss | 53 | 53 | 0.886792 | from .fruit_fly_net import FruitFlyNet, bio_hash_loss | 0 | 0 | 0 |
87d3d50dd6564e3c92bf352801f8121ec4667892 | 5,445 | py | Python | ilri/iso3166-lookup.py | ilri/DSpace | 588172385e6afec5ec8b1d5e9919797e7bf56364 | [
"BSD-3-Clause"
] | 9 | 2015-03-05T09:47:25.000Z | 2022-02-15T07:06:38.000Z | ilri/iso3166-lookup.py | ilri/DSpace | 588172385e6afec5ec8b1d5e9919797e7bf56364 | [
"BSD-3-Clause"
] | 200 | 2015-01-16T10:10:04.000Z | 2022-02-16T01:16:02.000Z | ilri/iso3166-lookup.py | ilri/DSpace | 588172385e6afec5ec8b1d5e9919797e7bf56364 | [
"BSD-3-Clause"
] | 14 | 2015-04-28T16:43:52.000Z | 2021-05-04T12:36:15.000Z | #!/usr/bin/env python3
#
# iso3166-lookup.py 0.0.1
#
# Copyright 2020 Alan Orth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---
#
# Queries the ISO 3166 dataset for countries read from a text file. Text file
# should have one organization per line. Results are saved to a CSV including
# the country name, whether it matched or not, and the type of match.
#
# This script is written for Python 3.6+ and requires several modules that you
# can install with pip (I recommend using a Python virtual environment):
#
# $ pip install colorama pycountry requests requests-cache
#
import argparse
import csv
import signal
import sys
import pycountry
from colorama import Fore
# read countries from a text file, one per line
parser = argparse.ArgumentParser(
description="Query ISO 3166-1 to validate countries from a text file and save results in a CSV."
)
parser.add_argument(
"-d",
"--debug",
help="Print debug messages to standard error (stderr).",
action="store_true",
)
parser.add_argument(
"-i",
"--input-file",
help="File name containing countries to look up in ISO 3166-1 and ISO 3166-3.",
required=True,
type=argparse.FileType("r"),
)
parser.add_argument(
"-o",
"--output-file",
help="Name of output file to write results to (CSV).",
required=True,
type=argparse.FileType("w", encoding="UTF-8"),
)
args = parser.parse_args()
# set the signal handler for SIGINT (^C) so we can exit cleanly
signal.signal(signal.SIGINT, signal_handler)
# create empty lists to hold country names
country_names = []
country_official_names = []
country_common_names = []
# iterate over countries and append names to the appropriate lists. We can't use
# a list comprehension here because some countries don't have official_name, etc
# and they raise an AttributeError. Anyways, it's more efficient to iterate over
# the list of countries just once.
for country in pycountry.countries:
country_names.append(country.name.lower())
try:
country_official_names.append(country.official_name.lower())
except AttributeError:
pass
try:
country_common_names.append(country.common_name.lower())
except AttributeError:
pass
# Add names for historic countries from ISO 3166-3
for country in pycountry.historic_countries:
country_names.append(country.name.lower())
try:
country_official_names.append(country.official_name.lower())
except AttributeError:
pass
try:
country_common_names.append(country.common_name.lower())
except AttributeError:
pass
read_countries_from_file()
exit()
| 29.117647 | 100 | 0.650321 | #!/usr/bin/env python3
#
# iso3166-lookup.py 0.0.1
#
# Copyright 2020 Alan Orth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---
#
# Queries the ISO 3166 dataset for countries read from a text file. Text file
# should have one organization per line. Results are saved to a CSV including
# the country name, whether it matched or not, and the type of match.
#
# This script is written for Python 3.6+ and requires several modules that you
# can install with pip (I recommend using a Python virtual environment):
#
# $ pip install colorama pycountry requests requests-cache
#
import argparse
import csv
import signal
import sys
import pycountry
from colorama import Fore
# read countries from a text file, one per line
def read_countries_from_file():
    """Read country names (one per line) from the input file, de-duplicated
    in first-seen order, and hand them to resolve_countries()."""
    seen = []
    for raw_line in args.input_file:
        # Normalize away surrounding whitespace, including the newline.
        name = raw_line.strip()
        # Keep first-seen order while dropping repeats.
        if name not in seen:
            seen.append(name)
    # Done reading; release the handle before the lookup phase.
    args.input_file.close()
    resolve_countries(seen)
def resolve_countries(countries):
    """Match each country name against the ISO 3166 name lists and write one
    CSV row per input with the match type ('name', 'official_name',
    'common_name', or empty) and a true/false matched flag."""
    fieldnames = ["country", "match type", "matched"]
    writer = csv.DictWriter(args.output_file, fieldnames=fieldnames)
    writer.writeheader()
    for country in countries:
        if args.debug:
            sys.stderr.write(
                Fore.GREEN + f"Looking up the country: {country!r}\n" + Fore.RESET
            )
        # check for exact match (lists were pre-lowercased, so comparison is
        # case-insensitive); the elif chain encodes the match priority.
        if country.lower() in country_names:
            print(f"Name match for {country!r}")
            writer.writerow(
                {"country": country, "match type": "name", "matched": "true"}
            )
        elif country.lower() in country_official_names:
            print(f"Official name match for {country!r}")
            writer.writerow(
                {"country": country, "match type": "official_name", "matched": "true"}
            )
        elif country.lower() in country_common_names:
            print(f"Common name match for {country!r}")
            writer.writerow(
                {
                    "country": country,
                    "match type": "common_name",
                    "matched": "true",
                }
            )
        else:
            if args.debug:
                sys.stderr.write(
                    Fore.YELLOW + f"No match for {country!r}\n" + Fore.RESET
                )
            writer.writerow(
                {
                    "country": country,
                    "match type": "",
                    "matched": "false",
                }
            )
    # close output file before we exit
    args.output_file.close()
def signal_handler(signum, frame):
    """SIGINT (^C) handler: close the output CSV cleanly, then exit 1.

    The first parameter is renamed from ``signal`` to ``signum`` so it no
    longer shadows the imported ``signal`` module; signal handlers are
    always invoked positionally, so the rename is safe.
    """
    # close output file before we exit
    args.output_file.close()
    sys.exit(1)
parser = argparse.ArgumentParser(
    description="Query ISO 3166-1 to validate countries from a text file and save results in a CSV."
)
parser.add_argument(
    "-d",
    "--debug",
    help="Print debug messages to standard error (stderr).",
    action="store_true",
)
parser.add_argument(
    "-i",
    "--input-file",
    help="File name containing countries to look up in ISO 3166-1 and ISO 3166-3.",
    required=True,
    type=argparse.FileType("r"),
)
parser.add_argument(
    "-o",
    "--output-file",
    help="Name of output file to write results to (CSV).",
    required=True,
    type=argparse.FileType("w", encoding="UTF-8"),
)
args = parser.parse_args()
# set the signal handler for SIGINT (^C) so we can exit cleanly
signal.signal(signal.SIGINT, signal_handler)
# create empty lists to hold country names; everything is stored lowercased
# so lookups in resolve_countries() are case-insensitive
country_names = []
country_official_names = []
country_common_names = []
# iterate over countries and append names to the appropriate lists. We can't use
# a list comprehension here because some countries don't have official_name, etc
# and they raise an AttributeError. Anyways, it's more efficient to iterate over
# the list of countries just once.
for country in pycountry.countries:
    country_names.append(country.name.lower())
    try:
        country_official_names.append(country.official_name.lower())
    except AttributeError:
        pass
    try:
        country_common_names.append(country.common_name.lower())
    except AttributeError:
        pass
# Add names for historic countries from ISO 3166-3
for country in pycountry.historic_countries:
    country_names.append(country.name.lower())
    try:
        country_official_names.append(country.official_name.lower())
    except AttributeError:
        pass
    try:
        country_common_names.append(country.common_name.lower())
    except AttributeError:
        pass
read_countries_from_file()
exit()
| 2,147 | 0 | 68 |
0d3751fdaaaf5a53b5763b13e2f47598209a0d76 | 749 | py | Python | rotate_tree.py | joelarmstrong/analysis-purgatory | 97793976b6c58be2868ed91b8874afafe37e3172 | [
"MIT"
] | null | null | null | rotate_tree.py | joelarmstrong/analysis-purgatory | 97793976b6c58be2868ed91b8874afafe37e3172 | [
"MIT"
] | null | null | null | rotate_tree.py | joelarmstrong/analysis-purgatory | 97793976b6c58be2868ed91b8874afafe37e3172 | [
"MIT"
] | null | null | null | """Rotate a newick tree to put the leaf with a given label first."""
from argparse import ArgumentParser
import newick
if __name__ == '__main__':
parser = ArgumentParser(description=__doc__)
parser.add_argument('newick_file')
parser.add_argument('label')
opts = parser.parse_args()
tree = newick.read(opts.newick_file)[0]
rotate(tree, opts.label)
print(newick.dumps(tree))
| 29.96 | 68 | 0.675567 | """Rotate a newick tree to put the leaf with a given label first."""
from argparse import ArgumentParser
import newick
def rotate(node, label):
    """Recursively rotate *node* so the leaf named *label* comes first.

    Returns True when the label was found somewhere in this subtree (the
    children along the path have been reordered so the matching branch is
    first), False otherwise.  Assumes a strictly binary tree: every
    internal node has exactly two children.
    """
    if node.name == label:
        return True
    if len(node.descendants) == 0:
        return False
    assert len(node.descendants) == 2
    if rotate(node.descendants[1], label):
        # Materialize a real list: reversed() returns a one-shot iterator,
        # which would be exhausted after a single traversal (e.g. the first
        # newick.dumps call) and leave the node apparently childless.
        node.descendants = list(reversed(node.descendants))
        return True
    if rotate(node.descendants[0], label):
        return True
    # Label not present in this subtree; say so explicitly.
    return False
if __name__ == '__main__':
parser = ArgumentParser(description=__doc__)
parser.add_argument('newick_file')
parser.add_argument('label')
opts = parser.parse_args()
tree = newick.read(opts.newick_file)[0]
rotate(tree, opts.label)
print(newick.dumps(tree))
| 324 | 0 | 23 |
9d512c376831ff4585b95a583844a6ee51d1ae19 | 332 | py | Python | Algorithms/power.py | DestroyedEpisode/Python-Projects | d795fd3c7b471f08087ee3f4d2ecb58710687ce2 | [
"MIT"
] | null | null | null | Algorithms/power.py | DestroyedEpisode/Python-Projects | d795fd3c7b471f08087ee3f4d2ecb58710687ce2 | [
"MIT"
] | null | null | null | Algorithms/power.py | DestroyedEpisode/Python-Projects | d795fd3c7b471f08087ee3f4d2ecb58710687ce2 | [
"MIT"
] | null | null | null | # b is base
# n is exponent
print(power(5,7))
print()
print(power2(5,7))
| 15.090909 | 40 | 0.53012 | # b is base
# n is exponent
def power(b, n):
    """Return b**n for a non-negative integer exponent n.

    Uses exponentiation by squaring, so only O(log n) recursive calls are
    made.  The print call preserves the original instrumentation that
    counts invocations.
    """
    print("runtime")
    if n == 0:
        return 1
    if n % 2 == 0:
        # Floor division keeps the exponent an int; the original used true
        # division (/), which converts n to float and loses precision for
        # very large exponents.
        return power(b * b, n // 2)
    else:
        return b * power(b * b, (n - 1) // 2)
def power2(b, n):
    """Return b**n by plain linear recursion (one multiply per step)."""
    print("runtime")
    return 1 if n == 0 else b * power2(b, n - 1)
print(power(5,7))
print()
print(power2(5,7))
| 211 | 0 | 47 |
3234117c2ece4bfcf221ec22fd26866dfadd3d18 | 1,254 | py | Python | magnebot/paths.py | neuroailab/magnebot | 3f537fcd95685efeadf7200208a310a4c6a2f10c | [
"MIT"
] | null | null | null | magnebot/paths.py | neuroailab/magnebot | 3f537fcd95685efeadf7200208a310a4c6a2f10c | [
"MIT"
] | null | null | null | magnebot/paths.py | neuroailab/magnebot | 3f537fcd95685efeadf7200208a310a4c6a2f10c | [
"MIT"
] | null | null | null | from pathlib import Path
from pkg_resources import resource_filename
"""
Paths to data files in this Python module.
"""
# The path to the data files.
# resource_filename resolves relative to the installed package, so these
# paths remain valid regardless of the current working directory.
DATA_DIRECTORY = Path(resource_filename(__name__, "data"))
# The path to object data.
OBJECT_DATA_DIRECTORY = DATA_DIRECTORY.joinpath("objects")
# The path to object categories dictionary.
OBJECT_CATEGORIES_PATH = OBJECT_DATA_DIRECTORY.joinpath("categories.json")
# Data for the Magnebot torso's y values.
TORSO_Y = OBJECT_DATA_DIRECTORY.joinpath("torso_y.csv")
# The path to the scene data.
SCENE_DATA_DIRECTORY = DATA_DIRECTORY.joinpath("scenes")
# The path to the dictionary of where the robot can spawn.
SPAWN_POSITIONS_PATH = SCENE_DATA_DIRECTORY.joinpath("spawn_positions.json")
# The directory for occupancy maps.
OCCUPANCY_MAPS_DIRECTORY = SCENE_DATA_DIRECTORY.joinpath("occupancy_maps")
# The directory for room maps.
ROOM_MAPS_DIRECTORY = SCENE_DATA_DIRECTORY.joinpath("room_maps")
# The path to the scene bounds data.
SCENE_BOUNDS_PATH = SCENE_DATA_DIRECTORY.joinpath("scene_bounds.json")
# The directory of Magnebot data.
MAGNEBOT_DIRECTORY = DATA_DIRECTORY.joinpath("magnebot")
# The path to the turn constants data.
TURN_CONSTANTS_PATH = MAGNEBOT_DIRECTORY.joinpath("turn_constants.csv")
| 41.8 | 76 | 0.810207 | from pathlib import Path
from pkg_resources import resource_filename
"""
Paths to data files in this Python module.
"""
# The path to the data files.
DATA_DIRECTORY = Path(resource_filename(__name__, "data"))
# The path to object data.
OBJECT_DATA_DIRECTORY = DATA_DIRECTORY.joinpath("objects")
# The path to object categories dictionary.
OBJECT_CATEGORIES_PATH = OBJECT_DATA_DIRECTORY.joinpath("categories.json")
# Data for the Magnebot torso's y values.
TORSO_Y = OBJECT_DATA_DIRECTORY.joinpath("torso_y.csv")
# The path to the scene data.
SCENE_DATA_DIRECTORY = DATA_DIRECTORY.joinpath("scenes")
# The path to the dictionary of where the robot can spawn.
SPAWN_POSITIONS_PATH = SCENE_DATA_DIRECTORY.joinpath("spawn_positions.json")
# The directory for occupancy maps.
OCCUPANCY_MAPS_DIRECTORY = SCENE_DATA_DIRECTORY.joinpath("occupancy_maps")
# The directory for room maps.
ROOM_MAPS_DIRECTORY = SCENE_DATA_DIRECTORY.joinpath("room_maps")
# The path to the scene bounds data.
SCENE_BOUNDS_PATH = SCENE_DATA_DIRECTORY.joinpath("scene_bounds.json")
# The directory of Magnebot data.
MAGNEBOT_DIRECTORY = DATA_DIRECTORY.joinpath("magnebot")
# The path to the turn constants data.
TURN_CONSTANTS_PATH = MAGNEBOT_DIRECTORY.joinpath("turn_constants.csv")
| 0 | 0 | 0 |
e5d9a3ae4c72b94b630dfbe24ecfa7a3593469b6 | 11,696 | py | Python | vega/networks/pytorch/customs/modnas/arch_space/torch/shufflenetv2.py | zjzh/vega | aa6e7b8c69024262fc483ee06113b4d1bd5156d8 | [
"Apache-2.0"
] | null | null | null | vega/networks/pytorch/customs/modnas/arch_space/torch/shufflenetv2.py | zjzh/vega | aa6e7b8c69024262fc483ee06113b4d1bd5156d8 | [
"Apache-2.0"
] | null | null | null | vega/networks/pytorch/customs/modnas/arch_space/torch/shufflenetv2.py | zjzh/vega | aa6e7b8c69024262fc483ee06113b4d1bd5156d8 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# This file is adapted from the SinglePathOneShot library at
# https://github.com/megvii-model/SinglePathOneShot
# 2020.6.29-Changed for Modular-NAS search space.
# Huawei Technologies Co., Ltd. <linyunfeng5@huawei.com>
# Copyright 2020 Huawei Technologies Co., Ltd.
"""ShuffleNetV2 architectures."""
import torch
import torch.nn as nn
from modnas.registry.construct import register as register_constructor
from modnas.registry.construct import DefaultMixedOpConstructor, DefaultSlotTraversalConstructor
from modnas.registry.arch_space import build, register
from ..slot import register_slot_ccs
from .. import ops
from ..slot import Slot
kernel_sizes = [3, 5, 7, 9]
for k in kernel_sizes:
register_slot_ccs(
lambda C_in, C_out, S, chn_mid=None, ks=k: ShuffleUnit(C_in, C_out, S, ksize=ks, chn_mid=chn_mid),
'SHU{}'.format(k))
register_slot_ccs(
lambda C_in, C_out, S, chn_mid=None, ks=k: ShuffleUnitXception(C_in, C_out, S, ksize=ks, chn_mid=chn_mid),
'SHX{}'.format(k))
def channel_split(x, split):
"""Return data split in channel dimension."""
if x.size(1) == split * 2:
return torch.split(x, split, dim=1)
else:
raise ValueError('Failed to return data split in channel dimension.')
def shuffle_channels(x, groups=2):
"""Return data shuffled in channel dimension."""
batch_size, channels, height, width = x.size()
if channels % groups == 0:
channels_per_group = channels // groups
x = x.view(batch_size, groups, channels_per_group, height, width)
x = x.transpose(1, 2).contiguous()
x = x.view(batch_size, channels, height, width)
return x
else:
raise ValueError('Failed to return data shuffled in channel dimension.')
class ShuffleUnit(nn.Module):
"""ShuffleNetV2 unit class."""
def forward(self, x):
"""Return network output."""
if self.stride == 1:
x_proj, x = channel_split(x, self.chn_in)
elif self.stride == 2:
x_proj = x
x = torch.cat((self.branch_proj(x_proj), self.branch_main(x)), 1)
x = shuffle_channels(x)
return x
class ShuffleUnitXception(nn.Module):
"""ShuffleNetV2 Xception unit class."""
def forward(self, x):
"""Return network output."""
if self.stride == 1:
x_proj, x = channel_split(x, self.chn_in)
elif self.stride == 2:
x_proj = x
x = torch.cat((self.branch_proj(x_proj), self.branch_main(x)), 1)
x = shuffle_channels(x)
return x
class ShuffleNetV2(nn.Module):
"""ShuffleNetV2 class."""
def _get_stem(self, chn_in, chn, stride=2):
"""Return stem layers."""
if stride == 4:
return nn.Sequential(
nn.Conv2d(chn_in, chn, 3, 2, 1, bias=False),
nn.BatchNorm2d(chn, affine=True),
nn.ReLU(inplace=True),
nn.MaxPool2d(3, 2, 1),
)
return nn.Sequential(
nn.Conv2d(chn_in, chn, 3, stride, 1, bias=False),
nn.BatchNorm2d(chn, affine=True),
nn.ReLU(inplace=True),
)
def forward(self, x):
"""Return network output."""
x = self.features(x)
x = self.globalpool(x)
x = self.dropout(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
"""Initialize weights for all modules."""
first_conv = True
for m in self.modules():
if isinstance(m, nn.Conv2d):
if first_conv:
nn.init.normal_(m.weight, 0, 0.01)
first_conv = False
else:
nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
if m.weight is not None:
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0001)
nn.init.constant_(m.running_mean, 0)
elif isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0001)
nn.init.constant_(m.running_mean, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@register_constructor
class ShuffleNetV2SearchConstructor(DefaultMixedOpConstructor):
"""ShuffleNetV2 mixed operator search space constructor."""
def convert(self, slot):
"""Convert slot to mixed operator."""
cands = self.candidates[:]
if self.add_identity_op and slot.stride == 1 and slot.chn_in == slot.chn_out:
self.candidates.append('IDT')
ent = super().convert(slot)
self.candidates = cands
return ent
@register_constructor
class ShuffleNetV2PredefinedConstructor(DefaultSlotTraversalConstructor):
"""ShuffleNetV2 original network constructor."""
def convert(self, slot):
"""Convert slot to module."""
return build('SHU3', slot)
@register
def shufflenetv2_oneshot(cfgs=None, **kwargs):
"""Return a ShuffleNetV2 oneshot model."""
cfgs = [
[16, 1, 2, 1.0],
[64, 4, 2, 1.0],
[160, 4, 2, 1.0],
[320, 8, 2, 1.0],
[640, 4, 2, 1.0],
[1024, 1, 1, 1.0],
] if cfgs is None else cfgs
return ShuffleNetV2(cfgs=cfgs, **kwargs)
@register
def cifar_shufflenetv2_oneshot(cfgs=None, **kwargs):
"""Return a ShuffleNetV2 oneshot model for CIFAR dataset."""
cfgs = [
[24, 1, 1, 1.0],
[64, 4, 2, 1.0],
[160, 4, 2, 1.0],
[320, 8, 2, 1.0],
[640, 4, 1, 1.0],
[1024, 1, 1, 1.0],
] if cfgs is None else cfgs
return ShuffleNetV2(cfgs=cfgs, **kwargs)
@register
def shufflenetv2(cfgs=None, **kwargs):
"""Return a ShuffleNetV2 model."""
cfgs = [
[24, 1, 4, 1.0],
[116, 4, 2, 1.0],
[232, 8, 2, 1.0],
[464, 4, 2, 1.0],
[1024, 1, 1, 1.0],
] if cfgs is None else cfgs
return ShuffleNetV2(cfgs=cfgs, **kwargs)
@register
def cifar_shufflenetv2(cfgs=None, **kwargs):
"""Return a ShuffleNetV2 model for CIFAR dataset."""
cfgs = [
[24, 1, 1, 1.0],
[116, 4, 2, 1.0],
[232, 8, 2, 1.0],
[464, 4, 2, 1.0],
[1024, 1, 1, 1.0],
] if cfgs is None else cfgs
return ShuffleNetV2(cfgs=cfgs, **kwargs)
| 34.809524 | 114 | 0.556515 | # -*- coding:utf-8 -*-
# This file is adapted from the SinglePathOneShot library at
# https://github.com/megvii-model/SinglePathOneShot
# 2020.6.29-Changed for Modular-NAS search space.
# Huawei Technologies Co., Ltd. <linyunfeng5@huawei.com>
# Copyright 2020 Huawei Technologies Co., Ltd.
"""ShuffleNetV2 architectures."""
import torch
import torch.nn as nn
from modnas.registry.construct import register as register_constructor
from modnas.registry.construct import DefaultMixedOpConstructor, DefaultSlotTraversalConstructor
from modnas.registry.arch_space import build, register
from ..slot import register_slot_ccs
from .. import ops
from ..slot import Slot
kernel_sizes = [3, 5, 7, 9]
for k in kernel_sizes:
register_slot_ccs(
lambda C_in, C_out, S, chn_mid=None, ks=k: ShuffleUnit(C_in, C_out, S, ksize=ks, chn_mid=chn_mid),
'SHU{}'.format(k))
register_slot_ccs(
lambda C_in, C_out, S, chn_mid=None, ks=k: ShuffleUnitXception(C_in, C_out, S, ksize=ks, chn_mid=chn_mid),
'SHX{}'.format(k))
def channel_split(x, split):
    """Return data split in channel dimension."""
    # The unit expects exactly two halves of `split` channels each.
    if x.size(1) != split * 2:
        raise ValueError('Failed to return data split in channel dimension.')
    return torch.split(x, split, dim=1)
def shuffle_channels(x, groups=2):
    """Return data shuffled in channel dimension."""
    batch, channels, height, width = x.size()
    if channels % groups != 0:
        raise ValueError('Failed to return data shuffled in channel dimension.')
    per_group = channels // groups
    # Reshape to (batch, groups, per_group, H, W), swap the group and
    # per-group axes, then flatten back: channels end up interleaved
    # across groups.
    shuffled = x.view(batch, groups, per_group, height, width)
    shuffled = shuffled.transpose(1, 2).contiguous()
    return shuffled.view(batch, channels, height, width)
class ShuffleUnit(nn.Module):
    """ShuffleNetV2 unit class."""
    def __init__(self, chn_in, chn_out, stride, ksize, chn_mid=None):
        """Build a ShuffleNetV2 basic unit.

        Args:
            chn_in: channels entering the unit (with stride 1, each branch
                only processes half of them after the channel split).
            chn_out: channels produced by the unit after concatenation.
            stride: 1 (channel-split unit) or 2 (downsampling unit).
            ksize: depthwise convolution kernel size.
            chn_mid: bottleneck width of the main branch; defaults to
                chn_out // 2.
        """
        super(ShuffleUnit, self).__init__()
        # With stride 1 the forward pass channel-splits the input, so each
        # branch sees only half of the incoming channels.
        chn_in = chn_in // 2 if stride == 1 else chn_in
        chn_mid = int(chn_out // 2) if chn_mid is None else chn_mid
        self.stride = stride
        self.ksize = ksize
        self.chn_in = chn_in
        pad = ksize // 2  # "same" padding for odd kernel sizes
        # The main branch outputs chn_out - chn_in channels so that the
        # concat with the chn_in-channel projection yields exactly chn_out.
        outputs = chn_out - chn_in
        branch_main = [
            # pw
            nn.Conv2d(chn_in, chn_mid, 1, 1, 0, bias=False),
            nn.BatchNorm2d(chn_mid, **ops.config.bn),
            nn.ReLU(inplace=True),
            # dw
            nn.Conv2d(chn_mid, chn_mid, ksize, stride, pad, groups=chn_mid, bias=False),
            nn.BatchNorm2d(chn_mid, **ops.config.bn),
            # pw-linear
            nn.Conv2d(chn_mid, outputs, 1, 1, 0, bias=False),
            nn.BatchNorm2d(outputs, **ops.config.bn),
            nn.ReLU(inplace=True),
        ]
        self.branch_main = nn.Sequential(*branch_main)
        if stride == 2:
            # Downsampling: the bypass path also needs a strided depthwise
            # conv so the two branches agree on spatial size.
            branch_proj = [
                # dw
                nn.Conv2d(chn_in, chn_in, ksize, stride, pad, groups=chn_in, bias=False),
                nn.BatchNorm2d(chn_in, **ops.config.bn),
                # pw-linear
                nn.Conv2d(chn_in, chn_in, 1, 1, 0, bias=False),
                nn.BatchNorm2d(chn_in, **ops.config.bn),
                nn.ReLU(inplace=True),
            ]
        else:
            # Stride 1: the split-off half passes through unchanged.
            branch_proj = []
        self.branch_proj = nn.Sequential(*branch_proj)
    def forward(self, x):
        """Return network output."""
        if self.stride == 1:
            # Half the channels bypass the unit, half go through branch_main.
            x_proj, x = channel_split(x, self.chn_in)
        elif self.stride == 2:
            # Downsampling: both branches process the full input.
            x_proj = x
        x = torch.cat((self.branch_proj(x_proj), self.branch_main(x)), 1)
        x = shuffle_channels(x)
        return x
class ShuffleUnitXception(nn.Module):
    """ShuffleNetV2 Xception unit class."""
    def __init__(self, chn_in, chn_out, stride, ksize=3, chn_mid=None):
        """Build a ShuffleNetV2 Xception-style unit.

        Same constructor contract as ShuffleUnit, but the main branch
        stacks three depthwise/pointwise convolution pairs instead of one;
        only the first depthwise conv carries the stride.
        """
        super(ShuffleUnitXception, self).__init__()
        # With stride 1 the forward pass channel-splits the input.
        chn_in = chn_in // 2 if stride == 1 else chn_in
        chn_mid = int(chn_out // 2) if chn_mid is None else chn_mid
        self.stride = stride
        self.ksize = ksize
        self.chn_in = chn_in
        # Main branch width chosen so concat with the bypass gives chn_out.
        outputs = chn_out - chn_in
        pad = ksize // 2  # "same" padding for odd kernel sizes
        branch_main = [
            # dw
            nn.Conv2d(chn_in, chn_in, ksize, stride, pad, groups=chn_in, bias=False),
            nn.BatchNorm2d(chn_in, **ops.config.bn),
            # pw
            nn.Conv2d(chn_in, chn_mid, 1, 1, 0, bias=False),
            nn.BatchNorm2d(chn_mid, **ops.config.bn),
            nn.ReLU(inplace=True),
            # dw
            nn.Conv2d(chn_mid, chn_mid, ksize, 1, pad, groups=chn_mid, bias=False),
            nn.BatchNorm2d(chn_mid, **ops.config.bn),
            # pw
            nn.Conv2d(chn_mid, chn_mid, 1, 1, 0, bias=False),
            nn.BatchNorm2d(chn_mid, **ops.config.bn),
            nn.ReLU(inplace=True),
            # dw
            nn.Conv2d(chn_mid, chn_mid, ksize, 1, pad, groups=chn_mid, bias=False),
            nn.BatchNorm2d(chn_mid, **ops.config.bn),
            # pw
            nn.Conv2d(chn_mid, outputs, 1, 1, 0, bias=False),
            nn.BatchNorm2d(outputs, **ops.config.bn),
            nn.ReLU(inplace=True),
        ]
        self.branch_main = nn.Sequential(*branch_main)
        if self.stride == 2:
            # Downsampling bypass path, as in ShuffleUnit.
            branch_proj = [
                # dw
                nn.Conv2d(chn_in, chn_in, ksize, stride, pad, groups=chn_in, bias=False),
                nn.BatchNorm2d(chn_in, **ops.config.bn),
                # pw-linear
                nn.Conv2d(chn_in, chn_in, 1, 1, 0, bias=False),
                nn.BatchNorm2d(chn_in, **ops.config.bn),
                nn.ReLU(inplace=True),
            ]
        else:
            branch_proj = []
        self.branch_proj = nn.Sequential(*branch_proj)
    def forward(self, x):
        """Return network output."""
        if self.stride == 1:
            # Half the channels bypass the unit, half go through branch_main.
            x_proj, x = channel_split(x, self.chn_in)
        elif self.stride == 2:
            x_proj = x
        x = torch.cat((self.branch_proj(x_proj), self.branch_main(x)), 1)
        x = shuffle_channels(x)
        return x
class ShuffleNetV2(nn.Module):
    """ShuffleNetV2 class."""
    def __init__(self, cfgs, chn_in=3, n_classes=1000, dropout_rate=0.1):
        """Build the network.

        Args:
            cfgs: list of stage configs ``[chn_out, n_repeats, stride,
                expansion]``.  The first entry defines the stem, the last
                defines the final 1x1 conv, and the entries in between
                become stages of units instantiated as searchable ``Slot``s.
            chn_in: number of input image channels.
            n_classes: size of the classifier output.
            dropout_rate: dropout probability applied before the classifier.
        """
        super(ShuffleNetV2, self).__init__()
        self.out_channels = [cfg[0] for cfg in cfgs]
        self.num_repeats = [cfg[1] for cfg in cfgs]
        self.strides = [cfg[2] for cfg in cfgs]
        self.expansions = [cfg[3] for cfg in cfgs]
        features = []
        for i, (c, n, s, e) in enumerate(cfgs):
            if i == 0:
                # First entry: stem conv (optionally with max-pool, see
                # _get_stem).
                features.append(self._get_stem(chn_in, c, s))
            elif i == len(cfgs) - 1:
                # Last entry: final 1x1 conv before pooling/classifier.
                features.append(
                    nn.Sequential(
                        nn.Conv2d(chn_in, c, 1, 1, 0, bias=False),
                        nn.BatchNorm2d(c, affine=True),
                        nn.ReLU(inplace=True),
                    ))
            else:
                for j in range(n):
                    # Only the first unit of a stage downsamples.
                    block_stride = s if j == 0 else 1
                    # Bottleneck width scaled by the stage's expansion.
                    chn_mid = int(c // 2 * e)
                    features.append(Slot(_chn_in=chn_in, _chn_out=c, _stride=block_stride, chn_mid=chn_mid))
                    chn_in = c
            chn_in = c
        self.features = nn.Sequential(*features)
        self.globalpool = nn.AdaptiveAvgPool2d(1)
        self.dropout = nn.Dropout(dropout_rate)
        self.classifier = nn.Linear(chn_in, n_classes, bias=False)
        self._initialize_weights()
    def _get_stem(self, chn_in, chn, stride=2):
        """Return stem layers."""
        if stride == 4:
            # Stride 4 stem = strided conv followed by max-pool.
            return nn.Sequential(
                nn.Conv2d(chn_in, chn, 3, 2, 1, bias=False),
                nn.BatchNorm2d(chn, affine=True),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(3, 2, 1),
            )
        return nn.Sequential(
            nn.Conv2d(chn_in, chn, 3, stride, 1, bias=False),
            nn.BatchNorm2d(chn, affine=True),
            nn.ReLU(inplace=True),
        )
    def forward(self, x):
        """Return network output."""
        x = self.features(x)
        x = self.globalpool(x)
        x = self.dropout(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
    def _initialize_weights(self):
        """Initialize weights for all modules."""
        # The very first conv gets a fixed small-variance init; every other
        # conv is scaled by its fan-in.
        first_conv = True
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if first_conv:
                    nn.init.normal_(m.weight, 0, 0.01)
                    first_conv = False
                else:
                    nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0001)
                nn.init.constant_(m.running_mean, 0)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0.0001)
                nn.init.constant_(m.running_mean, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
@register_constructor
class ShuffleNetV2SearchConstructor(DefaultMixedOpConstructor):
    """ShuffleNetV2 mixed operator search space constructor."""

    def __init__(self, *args, add_identity_op=True, **kwargs):
        super().__init__(*args, **kwargs)
        self.add_identity_op = add_identity_op

    def convert(self, slot):
        """Convert slot to mixed operator."""
        # Temporarily extend the candidate list with an identity op for
        # slots where a skip connection is shape-compatible, then restore
        # the original candidates for the next slot.
        saved = self.candidates[:]
        identity_ok = (
            self.add_identity_op
            and slot.stride == 1
            and slot.chn_in == slot.chn_out
        )
        if identity_ok:
            self.candidates.append('IDT')
        converted = super().convert(slot)
        self.candidates = saved
        return converted
@register_constructor
class ShuffleNetV2PredefinedConstructor(DefaultSlotTraversalConstructor):
    """ShuffleNetV2 original network constructor."""
    def convert(self, slot):
        """Convert slot to module.

        Always materializes the 3x3 ShuffleNetV2 unit ('SHU3'), i.e. the
        fixed, non-searched architecture.
        """
        return build('SHU3', slot)
@register
def shufflenetv2_oneshot(cfgs=None, **kwargs):
    """Return a ShuffleNetV2 oneshot model."""
    if cfgs is None:
        # Default stage configs: [chn_out, n_repeats, stride, expansion].
        cfgs = [
            [16, 1, 2, 1.0],
            [64, 4, 2, 1.0],
            [160, 4, 2, 1.0],
            [320, 8, 2, 1.0],
            [640, 4, 2, 1.0],
            [1024, 1, 1, 1.0],
        ]
    return ShuffleNetV2(cfgs=cfgs, **kwargs)
@register
def cifar_shufflenetv2_oneshot(cfgs=None, **kwargs):
    """Return a ShuffleNetV2 oneshot model for CIFAR dataset."""
    if cfgs is None:
        # Default stage configs: [chn_out, n_repeats, stride, expansion].
        cfgs = [
            [24, 1, 1, 1.0],
            [64, 4, 2, 1.0],
            [160, 4, 2, 1.0],
            [320, 8, 2, 1.0],
            [640, 4, 1, 1.0],
            [1024, 1, 1, 1.0],
        ]
    return ShuffleNetV2(cfgs=cfgs, **kwargs)
@register
def shufflenetv2(cfgs=None, **kwargs):
    """Return a ShuffleNetV2 model."""
    if cfgs is None:
        # Default stage configs: [chn_out, n_repeats, stride, expansion].
        cfgs = [
            [24, 1, 4, 1.0],
            [116, 4, 2, 1.0],
            [232, 8, 2, 1.0],
            [464, 4, 2, 1.0],
            [1024, 1, 1, 1.0],
        ]
    return ShuffleNetV2(cfgs=cfgs, **kwargs)
@register
def cifar_shufflenetv2(cfgs=None, **kwargs):
    """Return a ShuffleNetV2 model for CIFAR dataset."""
    if cfgs is None:
        # Default stage configs: [chn_out, n_repeats, stride, expansion].
        cfgs = [
            [24, 1, 1, 1.0],
            [116, 4, 2, 1.0],
            [232, 8, 2, 1.0],
            [464, 4, 2, 1.0],
            [1024, 1, 1, 1.0],
        ]
    return ShuffleNetV2(cfgs=cfgs, **kwargs)
| 4,865 | 0 | 108 |
0b6aba60e86b40e506e3288de40234240d1fc8f7 | 462 | py | Python | series_tiempo_ar_api/apps/management/migrations/0003_auto_20180720_1504.py | datosgobar/series-tiempo-ar-api | 6b553c573f6e8104f8f3919efe79089b7884280c | [
"MIT"
] | 28 | 2017-12-16T20:30:52.000Z | 2021-08-11T17:35:04.000Z | series_tiempo_ar_api/apps/management/migrations/0003_auto_20180720_1504.py | datosgobar/series-tiempo-ar-api | 6b553c573f6e8104f8f3919efe79089b7884280c | [
"MIT"
] | 446 | 2017-11-16T15:21:40.000Z | 2021-06-10T20:14:21.000Z | series_tiempo_ar_api/apps/management/migrations/0003_auto_20180720_1504.py | datosgobar/series-tiempo-ar-api | 6b553c573f6e8104f8f3919efe79089b7884280c | [
"MIT"
] | 12 | 2018-08-23T16:13:32.000Z | 2022-03-01T23:12:28.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-07-20 18:04
from __future__ import unicode_literals
from django.db import migrations
| 20.086957 | 50 | 0.58658 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-07-20 18:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the Node model and its 'admins' M2M field from 'management'."""
    dependencies = [
        ('management', '0002_auto_20180521_1429'),
    ]
    operations = [
        # Drop the 'admins' M2M field first, then the Node model itself.
        migrations.RemoveField(
            model_name='node',
            name='admins',
        ),
        migrations.DeleteModel(
            name='Node',
        ),
    ]
| 0 | 291 | 23 |
a61175c9afaba42b069c4aa075e4239fbf25c4e8 | 9,744 | py | Python | pyvisa/testsuite/keysight_assisted_tests/test_resource_manager.py | jpsecher/pyvisa | bb8fd9d21b1efa1f311e12402e21292a656a0e6a | [
"MIT"
] | null | null | null | pyvisa/testsuite/keysight_assisted_tests/test_resource_manager.py | jpsecher/pyvisa | bb8fd9d21b1efa1f311e12402e21292a656a0e6a | [
"MIT"
] | null | null | null | pyvisa/testsuite/keysight_assisted_tests/test_resource_manager.py | jpsecher/pyvisa | bb8fd9d21b1efa1f311e12402e21292a656a0e6a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Test the capabilities of the ResourceManager.
"""
import gc
import logging
import re
import pytest
from pyvisa import InvalidSession, ResourceManager, VisaIOError, errors
from pyvisa.constants import AccessModes, InterfaceType, StatusCode
from pyvisa.highlevel import VisaLibraryBase
from pyvisa.rname import ResourceName
from pyvisa.testsuite import BaseTestCase
from . import RESOURCE_ADDRESSES, require_virtual_instr
@require_virtual_instr
class TestResourceManager:
"""Test the pyvisa ResourceManager."""
def setup_method(self):
"""Create a ResourceManager with the default backend library."""
self.rm = ResourceManager()
def teardown_method(self):
"""Close the ResourceManager."""
if self.rm is not None:
self.rm.close()
del self.rm
gc.collect()
def test_lifecycle(self, caplog):
"""Test creation and closing of the resource manager."""
assert self.rm.session is not None
assert self.rm.visalib is not None
assert self.rm is self.rm.visalib.resource_manager
assert not self.rm.list_opened_resources()
assert self.rm.visalib is ResourceManager(self.rm.visalib).visalib
with caplog.at_level(level=logging.DEBUG, logger="pyvisa"):
self.rm.close()
assert caplog.records
with pytest.raises(InvalidSession):
self.rm.session
assert self.rm.visalib.resource_manager is None
def test_cleanup_on_del(self, caplog):
"""Test that deleting the rm does clean the VISA session"""
# The test seems to assert what it should even though the coverage report
# seems wrong
rm = self.rm
self.rm = None
with caplog.at_level(logging.DEBUG, logger="pyvisa"):
del rm
gc.collect()
assert "Closing ResourceManager" in caplog.records[0].message
def test_resource_manager_unicity(self):
"""Test the resource manager is unique per backend as expected."""
new_rm = ResourceManager()
assert self.rm is new_rm
assert self.rm.session == new_rm.session
def test_str(self):
"""Test computing the string representation of the resource manager"""
assert re.match(r"Resource Manager of .*", str(self.rm))
self.rm.close()
assert re.match(r"Resource Manager of .*", str(self.rm))
def test_repr(self):
"""Test computing the repr of the resource manager"""
assert re.match(r"<ResourceManager\(<.*>\)>", repr(self.rm))
self.rm.close()
assert re.match(r"<ResourceManager\(<.*>\)>", repr(self.rm))
def test_last_status(self):
"""Test accessing the status of the last operation."""
assert self.rm.last_status == StatusCode.success
# Access the generic last status through the visalib
assert self.rm.last_status == self.rm.visalib.last_status
# Test accessing the status for an invalid session
with pytest.raises(errors.Error) as cm:
self.rm.visalib.get_last_status_in_session("_nonexisting_")
assert "The session" in cm.exconly()
def test_list_resource(self):
"""Test listing the available resources."""
# Default settings
resources = self.rm.list_resources()
for v in (v for v in RESOURCE_ADDRESSES.values() if v.endswith("INSTR")):
assert str(ResourceName.from_string(v)) in resources
# All resources
resources = self.rm.list_resources("?*")
for v in RESOURCE_ADDRESSES.values():
assert str(ResourceName.from_string(v)) in resources
def test_accessing_resource_infos(self):
"""Test accessing resource infos."""
rname = list(RESOURCE_ADDRESSES.values())[0]
rinfo_ext = self.rm.resource_info(rname)
rinfo = self.rm.resource_info(rname, extended=False)
rname = ResourceName().from_string(rname)
assert rinfo_ext.interface_type == getattr(
InterfaceType, rname.interface_type.lower()
)
assert rinfo_ext.interface_board_number == int(rname.board)
assert rinfo_ext.resource_class == rname.resource_class
assert rinfo_ext.resource_name == str(rname)
assert rinfo.interface_type == getattr(
InterfaceType, rname.interface_type.lower()
)
assert rinfo.interface_board_number == int(rname.board)
def test_listing_resource_infos(self):
"""Test listing resource infos."""
infos = self.rm.list_resources_info()
for rname, rinfo_ext in infos.items():
rname = ResourceName().from_string(rname)
assert rinfo_ext.interface_type == getattr(
InterfaceType, rname.interface_type.lower()
)
assert rinfo_ext.interface_board_number == int(rname.board)
assert rinfo_ext.resource_class == rname.resource_class
assert rinfo_ext.resource_name == str(rname)
def test_opening_resource(self):
"""Test opening and closing resources."""
rname = list(RESOURCE_ADDRESSES.values())[0]
rsc = self.rm.open_resource(rname, timeout=1234)
# Check the resource is listed as opened and the attributes are right.
assert rsc in self.rm.list_opened_resources()
assert rsc.timeout == 1234
# Close the rm to check that we close all resources.
self.rm.close()
assert not self.rm.list_opened_resources()
with pytest.raises(InvalidSession):
rsc.session
def test_opening_resource_bad_open_timeout(self):
"""Test opening a resource with a non integer open_timeout."""
rname = list(RESOURCE_ADDRESSES.values())[0]
with pytest.raises(ValueError) as cm:
self.rm.open_resource(rname, open_timeout="")
assert "integer (or compatible type)" in str(cm.exconly())
def test_opening_resource_with_lock(self):
"""Test opening a locked resource"""
rname = list(RESOURCE_ADDRESSES.values())[0]
rsc = self.rm.open_resource(rname, access_mode=AccessModes.exclusive_lock)
assert len(self.rm.list_opened_resources()) == 1
# Timeout when accessing a locked resource
with pytest.raises(VisaIOError):
self.rm.open_resource(rname, access_mode=AccessModes.exclusive_lock)
assert len(self.rm.list_opened_resources()) == 1
# Success to access an unlocked resource.
rsc.unlock()
with self.rm.open_resource(
rname, access_mode=AccessModes.exclusive_lock
) as rsc2:
assert rsc.session != rsc2.session
assert len(self.rm.list_opened_resources()) == 2
def test_opening_resource_specific_class(self):
"""Test opening a resource requesting a specific class."""
rname = list(RESOURCE_ADDRESSES.values())[0]
with pytest.raises(TypeError):
self.rm.open_resource(rname, resource_pyclass=object)
assert len(self.rm.list_opened_resources()) == 0
def test_open_resource_unknown_resource_type(self, caplog):
"""Test opening a resource for which no registered class exist."""
rc = ResourceManager._resource_classes
old = rc.copy()
rc[(InterfaceType.unknown, "")] = FakeResource
del rc[(InterfaceType.tcpip, "INSTR")]
rm = ResourceManager()
try:
caplog.clear()
with caplog.at_level(level=logging.DEBUG, logger="pyvisa"):
with pytest.raises(RuntimeError):
rm.open_resource("TCPIP::192.168.0.1::INSTR")
assert caplog.records
finally:
ResourceManager._resource_classes = old
def test_opening_resource_unknown_attribute(self):
"""Test opening a resource and attempting to set an unknown attr."""
rname = list(RESOURCE_ADDRESSES.values())[0]
with pytest.raises(ValueError):
self.rm.open_resource(rname, unknown_attribute=None)
assert len(self.rm.list_opened_resources()) == 0
def test_get_instrument(self):
"""Check that we get the expected deprecation warning."""
rname = list(RESOURCE_ADDRESSES.values())[0]
with pytest.warns(FutureWarning):
self.rm.get_instrument(rname)
@require_virtual_instr
class TestResourceParsing(BaseTestCase):
    """Test parsing resources using the builtin mechanism and the VISA lib.
    Those tests require that the interface exist (at least in Keysight
    implementation) so we cannot test arbitrary interfaces (PXI for example).
    """
    def setup_method(self):
        """Create a ResourceManager with the default backend library."""
        self.rm = ResourceManager()
    def teardown_method(self):
        """Close the ResourceManager."""
        del self.rm
        # Dropping the last reference triggers the manager's cleanup;
        # collect immediately so the VISA session closes deterministically.
        gc.collect()
| 36.223048 | 82 | 0.654044 | # -*- coding: utf-8 -*-
"""Test the capabilities of the ResourceManager.
"""
import gc
import logging
import re
import pytest
from pyvisa import InvalidSession, ResourceManager, VisaIOError, errors
from pyvisa.constants import AccessModes, InterfaceType, StatusCode
from pyvisa.highlevel import VisaLibraryBase
from pyvisa.rname import ResourceName
from pyvisa.testsuite import BaseTestCase
from . import RESOURCE_ADDRESSES, require_virtual_instr
@require_virtual_instr
class TestResourceManager:
"""Test the pyvisa ResourceManager."""
def setup_method(self):
"""Create a ResourceManager with the default backend library."""
self.rm = ResourceManager()
def teardown_method(self):
"""Close the ResourceManager."""
if self.rm is not None:
self.rm.close()
del self.rm
gc.collect()
def test_lifecycle(self, caplog):
"""Test creation and closing of the resource manager."""
assert self.rm.session is not None
assert self.rm.visalib is not None
assert self.rm is self.rm.visalib.resource_manager
assert not self.rm.list_opened_resources()
assert self.rm.visalib is ResourceManager(self.rm.visalib).visalib
with caplog.at_level(level=logging.DEBUG, logger="pyvisa"):
self.rm.close()
assert caplog.records
with pytest.raises(InvalidSession):
self.rm.session
assert self.rm.visalib.resource_manager is None
def test_cleanup_on_del(self, caplog):
"""Test that deleting the rm does clean the VISA session"""
# The test seems to assert what it should even though the coverage report
# seems wrong
rm = self.rm
self.rm = None
with caplog.at_level(logging.DEBUG, logger="pyvisa"):
del rm
gc.collect()
assert "Closing ResourceManager" in caplog.records[0].message
def test_resource_manager_unicity(self):
"""Test the resource manager is unique per backend as expected."""
new_rm = ResourceManager()
assert self.rm is new_rm
assert self.rm.session == new_rm.session
def test_str(self):
"""Test computing the string representation of the resource manager"""
assert re.match(r"Resource Manager of .*", str(self.rm))
self.rm.close()
assert re.match(r"Resource Manager of .*", str(self.rm))
def test_repr(self):
"""Test computing the repr of the resource manager"""
assert re.match(r"<ResourceManager\(<.*>\)>", repr(self.rm))
self.rm.close()
assert re.match(r"<ResourceManager\(<.*>\)>", repr(self.rm))
def test_last_status(self):
"""Test accessing the status of the last operation."""
assert self.rm.last_status == StatusCode.success
# Access the generic last status through the visalib
assert self.rm.last_status == self.rm.visalib.last_status
# Test accessing the status for an invalid session
with pytest.raises(errors.Error) as cm:
self.rm.visalib.get_last_status_in_session("_nonexisting_")
assert "The session" in cm.exconly()
def test_list_resource(self):
"""Test listing the available resources."""
# Default settings
resources = self.rm.list_resources()
for v in (v for v in RESOURCE_ADDRESSES.values() if v.endswith("INSTR")):
assert str(ResourceName.from_string(v)) in resources
# All resources
resources = self.rm.list_resources("?*")
for v in RESOURCE_ADDRESSES.values():
assert str(ResourceName.from_string(v)) in resources
    def test_accessing_resource_infos(self):
        """Test accessing resource infos."""
        # Use any known resource address as a probe.
        rname = list(RESOURCE_ADDRESSES.values())[0]
        rinfo_ext = self.rm.resource_info(rname)
        rinfo = self.rm.resource_info(rname, extended=False)
        # Parse the address ourselves to know what the infos should contain.
        rname = ResourceName().from_string(rname)
        # Extended info carries interface, board, class and canonical name.
        assert rinfo_ext.interface_type == getattr(
            InterfaceType, rname.interface_type.lower()
        )
        assert rinfo_ext.interface_board_number == int(rname.board)
        assert rinfo_ext.resource_class == rname.resource_class
        assert rinfo_ext.resource_name == str(rname)
        # Non-extended info only exposes the interface-related fields.
        assert rinfo.interface_type == getattr(
            InterfaceType, rname.interface_type.lower()
        )
        assert rinfo.interface_board_number == int(rname.board)
    def test_listing_resource_infos(self):
        """Test listing resource infos."""
        infos = self.rm.list_resources_info()
        # Every reported entry must be consistent with its parsed address.
        for rname, rinfo_ext in infos.items():
            rname = ResourceName().from_string(rname)
            assert rinfo_ext.interface_type == getattr(
                InterfaceType, rname.interface_type.lower()
            )
            assert rinfo_ext.interface_board_number == int(rname.board)
            assert rinfo_ext.resource_class == rname.resource_class
            assert rinfo_ext.resource_name == str(rname)
def test_opening_resource(self):
"""Test opening and closing resources."""
rname = list(RESOURCE_ADDRESSES.values())[0]
rsc = self.rm.open_resource(rname, timeout=1234)
# Check the resource is listed as opened and the attributes are right.
assert rsc in self.rm.list_opened_resources()
assert rsc.timeout == 1234
# Close the rm to check that we close all resources.
self.rm.close()
assert not self.rm.list_opened_resources()
with pytest.raises(InvalidSession):
rsc.session
def test_opening_resource_bad_open_timeout(self):
"""Test opening a resource with a non integer open_timeout."""
rname = list(RESOURCE_ADDRESSES.values())[0]
with pytest.raises(ValueError) as cm:
self.rm.open_resource(rname, open_timeout="")
assert "integer (or compatible type)" in str(cm.exconly())
    def test_opening_resource_with_lock(self):
        """Test opening a locked resource"""
        rname = list(RESOURCE_ADDRESSES.values())[0]
        # First open grabs an exclusive lock on the instrument.
        rsc = self.rm.open_resource(rname, access_mode=AccessModes.exclusive_lock)
        assert len(self.rm.list_opened_resources()) == 1
        # Timeout when accessing a locked resource
        with pytest.raises(VisaIOError):
            self.rm.open_resource(rname, access_mode=AccessModes.exclusive_lock)
        assert len(self.rm.list_opened_resources()) == 1
        # Success to access an unlocked resource.
        rsc.unlock()
        with self.rm.open_resource(
            rname, access_mode=AccessModes.exclusive_lock
        ) as rsc2:
            # Both opens target the same instrument but use distinct sessions.
            assert rsc.session != rsc2.session
            assert len(self.rm.list_opened_resources()) == 2
def test_opening_resource_specific_class(self):
"""Test opening a resource requesting a specific class."""
rname = list(RESOURCE_ADDRESSES.values())[0]
with pytest.raises(TypeError):
self.rm.open_resource(rname, resource_pyclass=object)
assert len(self.rm.list_opened_resources()) == 0
    def test_open_resource_unknown_resource_type(self, caplog):
        """Test opening a resource for which no registered class exist."""
        # Temporarily rewire the class registry: register a failing fallback
        # and drop the TCPIP INSTR entry so the fallback gets used.
        rc = ResourceManager._resource_classes
        old = rc.copy()
        class FakeResource:
            # Stand-in resource class whose construction always fails.
            def __init__(self, *args):
                raise RuntimeError()
        rc[(InterfaceType.unknown, "")] = FakeResource
        del rc[(InterfaceType.tcpip, "INSTR")]
        rm = ResourceManager()
        try:
            caplog.clear()
            with caplog.at_level(level=logging.DEBUG, logger="pyvisa"):
                with pytest.raises(RuntimeError):
                    rm.open_resource("TCPIP::192.168.0.1::INSTR")
            assert caplog.records
        finally:
            # Always restore the registry so other tests are unaffected.
            ResourceManager._resource_classes = old
def test_opening_resource_unknown_attribute(self):
"""Test opening a resource and attempting to set an unknown attr."""
rname = list(RESOURCE_ADDRESSES.values())[0]
with pytest.raises(ValueError):
self.rm.open_resource(rname, unknown_attribute=None)
assert len(self.rm.list_opened_resources()) == 0
def test_get_instrument(self):
"""Check that we get the expected deprecation warning."""
rname = list(RESOURCE_ADDRESSES.values())[0]
with pytest.warns(FutureWarning):
self.rm.get_instrument(rname)
@require_virtual_instr
class TestResourceParsing(BaseTestCase):
    """Compare builtin resource-name parsing with the VISA library's.

    Those tests require that the interface exist (at least in Keysight
    implementation) so we cannot test arbitrary interfaces (PXI for example).

    """
    def setup_method(self):
        """Create a ResourceManager with the default backend library."""
        self.rm = ResourceManager()
    def teardown_method(self):
        """Close the ResourceManager."""
        del self.rm
        gc.collect()
    def test_parse_tcpip_instr(self):
        self._parse_test("TCPIP::192.168.200.200::INSTR")
    def test_parse_tcpip_socket(self):
        self._parse_test("TCPIP::192.168.200.200::7020::SOCKET")
    def _parse_test(self, resource_name):
        """Assert the base-class parser agrees with the installed library."""
        visalib = self.rm.visalib
        session = self.rm.session
        # Extended parsing: library result vs. the base-class fallback.
        from_lib = visalib.parse_resource_extended(session, resource_name)
        from_base = VisaLibraryBase.parse_resource_extended(
            visalib, session, resource_name
        )
        assert from_lib == from_base
        # Plain (non-extended) parsing must agree as well.
        from_lib = visalib.parse_resource(session, resource_name)
        from_base = VisaLibraryBase.parse_resource(visalib, session, resource_name)
        assert from_lib == from_base
| 683 | -2 | 146 |
fd8128ee1dc7f05db20a1a0fa30dca6cc8000eed | 17,908 | py | Python | framework/plugin/plugin_helper.py | DarKnight24/owtf | cb4d17ecfec1e8a2f22af3cb0ef7b33f8173825c | [
"BSD-3-Clause"
] | 2 | 2017-10-10T06:30:38.000Z | 2021-06-17T12:59:59.000Z | framework/plugin/plugin_helper.py | unexpectedBy/owtf | 695b6dc723756bffcd6c21f6e962a927758ae4f9 | [
"BSD-3-Clause"
] | null | null | null | framework/plugin/plugin_helper.py | unexpectedBy/owtf | 695b6dc723756bffcd6c21f6e962a927758ae4f9 | [
"BSD-3-Clause"
] | 3 | 2017-12-30T20:43:22.000Z | 2020-02-29T07:58:32.000Z | #!/usr/bin/env python
'''
This module contains helper functions to make plugins simpler to read and write,
centralising common functionality easy to reuse
'''
import os
import re
import cgi
import logging
from tornado.template import Template
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.lib.exceptions import FrameworkAbortException, PluginAbortException
from framework.lib.general import *
from framework.utils import FileOperations
PLUGIN_OUTPUT = {"type": None, "output": None} # This will be json encoded and stored in db as string
| 48.928962 | 120 | 0.64407 | #!/usr/bin/env python
'''
This module contains helper functions to make plugins simpler to read and write,
centralising common functionality easy to reuse
'''
import os
import re
import cgi
import logging
from tornado.template import Template
from framework.dependency_management.dependency_resolver import BaseComponent
from framework.lib.exceptions import FrameworkAbortException, PluginAbortException
from framework.lib.general import *
from framework.utils import FileOperations
PLUGIN_OUTPUT = {"type": None, "output": None} # This will be json encoded and stored in db as string
class PluginHelper(BaseComponent):
    """Helpers plugins use to run commands and build JSON-encodable output.

    Every public method returns a list of dicts shaped like PLUGIN_OUTPUT
    (a "type" tag plus a "output" payload) that the reporter later renders.
    """
    COMPONENT_NAME = "plugin_helper"
    # NOTE(review): not referenced inside this class -- confirm it is used by
    # plugins before removing.
    mNumLinesToShow = 25
    def __init__(self):
        # Register self and cache the framework components this helper uses.
        self.register_in_service_locator()
        self.config = self.get_component("config")
        self.target = self.get_component("target")
        self.url_manager = self.get_component("url_manager")
        self.plugin_handler = self.get_component("plugin_handler")
        self.reporter = self.get_component("reporter")
        self.requester = self.get_component("requester")
        self.shell = self.get_component("shell")
        self.timer = self.get_component("timer")
        # Compile regular expressions only once on init:
        self.RobotsAllowRegexp = re.compile("Allow: ([^\n #]+)")
        self.RobotsDisallowRegexp = re.compile("Disallow: ([^\n #]+)")
        self.RobotsSiteMap = re.compile("Sitemap: ([^\n #]+)")
    def MultipleReplace(self, Text, ReplaceDict): # This redundant method is here so that plugins can use it
        return MultipleReplace(Text, ReplaceDict)
    def CommandTable(self, Command):
        """Build a 'CommandTable' output entry for a single command string."""
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "CommandTable"
        plugin_output["output"] = {"Command": Command}
        return ([plugin_output])
    def LinkList(self, LinkListName, Links):
        """Build a 'LinkList' output entry (a named list of links)."""
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "LinkList"
        plugin_output["output"] = {"LinkListName": LinkListName, "Links": Links}
        return ([plugin_output])
    def ResourceLinkList(self, ResourceListName, ResourceList):
        """Build a 'ResourceLinkList' output entry for (name, url) pairs."""
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "ResourceLinkList"
        plugin_output["output"] = {"ResourceListName": ResourceListName, "ResourceList": ResourceList}
        return ([plugin_output])
    def TabbedResourceLinkList(self, ResourcesList):
        """Build a 'TabbedResourceLinkList' output entry (tab per list)."""
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "TabbedResourceLinkList"
        plugin_output["output"] = {"ResourcesList": ResourcesList}
        return ([plugin_output])
    def ListPostProcessing(self, ResourceListName, LinkList, HTMLLinkList):
        """Build a 'ListPostProcessing' entry with raw and HTML link lists."""
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "ListPostProcessing"
        plugin_output["output"] = {
            "ResourceListName": ResourceListName,
            "LinkList": LinkList,
            "HTMLLinkList": HTMLLinkList
        }
        return ([plugin_output])
    def RequestLinkList(self, ResourceListName, ResourceList, PluginInfo):
        """Fetch each resource and save a sandboxed iframe view of it.

        Resources may embed POST data after a '###POST###' marker; responses
        are sanitized, dumped to the plugin output dir and exposed through a
        sandboxed iframe so the report cannot execute fetched content.
        """
        LinkList = []
        for Name, Resource in ResourceList:
            Chunks = Resource.split('###POST###')
            URL = Chunks[0]
            POST = None
            Method = 'GET'
            if len(Chunks) > 1: # POST
                Method = 'POST'
                POST = Chunks[1]
            Transaction = self.requester.GetTransaction(True, URL, Method, POST)
            if Transaction.Found:
                RawHTML = Transaction.GetRawResponseBody()
                FilteredHTML = self.reporter.sanitize_html(RawHTML)
                NotSandboxedPath = self.plugin_handler.DumpOutputFile("NOT_SANDBOXED_%s.html" % Name, FilteredHTML,
                                                                     PluginInfo)
                logging.info("File: NOT_SANDBOXED_%s.html saved to: %s", Name, NotSandboxedPath)
                iframe_template = Template("""
                    <iframe src="{{ NotSandboxedPath }}" sandbox="" security="restricted" frameborder='0'
                    style="overflow-y:auto; overflow-x:hidden;width:100%;height:100%;" >
                    Your browser does not support iframes
                    </iframe>
                    """)
                iframe = iframe_template.generate(NotSandboxedPath=NotSandboxedPath.split('/')[-1])
                SandboxedPath = self.plugin_handler.DumpOutputFile("SANDBOXED_%s.html" % Name, iframe, PluginInfo)
                logging.info("File: SANDBOXED_%s.html saved to: %s", Name, SandboxedPath)
                LinkList.append((Name, SandboxedPath))
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "RequestLinkList"
        plugin_output["output"] = {"ResourceListName": ResourceListName, "LinkList": LinkList}
        return ([plugin_output])
    def VulnerabilitySearchBox(self, SearchStr):
        """Build a 'VulnerabilitySearchBox' entry pre-filled with SearchStr."""
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "VulnerabilitySearchBox"
        plugin_output["output"] = {"SearchStr": SearchStr}
        return ([plugin_output])
    def SuggestedCommandBox(self, PluginInfo, CommandCategoryList, Header=''):
        """Build a 'SuggestedCommandBox' entry; ensures the output dir exists."""
        plugin_output = dict(PLUGIN_OUTPUT)
        PluginOutputDir = self.InitPluginOutputDir(PluginInfo)
        plugin_output["type"] = "SuggestedCommandBox"
        plugin_output["output"] = {
            "PluginOutputDir": PluginOutputDir,
            "CommandCategoryList": CommandCategoryList,
            "Header": Header
        }
        return ([plugin_output])
    def SetConfigPluginOutputDir(self, PluginInfo):
        """Register this plugin's output dir in the target config and shell."""
        PluginOutputDir = self.plugin_handler.GetPluginOutputDir(PluginInfo)
        # FULL output path for plugins to use
        self.target.SetPath('plugin_output_dir', "%s/%s" % (os.getcwd(), PluginOutputDir))
        self.shell.RefreshReplacements() # Get dynamic replacement, i.e. plugin-specific output directory
        return PluginOutputDir
    def InitPluginOutputDir(self, PluginInfo):
        """Register and create the plugin output dir, returning its path."""
        PluginOutputDir = self.SetConfigPluginOutputDir(PluginInfo)
        FileOperations.create_missing_dirs(PluginOutputDir) # Create output dir so that scripts can cd to it :)
        return PluginOutputDir
    def RunCommand(self, Command, PluginInfo, PluginOutputDir):
        """Run one shell command, timing it and capturing partial output.

        Returns [ModifiedCommand, FrameworkAbort, PluginAbort, TimeStr,
        RawOutput, PluginOutputDir]; abort flags are set when the user
        interrupts the plugin or the whole framework (partial output kept).
        """
        FrameworkAbort = PluginAbort = False
        if not PluginOutputDir:
            PluginOutputDir = self.InitPluginOutputDir(PluginInfo)
        self.timer.start_timer('FormatCommandAndOutput')
        ModifiedCommand = self.shell.GetModifiedShellCommand(Command, PluginOutputDir)
        try:
            RawOutput = self.shell.shell_exec_monitor(ModifiedCommand, PluginInfo)
        except PluginAbortException, PartialOutput:
            RawOutput = str(PartialOutput.parameter) # Save Partial Output
            PluginAbort = True
        except FrameworkAbortException, PartialOutput:
            RawOutput = str(PartialOutput.parameter) # Save Partial Output
            FrameworkAbort = True
        TimeStr = self.timer.get_elapsed_time_as_str('FormatCommandAndOutput')
        logging.info("Time=%s", TimeStr)
        return [ModifiedCommand, FrameworkAbort, PluginAbort, TimeStr, RawOutput, PluginOutputDir]
    def GetCommandOutputFileNameAndExtension(self, InputName):
        """Split a dump-file name into (name, extension); html is special."""
        OutputName = InputName
        OutputExtension = "txt"
        if InputName.split('.')[-1] in ['html']:
            OutputName = InputName[0:-5]
            OutputExtension = "html"
        return [OutputName, OutputExtension]
    def EscapeSnippet(self, Snippet, Extension):
        """HTML-escape a snippet unless it is already meant to be HTML."""
        if Extension == "html": # HTML
            return str(Snippet)
        return cgi.escape(str(Snippet)) # Escape snippet to avoid breaking HTML
    def CommandDump(self, CommandIntro, OutputIntro, ResourceList, PluginInfo, PreviousOutput):
        """Run every command in ResourceList, dumping each output to a file.

        The reserved EXTRACT_URLS resource instead feeds its output to the URL
        logger.  On plugin/framework abort the partial output accumulated so
        far is attached to the raised exception.
        """
        output_list = []
        PluginOutputDir = self.InitPluginOutputDir(PluginInfo)
        for Name, Command in ResourceList:
            dump_file_name = "%s.txt" % os.path.splitext(Name)[0] # Add txt extension to avoid wrong mimetypes
            plugin_output = dict(PLUGIN_OUTPUT)
            ModifiedCommand, FrameworkAbort, PluginAbort, TimeStr, RawOutput, PluginOutputDir = self.RunCommand(Command,
                PluginInfo, PluginOutputDir)
            plugin_output["type"] = "CommandDump"
            plugin_output["output"] = {
                "Name": self.GetCommandOutputFileNameAndExtension(Name)[0],
                "CommandIntro": CommandIntro,
                "ModifiedCommand": ModifiedCommand,
                "RelativeFilePath": self.plugin_handler.DumpOutputFile(dump_file_name, RawOutput, PluginInfo,
                                                                      RelativePath=True),
                "OutputIntro": OutputIntro,
                "TimeStr": TimeStr
            }
            plugin_output = [plugin_output]
            # This command returns URLs for processing
            if Name == self.config.FrameworkConfigGet('EXTRACT_URLS_RESERVED_RESOURCE_NAME'):
                # The plugin_output output dict will be remade if the resource is of this type
                plugin_output = self.LogURLsFromStr(RawOutput)
            # TODO: Look below to handle streaming report
            if PluginAbort: # Pass partial output to external handler:
                raise PluginAbortException(PreviousOutput + plugin_output)
            if FrameworkAbort:
                raise FrameworkAbortException(PreviousOutput + plugin_output)
            output_list += plugin_output
        return (output_list)
    def LogURLsFromStr(self, RawOutput):
        """Import URLs found in RawOutput (one per line) and visit them."""
        plugin_output = dict(PLUGIN_OUTPUT)
        self.timer.start_timer('LogURLsFromStr')
        # Extract and classify URLs and store in DB
        URLList = self.url_manager.ImportURLs(RawOutput.strip().split("\n"))
        NumFound = 0
        VisitURLs = False
        # TODO: Whether or not active testing will depend on the user profile ;). Have cool ideas for profile names
        # NOTE(review): ``if True`` makes the VisitURLs=False branch dead code;
        # kept pending the profile feature mentioned above.
        if True:
            VisitURLs = True
            # Visit all URLs if not in Cache
            for Transaction in self.requester.GetTransactions(True, self.url_manager.GetURLsToVisit()):
                if Transaction is not None and Transaction.Found:
                    NumFound += 1
        TimeStr = self.timer.get_elapsed_time_as_str('LogURLsFromStr')
        logging.info("Spider/URL scaper time=%s", TimeStr)
        plugin_output["type"] = "URLsFromStr"
        plugin_output["output"] = {"TimeStr": TimeStr, "VisitURLs": VisitURLs, "URLList": URLList, "NumFound": NumFound}
        return ([plugin_output])
    def DumpFile(self, Filename, Contents, PluginInfo, LinkName=''):
        """Dump Contents to a file and return [path, HTML link to it]."""
        save_path = self.plugin_handler.DumpOutputFile(Filename, Contents, PluginInfo)
        if not LinkName:
            LinkName = save_path
        logging.info("File: %s saved to: %s", Filename, save_path)
        template = Template("""
            <a href="{{ Link }}" target="_blank">
                {{ LinkName }}
            </a>
            """)
        return [save_path, template.generate(LinkName=LinkName, Link="../../../%s" % save_path)]
    def DumpFileGetLink(self, Filename, Contents, PluginInfo, LinkName=''):
        """Like DumpFile but return only the rendered HTML link."""
        return self.DumpFile(Filename, Contents, PluginInfo, LinkName)[1]
    def AnalyseRobotsEntries(self, Contents): # Find the entries of each kind and count them
        """Count Allow/Disallow/Sitemap entries in a robots.txt body."""
        num_lines = len(Contents.split("\n")) # Total number of robots.txt entries
        AllowedEntries = list(set(self.RobotsAllowRegexp.findall(Contents))) # list(set()) is to avoid repeated entries
        num_allow = len(AllowedEntries) # Number of lines that start with "Allow:"
        DisallowedEntries = list(set(self.RobotsDisallowRegexp.findall(Contents)))
        num_disallow = len(DisallowedEntries) # Number of lines that start with "Disallow:"
        SitemapEntries = list(set(self.RobotsSiteMap.findall(Contents)))
        num_sitemap = len(SitemapEntries) # Number of lines that start with "Sitemap:"
        RobotsFound = True
        if 0 == num_allow and 0 == num_disallow and 0 == num_sitemap:
            RobotsFound = False
        return [num_lines, AllowedEntries, num_allow, DisallowedEntries, num_disallow, SitemapEntries, num_sitemap,
                RobotsFound]
    def ProcessRobots(self, PluginInfo, Contents, LinkStart, LinkEnd, Filename='robots.txt'):
        """Analyse a robots.txt body, store its URLs and build a report entry."""
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "Robots"
        num_lines, AllowedEntries, num_allow, DisallowedEntries, num_disallow, SitemapEntries, num_sitemap, NotStr = \
            self.AnalyseRobotsEntries(Contents)
        SavePath = self.plugin_handler.DumpOutputFile(Filename, Contents, PluginInfo, True)
        TopURL = self.target.Get('top_url')
        EntriesList = []
        # robots.txt contains some entries, show browsable list! :)
        if num_disallow > 0 or num_allow > 0 or num_sitemap > 0:
            self.url_manager.AddURLsStart()
            for Display, Entries in [['Disallowed Entries', DisallowedEntries], ['Allowed Entries', AllowedEntries],
                                     ['Sitemap Entries', SitemapEntries]]:
                Links = [] # Initialise category-specific link list
                for Entry in Entries:
                    if 'Sitemap Entries' == Display:
                        URL = Entry
                        self.url_manager.AddURL(URL) # Store real links in the DB
                        Links.append([Entry, Entry]) # Show link in defined format (passive/semi_passive)
                    else:
                        URL = TopURL + Entry
                        self.url_manager.AddURL(URL) # Store real links in the DB
                        # Show link in defined format (passive/semi_passive)
                        Links.append([Entry, LinkStart + Entry + LinkEnd])
                EntriesList.append((Display, Links))
        NumAddedURLs = self.url_manager.AddURLsEnd()
        plugin_output["output"] = {
            "NotStr": NotStr,
            "NumLines": num_lines,
            "NumAllow": num_allow,
            "NumDisallow": num_disallow,
            "NumSitemap": num_sitemap,
            "SavePath": SavePath,
            "NumAddedURLs": NumAddedURLs,
            "EntriesList": EntriesList
        }
        return ([plugin_output])
    def TransactionTable(self, transactions_list):
        # Store transaction ids in the output, so that reporter can fetch transactions from db
        trans_ids = []
        for transaction in transactions_list:
            trans_ids.append(transaction.GetID())
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "TransactionTableFromIDs"
        plugin_output["output"] = {"TransactionIDs": trans_ids}
        return ([plugin_output])
    def TransactionTableForURLList(self, UseCache, URLList, Method=None, Data=None):
        """Visit the URLs (for the side effect) and emit a table entry."""
        # Have to make sure that those urls are visited ;), so we
        # perform get transactions but don't save the transaction ids etc..
        self.requester.GetTransactions(UseCache, URLList, Method, Data)
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "TransactionTableForURLList"
        plugin_output["output"] = {"UseCache": UseCache, "URLList": URLList, "Method": Method, "Data": Data}
        return ([plugin_output])
    def TransactionTableForURL(self, UseCache, URL, Method=None, Data=None):
        """Visit a single URL (for the side effect) and emit a table entry."""
        # Have to make sure that those urls are visited ;),
        # so we perform get transactions but don't save the transaction ids
        self.requester.GetTransaction(UseCache, URL, method=Method, data=Data)
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "TransactionTableForURL"
        plugin_output["output"] = {"UseCache": UseCache, "URL": URL, "Method": Method, "Data": Data}
        return ([plugin_output])
    def CreateMatchTables(self, Num):
        """Return Num match tables.

        NOTE(review): ``CreateMatchTable`` is not defined in this class --
        confirm it exists on a base class or remove this dead helper.
        """
        TableList = []
        for x in range(0, Num):
            TableList.append(self.CreateMatchTable())
        return TableList
    def HtmlString(self, html_string):
        """Wrap a raw HTML string as an 'HtmlString' output entry."""
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "HtmlString"
        plugin_output["output"] = {"String": html_string}
        return ([plugin_output])
    def FindResponseHeaderMatchesForRegexpName(self, HeaderRegexpName):
        """Build an entry asking the reporter to match one header regexp."""
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "ResponseHeaderMatches"
        plugin_output["output"] = {"HeaderRegexpName": HeaderRegexpName}
        return ([plugin_output])
    def FindResponseHeaderMatchesForRegexpNames(self, HeaderRegexpNamesList):
        """Build one header-match entry per regexp name."""
        Results = []
        for HeaderRegexpName in HeaderRegexpNamesList:
            Results += self.FindResponseHeaderMatchesForRegexpName(HeaderRegexpName)
        return Results
    def FindResponseBodyMatchesForRegexpName(self, ResponseRegexpName):
        """Build an entry asking the reporter to match one body regexp."""
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "ResponseBodyMatches"
        plugin_output["output"] = {"ResponseRegexpName": ResponseRegexpName}
        return ([plugin_output])
    def FindResponseBodyMatchesForRegexpNames(self, ResponseRegexpNamesList):
        """Build one body-match entry per regexp name."""
        Results = []
        for ResponseRegexpName in ResponseRegexpNamesList:
            Results += self.FindResponseBodyMatchesForRegexpName(ResponseRegexpName)
        return Results
    def ResearchFingerprintInlog(self):
        """Build a 'FingerprintData' entry (payload filled by the reporter)."""
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "FingerprintData"
        plugin_output["output"] = {}
        return ([plugin_output])
    def FindTopTransactionsBySpeed(self, Order="Desc"):
        """Build a 'TopTransactionsBySpeed' entry sorted per Order."""
        plugin_output = dict(PLUGIN_OUTPUT)
        plugin_output["type"] = "TopTransactionsBySpeed"
        plugin_output["output"] = {"Order": Order}
        return ([plugin_output])
| 16,354 | 940 | 23 |
789fe8d93e353e52bc663c1b04ebe0a242928996 | 439 | py | Python | Examples/Tests/ElectrostaticSphereEB/analysis.py | oshapoval/WarpX | 84d687da21ee93db67fdc43efec8a9cc80d0e6f9 | [
"BSD-3-Clause-LBNL"
] | 131 | 2018-09-29T08:11:40.000Z | 2022-03-28T23:24:22.000Z | Examples/Tests/ElectrostaticSphereEB/analysis.py | oshapoval/WarpX | 84d687da21ee93db67fdc43efec8a9cc80d0e6f9 | [
"BSD-3-Clause-LBNL"
] | 1,656 | 2018-10-02T01:49:24.000Z | 2022-03-31T21:27:31.000Z | Examples/Tests/ElectrostaticSphereEB/analysis.py | oshapoval/WarpX | 84d687da21ee93db67fdc43efec8a9cc80d0e6f9 | [
"BSD-3-Clause-LBNL"
] | 100 | 2018-10-01T20:41:14.000Z | 2022-03-10T10:30:42.000Z | #! /usr/bin/env python
# Run the default regression test for the PICMI version of the EB test
# using the same reference file as for the non-PICMI test since the two
# tests are otherwise the same.
import sys
sys.path.append('../../../../warpx/Regression/Checksum/')
import checksumAPI
my_check = checksumAPI.evaluate_checksum(
'ElectrostaticSphereEB', 'Python_ElectrostaticSphereEB_plt00001',
do_particles=False, atol=1e-12
)
| 27.4375 | 71 | 0.756264 | #! /usr/bin/env python
# Run the default regression test for the PICMI version of the EB test
# using the same reference file as for the non-PICMI test since the two
# tests are otherwise the same.
import sys
sys.path.append('../../../../warpx/Regression/Checksum/')
import checksumAPI
# Compare the plotfile against the stored benchmark; particle data is
# skipped and small floating-point differences (atol=1e-12) are tolerated.
my_check = checksumAPI.evaluate_checksum(
    'ElectrostaticSphereEB', 'Python_ElectrostaticSphereEB_plt00001',
    do_particles=False, atol=1e-12
)
| 0 | 0 | 0 |
42f00f0572081954a483374e2ea0aeff890b47e7 | 155 | py | Python | Inheritance/Exercises/03. players_and_monsters/project/muse_elf.py | geodimitrov/PythonOOP_SoftUni | f1c6718c878b618b3ab3f174cd4d187bd178940b | [
"MIT"
] | 1 | 2021-06-30T11:53:44.000Z | 2021-06-30T11:53:44.000Z | Inheritance/Exercises/03. players_and_monsters/project/muse_elf.py | geodimitrov/PythonOOP_SoftUni | f1c6718c878b618b3ab3f174cd4d187bd178940b | [
"MIT"
] | null | null | null | Inheritance/Exercises/03. players_and_monsters/project/muse_elf.py | geodimitrov/PythonOOP_SoftUni | f1c6718c878b618b3ab3f174cd4d187bd178940b | [
"MIT"
] | null | null | null | from project.elf import Elf | 38.75 | 83 | 0.709677 | from project.elf import Elf
class MuseElf(Elf):
    """Elf variant whose repr reports username, class name and level."""
    def __repr__(self):
        return "{} of type {} has level {}".format(
            self.username, MuseElf.__name__, self.level
        )
66fa7b2a753332d5db654924953961d53ffcdc80 | 588 | py | Python | djavError/models/problem_request.py | dasmith2/djavError | 6fc1bfcf8b1443be817a9bd8ec2d59e7682521dd | [
"MIT"
] | null | null | null | djavError/models/problem_request.py | dasmith2/djavError | 6fc1bfcf8b1443be817a9bd8ec2d59e7682521dd | [
"MIT"
] | null | null | null | djavError/models/problem_request.py | dasmith2/djavError | 6fc1bfcf8b1443be817a9bd8ec2d59e7682521dd | [
"MIT"
] | null | null | null | from django.db import models
from djaveDT import now
from djavError.models.fixable import Fixable
| 23.52 | 71 | 0.685374 | from django.db import models
from djaveDT import now
from djavError.models.fixable import Fixable
class ProblemRequest(Fixable):
    """Abstract model recording an HTTP request that triggered a problem."""
    # Request path and HTTP verb that triggered the problem.
    path = models.CharField(max_length=300)
    method = models.CharField(max_length=10)
    variables = models.TextField(
        null=True, blank=True,
        help_text='request.POST or request.GET, depending on the method')
    def increment(self):
        # Record one more occurrence and bump the timestamp.
        # NOTE(review): ``count`` and ``latest`` are presumably fields declared
        # on ``Fixable`` -- confirm against that base class.
        self.count += 1
        self.latest = now()
        self.save()
    def save(self, *args, **kwargs):
        # Default ``latest`` to now on first save so it is never empty.
        if not self.latest:
            self.latest = now()
        super().save(*args, **kwargs)
    class Meta:
        # Abstract: concrete problem-request models inherit these fields.
        abstract = True
| 154 | 312 | 23 |
69522746e5515b3e9ee61ed5ac487690c3a0c5c6 | 3,319 | py | Python | scripts/sisc/paper_plot_pochoir_comparison.py | tareqmalas/girih | 0c126788937d189147be47115703b752235e585c | [
"BSD-3-Clause"
] | 7 | 2015-07-14T08:29:14.000Z | 2021-07-30T14:53:13.000Z | scripts/sisc/paper_plot_pochoir_comparison.py | tareqmalas/girih | 0c126788937d189147be47115703b752235e585c | [
"BSD-3-Clause"
] | null | null | null | scripts/sisc/paper_plot_pochoir_comparison.py | tareqmalas/girih | 0c126788937d189147be47115703b752235e585c | [
"BSD-3-Clause"
] | 3 | 2016-08-30T01:25:40.000Z | 2017-06-22T05:50:05.000Z | #!/usr/bin/env python
if __name__ == "__main__":
    # Script entry point: draw and save the Pochoir-comparison bar chart.
    plot_stacked_clustered_bars()
| 32.223301 | 95 | 0.638445 | #!/usr/bin/env python
def plot_stacked_clustered_bars():
    """Plot MWD vs Pochoir GLUP/s per stencil/processor and save an EPS.

    Bars are clustered (3 stencils + 1 spacer per processor) with the MWD bar
    drawn behind the Pochoir bar; the speedup factor is printed above each
    cluster member.  Output file: pochoir_comparison.eps.
    """
    # NOTE(review): itemgetter and the pylab math names below are unused.
    from operator import itemgetter
    import matplotlib.pyplot as plt
    import pylab
    import numpy as np
    from pylab import arange,pi,sin,cos,sqrt
    sec_fontsize = 13
    fig_width = 2*7.4*0.393701 # inches
    fig_height = 0.75*fig_width #* 210.0/280.0#433.62/578.16
    fig_size = [fig_width,fig_height]
    # NOTE(review): 'text.fontsize' is a long-deprecated rcParams key -- newer
    # matplotlib releases reject it ('font.size' is the replacement).
    params = {
        'axes.labelsize': 12,
        # 'axes.linewidth': 0.5,
        # 'lines.linewidth': 0.75,
        'text.fontsize': 12,
        'legend.fontsize': 12,
        'xtick.labelsize': 12,
        'ytick.labelsize': 12,
        # 'lines.markersize': 3,
        'text.usetex': True,
        'figure.figsize': fig_size}
    pylab.rcParams.update(params)
    # Data laid out as clusters of 4 slots (3 stencils + 1 empty spacer).
    stencils = ['7pt const.', '7pt var.', '25pt var.','','','','','','','','']
    processors = ['','Westmere','','', '','Ivy Bridge','', '','','Haswell','','']
    pochoir_perf = [806.0, 260.0, 93.0, 0, 2814.0, 753.0, 267.0, 0, 3716.0, 0966.0, 378.0]
    girih_perf = [1475.0, 445.0, 148.0, 0, 3975.0, 1266.0, 363.0, 0, 6125.0, 1940.0, 559.0]
    # Convert MLUP/s to GLUP/s.
    pochoir_perf = [i/1e3 for i in pochoir_perf]
    girih_perf = [i/1e3 for i in girih_perf]
    # Per-slot speedup of MWD over Pochoir (0 on the spacer slots).
    speedup = [0]*len(stencils)
    for i in range(len(stencils)):
        speedup[i] = girih_perf[i]/pochoir_perf[i] if pochoir_perf[i] else 0.0
    nx = len(pochoir_perf)
    x = range(nx)
    cluster_size = 4
    fig = plt.figure()
    fig.subplots_adjust(bottom=0.25) # space on the bottom for second time axis
    host = fig.add_subplot(111) # setup plot
    width =0.8
    # Overlapping bars: the taller MWD bar behind, Pochoir in front.
    p1 = host.bar(x, girih_perf, width, color='0.65', align='center', hatch="")
    p2 = host.bar(x, pochoir_perf, width, color='0.95', align='center', hatch="/")
    # Annotate each real bar with the speedup factor.
    for i, r in enumerate(p1):
        if speedup[i] > 0:
            height = r.get_height()
            host.text(r.get_x()+width/2., height+0.01, '%2.1fx'%speedup[i], ha='center', va='bottom')
    host.set_ylabel('GLUP/s', fontsize=sec_fontsize)
    # host.set_xlabel('Processor', fontsize=sec_fontsize)
    host.tick_params(axis='both', which='major', labelsize=sec_fontsize)
    host.tick_params(axis='both', which='minor', labelsize=sec_fontsize)
    # Processor labels: keep one tick per cluster, zero the rest.
    xtk = np.arange(nx)
    for i in range(nx):
        if i%cluster_size >= 3:
            xtk[i]=0
    host.set_xticks(xtk)
    host.set_xticklabels(processors)
    # Insert the time steppers names at the X-axis
    newax = host.twiny() # create new axis
    newax.xaxis.set_ticks_position('bottom')
    newax.spines['bottom'].set_position(('outward', 20))
    # newax.patch.set_visible(False)
    # newax.xaxis.set_label_position('bottom')
    # newax.set_frame_on(False)
    # newax.tick_params('x', width=0)
    ticks = np.arange(nx)
    for i in range(nx):
        if (i >= 3):
            ticks[i] = 0
    newax.set_xticks(ticks)
    newax.set_xticklabels(stencils, rotation=90, size='medium')
    newax.axis((0.0, float(nx), 0.0, max(girih_perf)*1.1))
    newax.set_xlim((-width, nx))
    host.set_xlim((-width, nx))
    host.yaxis.grid()
    #pylab.legend(p1[:nts], ts_set)
    host.legend( (p1, p2), ('MWD', 'Pochoir'), loc='center left', fontsize=sec_fontsize)
    f_name = "pochoir_comparison"
    #pylab.savefig(f_name+'.pdf', format='pdf', bbox_inches="tight", pad_inches=0)
    pylab.savefig(f_name+'.eps', format='eps', bbox_inches="tight", pad_inches=0.02)
    return
if __name__ == "__main__":
    # Script entry point: draw and save the Pochoir-comparison bar chart.
    plot_stacked_clustered_bars()
| 3,208 | 0 | 23 |
1c35a915fceb0b31f4541e4a9cb30f32209280a0 | 2,441 | py | Python | task-example.py | EverAzureRest/batch_examples | 7daec97a468770c3d07cdb02f67951e5be75c153 | [
"MIT"
] | null | null | null | task-example.py | EverAzureRest/batch_examples | 7daec97a468770c3d07cdb02f67951e5be75c153 | [
"MIT"
] | null | null | null | task-example.py | EverAzureRest/batch_examples | 7daec97a468770c3d07cdb02f67951e5be75c153 | [
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
import azure.batch.batch_auth as batchauth
import azure.batch._batch_service_client as batch
import uuid
# NOTE(review): ``import datetime`` rebinds the ``datetime`` name imported
# above to the module (hence ``datetime.datetime.now()`` below); ``timedelta``
# survives.  Confusing -- pick one import style.
import datetime
import time
# Batch account credentials
# NOTE(review): credentials are empty placeholders; fill in (or better, read
# from environment variables) before running.
BATCH_ACCOUNT_NAME = ''
BATCH_ACCOUNT_URL = ''
BATCH_ACCOUNT_KEY = ''
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage.
credentials = batchauth.SharedKeyCredentials(BATCH_ACCOUNT_NAME,
                                             BATCH_ACCOUNT_KEY)
batch_client = batch.BatchServiceClient(
    credentials,
    batch_url=BATCH_ACCOUNT_URL)
# Look up the pre-existing pool and scale it up to one dedicated node.
pool = batch_client.pool.get(
    pool_id='testPool'
)
##ToDO: Create nodes prior to run.
poolResizeParam = batch.models.PoolResizeParameter(
    target_dedicated_nodes=1
)
batch_client.pool.resize(
    pool_id=pool.id,
    pool_resize_parameter=poolResizeParam
)
# Create a job that allows task dependencies so task2 can wait on task1.
job = batch.models.JobAddParameter(
    id=str(uuid.uuid1()),
    display_name='myBatchJob',
    pool_info=batch.models.PoolInformation(
        pool_id=pool.id
    ),
    uses_task_dependencies = 'true'
)
job1 = batch_client.job.add(job)
task1 = batch.models.TaskAddParameter(
    id='task1',
    command_line='cmd /c echo "Hello From Batch" >task.txt'
)
dependentTasks = list()
dependentTasks.append(task1.id)
# task2 declares a dependency on task1 and only starts after it completes.
task2 = batch.models.TaskAddParameter(
    id='task2',
    command_line = 'cmd /c echo "this is task2 - should execute after task 1" >task2.txt',
    depends_on = batch.models.TaskDependencies(task_ids=dependentTasks)
)
tasks = list()
tasks.append(task1)
tasks.append(task2)
batch_client.task.add_collection(
    job_id=job.id,
    value=tasks
)
# Perform action with the batch_client
jobs = batch_client.job.list()
for job in jobs:
    print(job.id)
##Todo, watch tasks for completion and resize pool to zero
# Poll task states for up to 30 minutes; scale the pool back down once all
# tasks have completed.
job_timeout = timedelta(minutes=30)
timeout_expiration = datetime.datetime.now() + job_timeout
while datetime.datetime.now() < timeout_expiration:
    tasks = batch_client.task.list(job.id)
    incomplete_tasks = [task for task in tasks if
                        task.state != batch.models.TaskState.completed]
    if not incomplete_tasks:
        # NOTE(review): no ``break`` after the resize, so the loop keeps
        # sleeping/resizing until the 30-minute timeout; the 600 s sleep
        # before resizing also looks unintended -- confirm and add a break.
        time.sleep(600)
        newpoolResizeParam = batch.models.PoolResizeParameter(
            target_dedicated_nodes=0
        )
        batch_client.pool.resize(
            pool_id=pool.id,
            pool_resize_parameter=newpoolResizeParam
        )
    else:
        time.sleep(1) | 24.656566 | 90 | 0.711184 |
from datetime import datetime, timedelta
import azure.batch.batch_auth as batchauth
import azure.batch._batch_service_client as batch
import uuid
import datetime
import time
# Batch account credentials
BATCH_ACCOUNT_NAME = ''
BATCH_ACCOUNT_URL = ''
BATCH_ACCOUNT_KEY = ''
# Create a Batch service client. We'll now be interacting with the Batch
# service in addition to Storage.
credentials = batchauth.SharedKeyCredentials(BATCH_ACCOUNT_NAME,
BATCH_ACCOUNT_KEY)
batch_client = batch.BatchServiceClient(
credentials,
batch_url=BATCH_ACCOUNT_URL)
pool = batch_client.pool.get(
pool_id='testPool'
)
##ToDO: Create nodes prior to run.
poolResizeParam = batch.models.PoolResizeParameter(
target_dedicated_nodes=1
)
batch_client.pool.resize(
pool_id=pool.id,
pool_resize_parameter=poolResizeParam
)
job = batch.models.JobAddParameter(
id=str(uuid.uuid1()),
display_name='myBatchJob',
pool_info=batch.models.PoolInformation(
pool_id=pool.id
),
uses_task_dependencies = 'true'
)
job1 = batch_client.job.add(job)
task1 = batch.models.TaskAddParameter(
id='task1',
command_line='cmd /c echo "Hello From Batch" >task.txt'
)
dependentTasks = list()
dependentTasks.append(task1.id)
task2 = batch.models.TaskAddParameter(
id='task2',
command_line = 'cmd /c echo "this is task2 - should execute after task 1" >task2.txt',
depends_on = batch.models.TaskDependencies(task_ids=dependentTasks)
)
tasks = list()
tasks.append(task1)
tasks.append(task2)
batch_client.task.add_collection(
job_id=job.id,
value=tasks
)
# Perform action with the batch_client
jobs = batch_client.job.list()
for job in jobs:
print(job.id)
##Todo, watch tasks for completion and resize pool to zero
job_timeout = timedelta(minutes=30)
timeout_expiration = datetime.datetime.now() + job_timeout
while datetime.datetime.now() < timeout_expiration:
tasks = batch_client.task.list(job.id)
incomplete_tasks = [task for task in tasks if
task.state != batch.models.TaskState.completed]
if not incomplete_tasks:
time.sleep(600)
newpoolResizeParam = batch.models.PoolResizeParameter(
target_dedicated_nodes=0
)
batch_client.pool.resize(
pool_id=pool.id,
pool_resize_parameter=newpoolResizeParam
)
else:
time.sleep(1) | 0 | 0 | 0 |
f74a207d8ad9dfa591242c7ec093206515629648 | 12,014 | py | Python | fiftyone/core/document.py | seantrue/fiftyone | 75c853714d2712b8da51a5c53ae68f6c47229b06 | [
"Apache-2.0"
] | null | null | null | fiftyone/core/document.py | seantrue/fiftyone | 75c853714d2712b8da51a5c53ae68f6c47229b06 | [
"Apache-2.0"
] | null | null | null | fiftyone/core/document.py | seantrue/fiftyone | 75c853714d2712b8da51a5c53ae68f6c47229b06 | [
"Apache-2.0"
] | null | null | null | """
Base class for objects that are backed by database documents.
| Copyright 2017-2020, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from copy import deepcopy
import eta.core.serial as etas
class Document(object):
"""Base class for objects that are associated with
:class:`fiftyone.core.dataset.Dataset` instances and are backed by
documents in database collections.
Args:
dataset (None): the :class:`fiftyone.core.dataset.Dataset` to which the
document belongs
"""
@property
def id(self):
"""The ID of the document, or ``None`` if it has not been added to the
database.
"""
return str(self._doc.id) if self._in_db else None
@property
def _id(self):
"""The ObjectId of the document, or ``None`` if it has not been added
to the database.
"""
return self._doc.id if self._in_db else None
@property
def ingest_time(self):
"""The time the document was added to the database, or ``None`` if it
has not been added to the database.
"""
return self._doc.ingest_time
@property
def in_dataset(self):
"""Whether the document has been added to a dataset."""
return self.dataset is not None
@property
def dataset(self):
"""The dataset to which this document belongs, or ``None`` if it has
not been added to a dataset.
"""
return self._dataset
@property
def field_names(self):
"""An ordered tuple of the names of the fields of this document."""
return self._doc.field_names
@property
def _in_db(self):
"""Whether the underlying :class:`fiftyone.core.odm.Document` has
been inserted into the database.
"""
return self._doc.in_db
@property
def _skip_iter_field_names(self):
"""A tuple of names of fields to skip when :meth:`iter_fields` is
called.
"""
return tuple()
def _get_field_names(self, include_private=False):
"""Returns an ordered tuple of field names of this document.
Args:
include_private (False): whether to include private fields
Returns:
a tuple of field names
"""
return self._doc._get_field_names(include_private=include_private)
def get_field(self, field_name):
"""Gets the value of a field of the document.
Args:
field_name: the field name
Returns:
the field value
Raises:
AttributeError: if the field does not exist
"""
return self._doc.get_field(field_name)
def set_field(self, field_name, value, create=True):
"""Sets the value of a field of the document.
Args:
field_name: the field name
value: the field value
create (True): whether to create the field if it does not exist
Raises:
ValueError: if ``field_name`` is not an allowed field name or does
not exist and ``create == False``
"""
if field_name.startswith("_"):
raise ValueError(
"Invalid field name: '%s'. Field names cannot start with '_'"
% field_name
)
self._doc.set_field(field_name, value, create=create)
def update_fields(self, fields_dict, create=True):
"""Sets the dictionary of fields on the document.
Args:
fields_dict: a dict mapping field names to values
create (True): whether to create fields if they do not exist
"""
for field_name, value in fields_dict.items():
self.set_field(field_name, value, create=create)
def clear_field(self, field_name):
"""Clears the value of a field of the document.
Args:
field_name: the name of the field to clear
Raises:
ValueError: if the field does not exist
"""
self._doc.clear_field(field_name)
def iter_fields(self):
"""Returns an iterator over the ``(name, value)`` pairs of the fields
of the document.
Private fields are omitted.
Returns:
an iterator that emits ``(name, value)`` tuples
"""
field_names = tuple(
f for f in self.field_names if f not in self._skip_iter_field_names
)
for field_name in field_names:
yield field_name, self.get_field(field_name)
def merge(self, document, overwrite=True):
"""Merges the fields of the document into this document.
``None``-valued fields are always omitted.
Args:
document: a :class:`Document` of the same type
overwrite (True): whether to overwrite existing fields. Note that
existing fields whose values are ``None`` are always
overwritten
"""
existing_field_names = self.field_names
for field_name, value in document.iter_fields():
if value is None:
continue
if (
not overwrite
and (field_name in existing_field_names)
and (self[field_name] is not None)
):
continue
self.set_field(field_name, value)
def copy(self):
"""Returns a deep copy of the document that has not been added to the
database.
Returns:
a :class:`Document`
"""
kwargs = {k: deepcopy(v) for k, v in self.iter_fields()}
return self.__class__(**kwargs)
def to_dict(self):
"""Serializes the document to a JSON dictionary.
Sample IDs and private fields are excluded in this representation.
Returns:
a JSON dict
"""
d = self._doc.to_dict(extended=True)
return {k: v for k, v in d.items() if not k.startswith("_")}
def to_json(self, pretty_print=False):
"""Serializes the document to a JSON string.
Sample IDs and private fields are excluded in this representation.
Args:
pretty_print (False): whether to render the JSON in human readable
format with newlines and indentations
Returns:
a JSON string
"""
return etas.json_to_str(self.to_dict(), pretty_print=pretty_print)
def to_mongo_dict(self):
"""Serializes the document to a BSON dictionary equivalent to the
representation that would be stored in the database.
Returns:
a BSON dict
"""
return self._doc.to_dict(extended=False)
def save(self):
"""Saves the document to the database."""
self._doc.save()
def reload(self):
"""Reloads the document from the database."""
self._doc.reload()
def _delete(self):
"""Deletes the document from the database."""
self._doc.delete()
@classmethod
def from_dict(cls, d):
"""Loads the document from a JSON dictionary.
The returned document will not belong to a dataset.
Returns:
a :class:`Document`
"""
doc = cls._NO_COLL_CLS.from_dict(d, extended=True)
return cls.from_doc(doc)
@classmethod
def from_json(cls, s):
"""Loads the document from a JSON string.
Args:
s: the JSON string
Returns:
a :class:`Document`
"""
doc = cls._NO_COLL_CL.from_json(s)
return cls.from_doc(doc)
@classmethod
def _rename_field(cls, collection_name, field_name, new_field_name):
"""Renames any field values for in-memory document instances that
belong to the specified collection.
Args:
collection_name: the name of the MongoDB collection
field_name: the name of the field to rename
new_field_name: the new field name
"""
for document in cls._instances[collection_name].values():
data = document._doc._data
data[new_field_name] = data.pop(field_name, None)
@classmethod
def _purge_field(cls, collection_name, field_name):
"""Removes values for the given field from all in-memory document
instances that belong to the specified collection.
Args:
collection_name: the name of the MongoDB collection
field_name: the name of the field to purge
"""
for document in cls._instances[collection_name].values():
document._doc._data.pop(field_name, None)
@classmethod
def _reload_docs(cls, collection_name):
"""Reloads the backing documents for all in-memory document instances
that belong to the specified collection.
Args:
collection_name: the name of the MongoDB collection
"""
for document in cls._instances[collection_name].values():
document.reload()
def _set_backing_doc(self, doc, dataset=None):
"""Sets the backing doc for the document.
Args:
doc: a :class:`fiftyone.core.odm.SampleDocument`
dataset (None): the :class:`fiftyone.core.dataset.Dataset` to which
the document belongs, if any
"""
# Ensure the doc is saved to the database
if not doc.id:
doc.save()
self._doc = doc
# Save weak reference
dataset_instances = self._instances[doc.collection_name]
if self.id not in dataset_instances:
dataset_instances[self.id] = self
self._dataset = dataset
@classmethod
def _reset_backing_docs(cls, collection_name, doc_ids):
"""Resets the document(s) backing documents.
Args:
collection_name: the name of the MongoDB collection
doc_ids: a list of document IDs
"""
dataset_instances = cls._instances[collection_name]
for doc_id in doc_ids:
document = dataset_instances.pop(doc_id, None)
if document is not None:
document._reset_backing_doc()
@classmethod
def _reset_all_backing_docs(cls, collection_name):
"""Resets the backing documents for all documents in the collection.
Args:
collection_name: the name of the MongoDB collection
"""
if collection_name not in cls._instances:
return
dataset_instances = cls._instances.pop(collection_name)
for document in dataset_instances.values():
document._reset_backing_doc()
| 30.035 | 79 | 0.598219 | """
Base class for objects that are backed by database documents.
| Copyright 2017-2020, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
from copy import deepcopy
import eta.core.serial as etas
class Document(object):
"""Base class for objects that are associated with
:class:`fiftyone.core.dataset.Dataset` instances and are backed by
documents in database collections.
Args:
dataset (None): the :class:`fiftyone.core.dataset.Dataset` to which the
document belongs
"""
def __init__(self, dataset=None):
self._dataset = dataset
def __dir__(self):
return super().__dir__() + list(self.field_names)
def __getattr__(self, name):
try:
return super().__getattribute__(name)
except AttributeError:
if name != "_doc":
return self._doc.get_field(name)
else:
raise
def __setattr__(self, name, value):
if name.startswith("_") or (
hasattr(self, name) and not self._doc.has_field(name)
):
super().__setattr__(name, value)
else:
try:
self._secure_media(name, value)
except AttributeError:
pass
self._doc.__setattr__(name, value)
def __delattr__(self, name):
try:
self.__delitem__(name)
except KeyError:
super().__delattr__(name)
def __delitem__(self, field_name):
try:
self.clear_field(field_name)
except ValueError as e:
raise KeyError(e.args[0])
def __copy__(self):
return self.copy()
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self._doc == other._doc
@property
def id(self):
"""The ID of the document, or ``None`` if it has not been added to the
database.
"""
return str(self._doc.id) if self._in_db else None
@property
def _id(self):
"""The ObjectId of the document, or ``None`` if it has not been added
to the database.
"""
return self._doc.id if self._in_db else None
@property
def ingest_time(self):
"""The time the document was added to the database, or ``None`` if it
has not been added to the database.
"""
return self._doc.ingest_time
@property
def in_dataset(self):
"""Whether the document has been added to a dataset."""
return self.dataset is not None
@property
def dataset(self):
"""The dataset to which this document belongs, or ``None`` if it has
not been added to a dataset.
"""
return self._dataset
@property
def field_names(self):
"""An ordered tuple of the names of the fields of this document."""
return self._doc.field_names
@property
def _in_db(self):
"""Whether the underlying :class:`fiftyone.core.odm.Document` has
been inserted into the database.
"""
return self._doc.in_db
@property
def _skip_iter_field_names(self):
"""A tuple of names of fields to skip when :meth:`iter_fields` is
called.
"""
return tuple()
def _get_field_names(self, include_private=False):
"""Returns an ordered tuple of field names of this document.
Args:
include_private (False): whether to include private fields
Returns:
a tuple of field names
"""
return self._doc._get_field_names(include_private=include_private)
def get_field(self, field_name):
"""Gets the value of a field of the document.
Args:
field_name: the field name
Returns:
the field value
Raises:
AttributeError: if the field does not exist
"""
return self._doc.get_field(field_name)
def set_field(self, field_name, value, create=True):
"""Sets the value of a field of the document.
Args:
field_name: the field name
value: the field value
create (True): whether to create the field if it does not exist
Raises:
ValueError: if ``field_name`` is not an allowed field name or does
not exist and ``create == False``
"""
if field_name.startswith("_"):
raise ValueError(
"Invalid field name: '%s'. Field names cannot start with '_'"
% field_name
)
self._doc.set_field(field_name, value, create=create)
def update_fields(self, fields_dict, create=True):
"""Sets the dictionary of fields on the document.
Args:
fields_dict: a dict mapping field names to values
create (True): whether to create fields if they do not exist
"""
for field_name, value in fields_dict.items():
self.set_field(field_name, value, create=create)
def clear_field(self, field_name):
"""Clears the value of a field of the document.
Args:
field_name: the name of the field to clear
Raises:
ValueError: if the field does not exist
"""
self._doc.clear_field(field_name)
def iter_fields(self):
"""Returns an iterator over the ``(name, value)`` pairs of the fields
of the document.
Private fields are omitted.
Returns:
an iterator that emits ``(name, value)`` tuples
"""
field_names = tuple(
f for f in self.field_names if f not in self._skip_iter_field_names
)
for field_name in field_names:
yield field_name, self.get_field(field_name)
def merge(self, document, overwrite=True):
"""Merges the fields of the document into this document.
``None``-valued fields are always omitted.
Args:
document: a :class:`Document` of the same type
overwrite (True): whether to overwrite existing fields. Note that
existing fields whose values are ``None`` are always
overwritten
"""
existing_field_names = self.field_names
for field_name, value in document.iter_fields():
if value is None:
continue
if (
not overwrite
and (field_name in existing_field_names)
and (self[field_name] is not None)
):
continue
self.set_field(field_name, value)
def copy(self):
"""Returns a deep copy of the document that has not been added to the
database.
Returns:
a :class:`Document`
"""
kwargs = {k: deepcopy(v) for k, v in self.iter_fields()}
return self.__class__(**kwargs)
def to_dict(self):
"""Serializes the document to a JSON dictionary.
Sample IDs and private fields are excluded in this representation.
Returns:
a JSON dict
"""
d = self._doc.to_dict(extended=True)
return {k: v for k, v in d.items() if not k.startswith("_")}
def to_json(self, pretty_print=False):
"""Serializes the document to a JSON string.
Sample IDs and private fields are excluded in this representation.
Args:
pretty_print (False): whether to render the JSON in human readable
format with newlines and indentations
Returns:
a JSON string
"""
return etas.json_to_str(self.to_dict(), pretty_print=pretty_print)
def to_mongo_dict(self):
"""Serializes the document to a BSON dictionary equivalent to the
representation that would be stored in the database.
Returns:
a BSON dict
"""
return self._doc.to_dict(extended=False)
def save(self):
"""Saves the document to the database."""
self._doc.save()
def reload(self):
"""Reloads the document from the database."""
self._doc.reload()
def _delete(self):
"""Deletes the document from the database."""
self._doc.delete()
@classmethod
def from_dict(cls, d):
"""Loads the document from a JSON dictionary.
The returned document will not belong to a dataset.
Returns:
a :class:`Document`
"""
doc = cls._NO_COLL_CLS.from_dict(d, extended=True)
return cls.from_doc(doc)
@classmethod
def from_json(cls, s):
"""Loads the document from a JSON string.
Args:
s: the JSON string
Returns:
a :class:`Document`
"""
doc = cls._NO_COLL_CL.from_json(s)
return cls.from_doc(doc)
@classmethod
def _rename_field(cls, collection_name, field_name, new_field_name):
"""Renames any field values for in-memory document instances that
belong to the specified collection.
Args:
collection_name: the name of the MongoDB collection
field_name: the name of the field to rename
new_field_name: the new field name
"""
for document in cls._instances[collection_name].values():
data = document._doc._data
data[new_field_name] = data.pop(field_name, None)
@classmethod
def _purge_field(cls, collection_name, field_name):
"""Removes values for the given field from all in-memory document
instances that belong to the specified collection.
Args:
collection_name: the name of the MongoDB collection
field_name: the name of the field to purge
"""
for document in cls._instances[collection_name].values():
document._doc._data.pop(field_name, None)
@classmethod
def _reload_docs(cls, collection_name):
"""Reloads the backing documents for all in-memory document instances
that belong to the specified collection.
Args:
collection_name: the name of the MongoDB collection
"""
for document in cls._instances[collection_name].values():
document.reload()
def _set_backing_doc(self, doc, dataset=None):
"""Sets the backing doc for the document.
Args:
doc: a :class:`fiftyone.core.odm.SampleDocument`
dataset (None): the :class:`fiftyone.core.dataset.Dataset` to which
the document belongs, if any
"""
# Ensure the doc is saved to the database
if not doc.id:
doc.save()
self._doc = doc
# Save weak reference
dataset_instances = self._instances[doc.collection_name]
if self.id not in dataset_instances:
dataset_instances[self.id] = self
self._dataset = dataset
@classmethod
def _reset_backing_docs(cls, collection_name, doc_ids):
"""Resets the document(s) backing documents.
Args:
collection_name: the name of the MongoDB collection
doc_ids: a list of document IDs
"""
dataset_instances = cls._instances[collection_name]
for doc_id in doc_ids:
document = dataset_instances.pop(doc_id, None)
if document is not None:
document._reset_backing_doc()
@classmethod
def _reset_all_backing_docs(cls, collection_name):
"""Resets the backing documents for all documents in the collection.
Args:
collection_name: the name of the MongoDB collection
"""
if collection_name not in cls._instances:
return
dataset_instances = cls._instances.pop(collection_name)
for document in dataset_instances.values():
document._reset_backing_doc()
def _reset_backing_doc(self):
self._doc = self.copy()._doc
self._dataset = None
| 1,148 | 0 | 243 |
389491d65002d63e720ae68458794040be96e646 | 15,782 | py | Python | sfa_api/utils/tests/test_validators.py | lboeman/solarforecastarbiter-api | 9df598b5c638c3e36d0649e08e955b3ddc1b542d | [
"MIT"
] | 7 | 2018-12-07T22:05:36.000Z | 2020-05-03T03:20:50.000Z | sfa_api/utils/tests/test_validators.py | lboeman/solarforecastarbiter-api | 9df598b5c638c3e36d0649e08e955b3ddc1b542d | [
"MIT"
] | 220 | 2018-11-01T23:33:19.000Z | 2021-12-02T21:06:38.000Z | sfa_api/utils/tests/test_validators.py | lboeman/solarforecastarbiter-api | 9df598b5c638c3e36d0649e08e955b3ddc1b542d | [
"MIT"
] | 3 | 2018-10-31T20:55:07.000Z | 2021-11-10T22:51:43.000Z | from copy import deepcopy
import datetime as dt
from marshmallow.exceptions import ValidationError
import pytest
import pytz
from sfa_api.conftest import (VALID_OBS_JSON, VALID_FORECAST_JSON,
VALID_CDF_FORECAST_JSON, VALID_FORECAST_AGG_JSON,
VALID_AGG_JSON)
from sfa_api.utils.errors import StorageAuthError
from sfa_api.utils import validators
@pytest.mark.parametrize('thetime', [
'09:00', '9:00', '00:00'
])
@pytest.mark.parametrize('bad', [
'25:00', '00:00:00', 'ab:cd', '10:88'
])
@pytest.mark.parametrize('thestring', [
'mysite', 'Site 1', 'A really long but otherwise OK site',
"apostrophe '", 'site_99', 'site tucson, az',
"Test (site)", 'w,', 'test-hyphen'
])
@pytest.mark.parametrize('thestring', [
'<script>bac</script>', '<', ';delete',
'site:a:b', 'site+1', 'site\\G',
'site\n', '', ' ', "'", "' ", '_', ',',
',_', '()', "'()',", "(){ :|:& };"
])
@pytest.mark.parametrize('tz', [
'America/Phoenix',
'Etc/GMT+7'
])
@pytest.mark.parametrize('tz', ['PDT', 'Germany/Berlin'])
@pytest.mark.parametrize('time_', [
dt.datetime(2019, 1, 1, 12, 3, tzinfo=pytz.timezone('MST')),
dt.datetime(2019, 1, 1, 12, 3),
dt.datetime(1969, 12, 31, 17, 0, 1, tzinfo=pytz.timezone('MST')),
])
@pytest.mark.parametrize('time_', [
dt.datetime(2049, 1, 1, 12, 3),
dt.datetime(1969, 12, 31, 14, 0, 1, tzinfo=pytz.timezone('MST')),
])
@pytest.mark.parametrize("valid", [
None, "observation_uncertainty", "0.0",
] + list(range(0, 101, 10))
)
@pytest.mark.parametrize("invalid", [
"None", "bad string", "101", "-1.0"
])
@pytest.mark.parametrize("data", [
{'variable': 'event', 'interval_label': 'event'},
{'variable': 'notevent', 'interval_label': 'notevent'},
])
@pytest.mark.parametrize("data", [
{'variable': 'event', 'interval_label': 'notevent'},
{'variable': 'notevent', 'interval_label': 'event'},
])
# Create objects for testing report object pairs
VALID_CDF_SINGLE_JSON = deepcopy(VALID_CDF_FORECAST_JSON)
VALID_CDF_SINGLE_JSON.pop('constant_values')
VALID_CDF_SINGLE_JSON.update({
'axis': 'x',
'constant_value': '5.0'
})
VALID_FORECAST_AGG_JSON_60 = deepcopy(VALID_FORECAST_AGG_JSON)
VALID_FORECAST_AGG_JSON_60['interval_length'] = 60
VALID_AGG_JSON_WITH_ID = deepcopy(VALID_AGG_JSON)
VALID_AGG_JSON_WITH_ID.update({
'aggregate_id': VALID_FORECAST_AGG_JSON_60['aggregate_id'],
})
VALID_EVENT_FORECAST_JSON = deepcopy(VALID_FORECAST_JSON)
VALID_EVENT_FORECAST_JSON.update({
'variable': 'event',
'interval_label': 'event',
})
VALID_EVENT_OBS_JSON = deepcopy(VALID_OBS_JSON)
VALID_EVENT_OBS_JSON.update({
'variable': 'event',
'interval_label': 'event',
})
@pytest.fixture()
@pytest.mark.parametrize('fx,meas', [
(VALID_FORECAST_JSON, VALID_OBS_JSON),
(VALID_CDF_FORECAST_JSON, VALID_OBS_JSON),
(VALID_CDF_SINGLE_JSON, VALID_OBS_JSON),
(VALID_FORECAST_AGG_JSON_60, VALID_AGG_JSON_WITH_ID),
(VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON),
])
@pytest.fixture(params=[
('variable', 'bad'), ('interval_length', 120), ('site_id', 'bad'),
('aggregate_id', 'bad')])
@pytest.mark.parametrize('fx,meas', [
(VALID_FORECAST_JSON, VALID_OBS_JSON),
(VALID_CDF_FORECAST_JSON, VALID_OBS_JSON),
(VALID_CDF_SINGLE_JSON, VALID_OBS_JSON),
(VALID_FORECAST_AGG_JSON_60, VALID_AGG_JSON_WITH_ID),
(VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON),
])
@pytest.mark.parametrize('fx,obs,agg,forecast_type,include_ref_fx', [
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast', False),
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast', True),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID,
'forecast', False),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID,
'forecast', True),
(VALID_CDF_FORECAST_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast', False),
(VALID_CDF_FORECAST_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast', True),
(VALID_CDF_SINGLE_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast_constant_value', False),
(VALID_CDF_SINGLE_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast_constant_value', True),
(VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON, None,
'event_forecast', False),
(VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON, None,
'event_forecast', True),
])
@pytest.fixture(params=[
('variable', 'bad'), ('interval_length', 120), ('site_id', 'bad'),
('aggregate_id', 'bad'), ('axis', 'y'), ('constant_value', 13.2)])
@pytest.mark.parametrize('fx, forecast_type', [
(VALID_FORECAST_JSON, 'forecast'),
(VALID_FORECAST_AGG_JSON_60, 'forecast'),
(VALID_CDF_FORECAST_JSON, 'probabilistic_forecast'),
(VALID_CDF_SINGLE_JSON, 'probabilistic_forecast_constant_value'),
(VALID_EVENT_FORECAST_JSON, 'event_forecast'),
])
@pytest.mark.parametrize('fx,obs,agg,forecast_type', [
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast'),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID, 'forecast'),
(VALID_CDF_FORECAST_JSON, VALID_OBS_JSON, None, 'probabilistic_forecast'),
(VALID_CDF_SINGLE_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast_constant_value'),
(VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON, None, 'event_forecast'),
])
@pytest.mark.parametrize('fx,obs,agg,forecast_type,include_ref_fx', [
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast', False),
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast', True),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID,
'forecast', False),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID,
'forecast', True),
(VALID_CDF_FORECAST_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast', False),
(VALID_CDF_FORECAST_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast', True),
(VALID_CDF_SINGLE_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast_constant_value', False),
(VALID_CDF_SINGLE_JSON, VALID_OBS_JSON, None,
'probabilistic_forecast_constant_value', True),
(VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON, None,
'event_forecast', False),
(VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON, None,
'event_forecast', True),
])
@pytest.fixture()
@pytest.mark.parametrize('fx,obs,agg,failure_mode', [
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast'),
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'observation'),
(VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'reference_forecast'),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID, 'forecast'),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID, 'aggregate'),
(VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID,
'reference_forecast'),
])
@pytest.mark.parametrize("data", [13, 17, 52])
@pytest.mark.parametrize("data", [1, 3, 5, 15, 30, 60, 90])
| 34.458515 | 79 | 0.692371 | from copy import deepcopy
import datetime as dt
from marshmallow.exceptions import ValidationError
import pytest
import pytz
from sfa_api.conftest import (VALID_OBS_JSON, VALID_FORECAST_JSON,
VALID_CDF_FORECAST_JSON, VALID_FORECAST_AGG_JSON,
VALID_AGG_JSON)
from sfa_api.utils.errors import StorageAuthError
from sfa_api.utils import validators
@pytest.mark.parametrize('thetime', [
'09:00', '9:00', '00:00'
])
def test_time_format(thetime):
assert validators.TimeFormat('%H:%M')(thetime) == thetime
@pytest.mark.parametrize('bad', [
'25:00', '00:00:00', 'ab:cd', '10:88'
])
def test_time_format_fail(bad):
with pytest.raises(ValidationError):
validators.TimeFormat('%H:%M')(bad)
@pytest.mark.parametrize('thestring', [
'mysite', 'Site 1', 'A really long but otherwise OK site',
"apostrophe '", 'site_99', 'site tucson, az',
"Test (site)", 'w,', 'test-hyphen'
])
def test_userstring(thestring):
assert validators.UserstringValidator()(
thestring) == thestring
@pytest.mark.parametrize('thestring', [
'<script>bac</script>', '<', ';delete',
'site:a:b', 'site+1', 'site\\G',
'site\n', '', ' ', "'", "' ", '_', ',',
',_', '()', "'()',", "(){ :|:& };"
])
def test_invalid_userstring(thestring):
with pytest.raises(ValidationError):
validators.UserstringValidator()(thestring)
@pytest.mark.parametrize('tz', [
'America/Phoenix',
'Etc/GMT+7'
])
def test_timezonevalidator(tz):
assert validators.TimezoneValidator()(tz) == tz
@pytest.mark.parametrize('tz', ['PDT', 'Germany/Berlin'])
def test_timezonevalidator_fail(tz):
with pytest.raises(ValidationError):
validators.TimezoneValidator()(tz)
@pytest.mark.parametrize('time_', [
dt.datetime(2019, 1, 1, 12, 3, tzinfo=pytz.timezone('MST')),
dt.datetime(2019, 1, 1, 12, 3),
dt.datetime(1969, 12, 31, 17, 0, 1, tzinfo=pytz.timezone('MST')),
])
def test_timelimit_validator(time_):
assert validators.TimeLimitValidator()(time_) == time_
@pytest.mark.parametrize('time_', [
dt.datetime(2049, 1, 1, 12, 3),
dt.datetime(1969, 12, 31, 14, 0, 1, tzinfo=pytz.timezone('MST')),
])
def test_timelimit_validator_fail(time_):
with pytest.raises(ValidationError):
validators.TimeLimitValidator()(time_)
@pytest.mark.parametrize("valid", [
None, "observation_uncertainty", "0.0",
] + list(range(0, 101, 10))
)
def test_uncertainty_validator(valid):
assert validators.UncertaintyValidator()(valid) == valid
@pytest.mark.parametrize("invalid", [
"None", "bad string", "101", "-1.0"
])
def test_uncertainty_validator_errors(invalid):
with pytest.raises(ValidationError):
validators.UncertaintyValidator()(invalid)
@pytest.mark.parametrize("data", [
{'variable': 'event', 'interval_label': 'event'},
{'variable': 'notevent', 'interval_label': 'notevent'},
])
def test_validate_if_event(data):
validators.validate_if_event({}, data)
@pytest.mark.parametrize("data", [
{'variable': 'event', 'interval_label': 'notevent'},
{'variable': 'notevent', 'interval_label': 'event'},
])
def test_validate_if_event_error(data):
with pytest.raises(ValidationError):
validators.validate_if_event({}, data)
# Create objects for testing report object pairs
VALID_CDF_SINGLE_JSON = deepcopy(VALID_CDF_FORECAST_JSON)
VALID_CDF_SINGLE_JSON.pop('constant_values')
VALID_CDF_SINGLE_JSON.update({
'axis': 'x',
'constant_value': '5.0'
})
VALID_FORECAST_AGG_JSON_60 = deepcopy(VALID_FORECAST_AGG_JSON)
VALID_FORECAST_AGG_JSON_60['interval_length'] = 60
VALID_AGG_JSON_WITH_ID = deepcopy(VALID_AGG_JSON)
VALID_AGG_JSON_WITH_ID.update({
'aggregate_id': VALID_FORECAST_AGG_JSON_60['aggregate_id'],
})
VALID_EVENT_FORECAST_JSON = deepcopy(VALID_FORECAST_JSON)
VALID_EVENT_FORECAST_JSON.update({
'variable': 'event',
'interval_label': 'event',
})
VALID_EVENT_OBS_JSON = deepcopy(VALID_OBS_JSON)
VALID_EVENT_OBS_JSON.update({
'variable': 'event',
'interval_label': 'event',
})
@pytest.fixture()
def mock_reads(mocker):
def fn(fx=None, obs=None, agg=None, ref_fx=None):
storage_mock = mocker.MagicMock()
storage_mock.read_forecast = mocker.MagicMock(side_effect=[fx, ref_fx])
storage_mock.read_cdf_forecast_group = mocker.MagicMock(
side_effect=[fx, ref_fx])
storage_mock.read_cdf_forecast = mocker.MagicMock(
side_effect=[fx, ref_fx])
storage_mock.read_observation = mocker.MagicMock(return_value=obs)
storage_mock.read_aggregate = mocker.MagicMock(return_value=agg)
mocker.patch('sfa_api.utils.validators.get_storage',
return_value=storage_mock)
return storage_mock
return fn
@pytest.mark.parametrize('fx,meas', [
(VALID_FORECAST_JSON, VALID_OBS_JSON),
(VALID_CDF_FORECAST_JSON, VALID_OBS_JSON),
(VALID_CDF_SINGLE_JSON, VALID_OBS_JSON),
(VALID_FORECAST_AGG_JSON_60, VALID_AGG_JSON_WITH_ID),
(VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON),
])
def test__ensure_forecast_measurement_compatibility(fx, meas):
errors = validators._ensure_forecast_measurement_compatibility(fx, meas)
assert not errors
@pytest.fixture(params=[
    ('variable', 'bad'), ('interval_length', 120), ('site_id', 'bad'),
    ('aggregate_id', 'bad')])
def update_object_params(request):
    """Parametrized factory that corrupts one field of a metadata dict.

    The returned callable deep-copies ``obj``, sets the parametrized
    field to an incompatible value, and returns ``(mutated, field)``.
    Parameter sets whose field is absent from the object are skipped.
    """
    key, bad_value = request.param

    def fn(obj):
        mutated = deepcopy(obj)
        if key not in mutated:
            pytest.skip(f'{key} not in object')
        mutated[key] = bad_value
        return mutated, key
    return fn
@pytest.mark.parametrize('fx,meas', [
    (VALID_FORECAST_JSON, VALID_OBS_JSON),
    (VALID_CDF_FORECAST_JSON, VALID_OBS_JSON),
    (VALID_CDF_SINGLE_JSON, VALID_OBS_JSON),
    (VALID_FORECAST_AGG_JSON_60, VALID_AGG_JSON_WITH_ID),
    (VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON),
])
def test__ensure_forecast_measurement_compatibility_errors(
        update_object_params, fx, meas):
    """Corrupting one shared field of the measurement yields that field's error."""
    meas, error_key = update_object_params(meas)
    errors = validators._ensure_forecast_measurement_compatibility(fx, meas)
    # interval_length is an inequality constraint; every other field must
    # match the forecast exactly.
    if error_key == 'interval_length':
        assert errors[error_key] == ('Must be less than or equal to forecast '
                                     f'{error_key}.')
    else:
        assert errors[error_key] == f'Must match forecast {error_key}.'
@pytest.mark.parametrize('fx,obs,agg,forecast_type,include_ref_fx', [
    (VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast', False),
    (VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast', True),
    (VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID,
     'forecast', False),
    (VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID,
     'forecast', True),
    (VALID_CDF_FORECAST_JSON, VALID_OBS_JSON, None,
     'probabilistic_forecast', False),
    (VALID_CDF_FORECAST_JSON, VALID_OBS_JSON, None,
     'probabilistic_forecast', True),
    (VALID_CDF_SINGLE_JSON, VALID_OBS_JSON, None,
     'probabilistic_forecast_constant_value', False),
    (VALID_CDF_SINGLE_JSON, VALID_OBS_JSON, None,
     'probabilistic_forecast_constant_value', True),
    (VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON, None,
     'event_forecast', False),
    (VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON, None,
     'event_forecast', True),
])
def test_ensure_pair_compatibility(mock_reads, fx, obs, agg, forecast_type,
                                   include_ref_fx):
    """Fully compatible pairs validate without errors.

    Covers every forecast type, observation- and aggregate-based pairs,
    and pairs with and without a reference forecast.
    """
    if include_ref_fx:
        ref_fx = deepcopy(fx)
        ref_fx['name'] = 'test reference_forecast'
    else:
        ref_fx = None
    mock_reads(fx, obs, agg, ref_fx)
    # pair will typically contain uuids but with mocked sql, truthy dicts and
    # falsy None will suffice.
    pair = {
        'forecast': fx,
        'observation': obs,
        'aggregate': agg,
        'reference_forecast': ref_fx,
        'forecast_type': forecast_type,
    }
    errors = validators.ensure_pair_compatibility(pair)
    assert not errors
@pytest.fixture(params=[
    ('variable', 'bad'), ('interval_length', 120), ('site_id', 'bad'),
    ('aggregate_id', 'bad'), ('axis', 'y'), ('constant_value', 13.2)])
def update_reference_params(request):
    """Parametrized factory that corrupts one field of a reference forecast.

    Deep-copies the object, sets the parametrized field to an incompatible
    value and returns ``(mutated, field)``; skips when the field is absent.
    """
    key, bad_value = request.param

    def fn(obj):
        mutated = deepcopy(obj)
        if key not in mutated:
            pytest.skip(f'{key} not in reference forecast')
        mutated[key] = bad_value
        return mutated, key
    return fn
@pytest.mark.parametrize('fx, forecast_type', [
    (VALID_FORECAST_JSON, 'forecast'),
    (VALID_FORECAST_AGG_JSON_60, 'forecast'),
    (VALID_CDF_FORECAST_JSON, 'probabilistic_forecast'),
    (VALID_CDF_SINGLE_JSON, 'probabilistic_forecast_constant_value'),
    (VALID_EVENT_FORECAST_JSON, 'event_forecast'),
])
def test__ensure_forecast_reference_compatibility_errors(
        update_reference_params, fx, forecast_type):
    """A reference forecast differing in any shared field is rejected."""
    # Start from an identical copy, then corrupt one field.
    ref_fx = deepcopy(fx)
    ref_fx['name'] = 'test reference_forecast'
    ref_fx, error_key = update_reference_params(ref_fx)
    errors = validators._ensure_forecast_reference_compatibility(
        fx, ref_fx, forecast_type)
    assert errors[error_key] == (
        f'Must match forecast {error_key}.')
@pytest.mark.parametrize('fx,obs,agg,forecast_type', [
    (VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast'),
    (VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID, 'forecast'),
    (VALID_CDF_FORECAST_JSON, VALID_OBS_JSON, None, 'probabilistic_forecast'),
    (VALID_CDF_SINGLE_JSON, VALID_OBS_JSON, None,
     'probabilistic_forecast_constant_value'),
    (VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON, None, 'event_forecast'),
])
def test_ensure_pair_compatibility_reference_errors(
        update_reference_params, mock_reads, fx, obs, agg, forecast_type):
    """An incompatible reference forecast is reported only under
    'reference_forecast'; the observation/aggregate keys stay clean."""
    ref_fx = deepcopy(fx)
    ref_fx['name'] = 'test reference_forecast'
    ref_fx, error_key = update_reference_params(ref_fx)
    mock_reads(fx, obs, agg, ref_fx)
    # pair will typically contain uuids but with mocked sql, truthy dicts and
    # falsy None will suffice.
    pair = {
        'forecast': fx,
        'observation': obs,
        'aggregate': agg,
        'reference_forecast': ref_fx,
        'forecast_type': forecast_type,
    }
    with pytest.raises(ValidationError) as e:
        validators.ensure_pair_compatibility(pair)
    errors = e.value.messages
    assert errors['reference_forecast'][error_key] == (
        f'Must match forecast {error_key}.')
    assert 'observation' not in errors
    assert 'aggregate' not in errors
@pytest.mark.parametrize('fx,obs,agg,forecast_type,include_ref_fx', [
    (VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast', False),
    (VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast', True),
    (VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID,
     'forecast', False),
    (VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID,
     'forecast', True),
    (VALID_CDF_FORECAST_JSON, VALID_OBS_JSON, None,
     'probabilistic_forecast', False),
    (VALID_CDF_FORECAST_JSON, VALID_OBS_JSON, None,
     'probabilistic_forecast', True),
    (VALID_CDF_SINGLE_JSON, VALID_OBS_JSON, None,
     'probabilistic_forecast_constant_value', False),
    (VALID_CDF_SINGLE_JSON, VALID_OBS_JSON, None,
     'probabilistic_forecast_constant_value', True),
    (VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON, None,
     'event_forecast', False),
    (VALID_EVENT_FORECAST_JSON, VALID_EVENT_OBS_JSON, None,
     'event_forecast', True),
])
def test_ensure_pair_compatibility_agg_obs_errors(
        update_object_params, mock_reads, fx, obs, agg, forecast_type,
        include_ref_fx):
    """An incompatible observation/aggregate is reported under its own key.

    Exactly one of ``obs``/``agg`` is non-None in every parameter set
    above, so one of the two branches below always selects the field to
    corrupt.
    """
    if include_ref_fx:
        ref_fx = deepcopy(fx)
        ref_fx['name'] = 'test reference_forecast'
    else:
        ref_fx = None
    if obs is not None:
        error_field = 'observation'
        dne_field = 'aggregate'
        obs, error_key = update_object_params(obs)
    elif agg is not None:
        error_field = 'aggregate'
        dne_field = 'observation'
        agg, error_key = update_object_params(agg)
    mock_reads(fx, obs, agg, ref_fx)
    # pair will typically contain uuids but with mocked sql, truthy dicts and
    # falsy None will suffice.
    pair = {
        'forecast': fx,
        'observation': obs,
        'aggregate': agg,
        'reference_forecast': ref_fx,
        'forecast_type': forecast_type,
    }
    with pytest.raises(ValidationError) as e:
        validators.ensure_pair_compatibility(pair)
    errors = e.value.messages
    field_errors = errors[error_field]
    # interval_length is an inequality constraint; other fields must match.
    if error_key == 'interval_length':
        assert field_errors[error_key] == (
            f'Must be less than or equal to forecast {error_key}.')
    else:
        assert field_errors[error_key] == f'Must match forecast {error_key}.'
    assert dne_field not in errors
    assert 'reference_forecast' not in errors
@pytest.fixture()
def mock_reads_with_failure(mocker):
    """Factory fixture like ``mock_reads`` but with one failing read.

    The object named by ``failure`` ('forecast', 'reference_forecast',
    'observation' or 'aggregate') raises ``StorageAuthError`` when read;
    every other read succeeds with the supplied value.
    """
    def fn(failure, fx=None, obs=None, agg=None, ref_fx=None):
        storage = mocker.MagicMock()
        # The two successive forecast reads return (forecast, reference);
        # substitute the error into whichever position should fail.
        sequences = {
            'forecast': [StorageAuthError, ref_fx],
            'reference_forecast': [fx, StorageAuthError],
        }
        forecast_se = sequences.get(failure, [fx, ref_fx])
        for reader in ('read_forecast', 'read_cdf_forecast_group',
                       'read_cdf_forecast'):
            setattr(storage, reader,
                    mocker.MagicMock(side_effect=forecast_se))
        if failure == 'observation':
            storage.read_observation = mocker.MagicMock(
                side_effect=StorageAuthError)
        else:
            storage.read_observation = mocker.MagicMock(return_value=obs)
        if failure == 'aggregate':
            storage.read_aggregate = mocker.MagicMock(
                side_effect=StorageAuthError)
        else:
            storage.read_aggregate = mocker.MagicMock(return_value=agg)
        mocker.patch('sfa_api.utils.validators.get_storage',
                     return_value=storage)
        return storage
    return fn
@pytest.mark.parametrize('fx,obs,agg,failure_mode', [
    (VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'forecast'),
    (VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'observation'),
    (VALID_FORECAST_JSON, VALID_OBS_JSON, None, 'reference_forecast'),
    (VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID, 'forecast'),
    (VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID, 'aggregate'),
    (VALID_FORECAST_AGG_JSON_60, None, VALID_AGG_JSON_WITH_ID,
     'reference_forecast'),
])
def test_ensure_pair_compatibility_object_dne(
        mock_reads_with_failure, fx, obs, agg, failure_mode):
    """A storage read failure for any pair member maps to 'Does not exist.'"""
    ref_fx = deepcopy(fx)
    ref_fx['name'] = 'test reference_forecast'
    mock_reads_with_failure(failure_mode, fx, obs, agg, ref_fx)
    # pair will typically contain uuids but with mocked sql, truthy dicts and
    # falsy None will suffice.
    pair = {
        'forecast': fx,
        'observation': obs,
        'aggregate': agg,
        'reference_forecast': ref_fx,
        'forecast_type': 'forecast',
    }
    with pytest.raises(ValidationError) as e:
        validators.ensure_pair_compatibility(pair)
    errors = e.value.messages
    assert errors[failure_mode] == 'Does not exist.'
@pytest.mark.parametrize("data", [13, 17, 52])
def test_AggregateIntervalValidator_errors(data):
    """Interval lengths outside the accepted set raise ValidationError."""
    with pytest.raises(ValidationError):
        validators.AggregateIntervalValidator()(data)
@pytest.mark.parametrize("data", [1, 3, 5, 15, 30, 60, 90])
def test_AggregateIntervalValidator(data):
    """Accepted aggregate interval lengths validate without raising."""
    validators.AggregateIntervalValidator()(data)
| 8,261 | 0 | 550 |
17e973105c7f12ca48b7215f6a40b2fd0f991575 | 1,720 | py | Python | __init__.py | Pakniat/PySimplex | 0e2a1d0b3a1c5538e123e3b669c418b808a81341 | [
"MIT"
] | 5 | 2018-07-22T17:17:10.000Z | 2021-11-13T04:11:19.000Z | __init__.py | Pakniat/PySimplex | 0e2a1d0b3a1c5538e123e3b669c418b808a81341 | [
"MIT"
] | null | null | null | __init__.py | Pakniat/PySimplex | 0e2a1d0b3a1c5538e123e3b669c418b808a81341 | [
"MIT"
] | 2 | 2018-11-16T15:46:18.000Z | 2020-04-06T18:07:57.000Z | from Tkinter import *
from simplex import Simplex
from textOperator import Operator
vertices = []  # shared list of Simplex vertex objects used by the callbacks
master = Tk()
# NOTE(review): this rebinds the Tk instance's ``title`` method to a plain
# string instead of calling ``master.title("master")``; the window title is
# actually set by ``wm_title`` below — confirm before relying on ``title``.
master.title = "master"
# Prompt labels for each entry field.
Label(master, text="enter number of vertices").grid(row=0)
Label(master, text="enter complete connection without "+"'.'").grid(row=1)
Label(master, text="enter connection with "+"'.'").grid(row=2)
Label(master, text="enter connection for delete with "+"'.'").grid(row=4)
# Entry widgets read by the button callbacks.
e1 = Entry(master)
e2 = Entry(master)
e3 = Entry(master)
e4 = Entry(master)
e1.grid(row=0, column=1)
e2.grid(row=1, column=1)
e3.grid(row=2, column=1)
e4.grid(row=4, column=1)
# NOTE(review): ``decision_mode`` and ``delVertices`` are not defined in
# this excerpt — presumably stripped by the dataset filter; evaluating the
# Button lines below would raise NameError as written. TODO confirm.
Button(master, text='Quit', command=master.quit).grid(row=9, column=0)
Button(master, text='Create', command=decision_mode).grid(row=9, column=1)
Button(master, text='Delete', command=delVertices).grid(row=10, column=1)
master.wm_title("Simplex Tree")
mainloop() | 27.301587 | 74 | 0.687791 | from Tkinter import *
from simplex import Simplex
from textOperator import Operator
vertices = []
def decision_mode():
    """Choose a builder based on whether the complete-connection entry is filled."""
    complete_spec = e2.get()
    if complete_spec == '':
        # Nothing in e2: build from the simple (dotted) spec in e3 instead.
        create_simple_structure()
    else:
        create_complete_structure()
def create_complete_structure():
    """Create the requested number of vertices and wire them with the
    complete-connection spec taken from entry e2."""
    op = Operator()
    vertex_count = int(e1.get())
    # Vertices are labelled "1".."n" and accumulated in the shared list.
    for label in range(1, vertex_count + 1):
        vertices.append(Simplex(str(label)))
    op.create_complete_connection(e2.get(), vertices)
    op.show_structures(vertices)
def create_simple_structure():
    """Create the requested number of vertices and wire them with the
    dotted connection spec taken from entry e3."""
    op = Operator()
    vertex_count = int(e1.get())
    # Vertices are labelled "1".."n" and accumulated in the shared list.
    for label in range(1, vertex_count + 1):
        vertices.append(Simplex(str(label)))
    op.create_simple_connection(e3.get(), vertices)
    op.show_structures(vertices)
def delVertices():
    """Delete the connection named in entry e4, then redisplay the structures."""
    op = Operator()
    spec = e4.get()
    # Only attempt deletion for inputs long enough to name a connection.
    if len(spec) > 2:
        op.delete_connection(spec, vertices)
    op.show_structures(vertices)
master = Tk()
# NOTE(review): this rebinds the Tk instance's ``title`` method to a plain
# string instead of calling ``master.title("master")``; the window title is
# actually set by ``wm_title`` below — confirm before relying on ``title``.
master.title = "master"
# Prompt labels for each entry field.
Label(master, text="enter number of vertices").grid(row=0)
Label(master, text="enter complete connection without "+"'.'").grid(row=1)
Label(master, text="enter connection with "+"'.'").grid(row=2)
Label(master, text="enter connection for delete with "+"'.'").grid(row=4)
# Entry widgets read by the button callbacks defined above.
e1 = Entry(master)
e2 = Entry(master)
e3 = Entry(master)
e4 = Entry(master)
e1.grid(row=0, column=1)
e2.grid(row=1, column=1)
e3.grid(row=2, column=1)
e4.grid(row=4, column=1)
# Buttons: quit the app, build the structure, or delete a connection.
Button(master, text='Quit', command=master.quit).grid(row=9, column=0)
Button(master, text='Create', command=decision_mode).grid(row=9, column=1)
Button(master, text='Delete', command=delVertices).grid(row=10, column=1)
master.wm_title("Simplex Tree")
mainloop() | 774 | 0 | 92 |
cb418529944176d92c51b753aa5690df508cc64a | 1,846 | py | Python | ribosome/nvim/api/rpc.py | tek/ribosome-py | 8bd22e549ddff1ee893d6e3a0bfba123a09e96c6 | [
"MIT"
] | null | null | null | ribosome/nvim/api/rpc.py | tek/ribosome-py | 8bd22e549ddff1ee893d6e3a0bfba123a09e96c6 | [
"MIT"
] | null | null | null | ribosome/nvim/api/rpc.py | tek/ribosome-py | 8bd22e549ddff1ee893d6e3a0bfba123a09e96c6 | [
"MIT"
] | null | null | null | from typing import Tuple, Any
from amino import _, Either, Map, Left, Right, do, Do
from amino.state import State
from ribosome.nvim.io.compute import NvimIO, NvimIOSuspend, NvimIOPure
from ribosome.nvim.io.api import N
from ribosome.nvim.api.function import nvim_call_function, nvim_call_tpe
from ribosome.nvim.api.command import nvim_command
from ribosome import NvimApi
@do(NvimIO[int])
@do(NvimIO[Any])
__all__ = ('plugin_name', 'api_info', 'channel_id', 'rpcrequest', 'rpcrequest_current', 'nvim_quit', 'nvim_api',
'nvim_pid',)
| 29.301587 | 112 | 0.656555 | from typing import Tuple, Any
from amino import _, Either, Map, Left, Right, do, Do
from amino.state import State
from ribosome.nvim.io.compute import NvimIO, NvimIOSuspend, NvimIOPure
from ribosome.nvim.io.api import N
from ribosome.nvim.api.function import nvim_call_function, nvim_call_tpe
from ribosome.nvim.api.command import nvim_command
from ribosome import NvimApi
def plugin_name() -> NvimIO[str]:
    """Return the plugin name, read lazily via ``N.delay``.

    ``_.name`` is an amino lens-style accessor — presumably applied to the
    active ``NvimApi`` when the program runs; confirm against ``N.delay``.
    """
    return N.delay(_.name)
def api_info() -> NvimIO[Tuple[int, dict]]:
    """Fetch ``nvim_get_api_info`` and validate its (channel, metadata) payload."""
    def cons(data: Any) -> Either[str, Tuple[int, Map[str, Any]]]:
        # Guard clauses mirror the payload shape: a two-element sequence
        # of (int channel, dict metadata).
        if not isinstance(data, (list, tuple)):
            return Left(f'not a tuple: {data}')
        if len(data) != 2:
            return Left(f'invalid tuple size: {data}')
        if not isinstance(data[0], int):
            return Left(f'channel is not an int: {data}')
        if not isinstance(data[1], dict):
            return Left(f'metadata is not a dict: {data}')
        return Right(data).map2(lambda channel, meta: (channel, Map(meta)))
    return N.read_cons_strict('nvim_get_api_info', cons)
@do(NvimIO[int])
def channel_id() -> Do:
    """Return this session's RPC channel id from ``nvim_get_api_info``."""
    channel, metadata = yield api_info()
    return channel
def rpcrequest(channel: int, method: str, *args: str) -> NvimIO[Any]:
    """Invoke vim's ``rpcrequest`` function on ``channel``.

    Note that ``args`` is forwarded as a single tuple argument.
    """
    return nvim_call_function('rpcrequest', channel, method, args)
@do(NvimIO[Any])
def rpcrequest_current(method: str, *args: str) -> Do:
    """Send an rpcrequest over this session's own channel."""
    channel = yield channel_id()
    yield rpcrequest(channel, method, *args)
def nvim_quit() -> NvimIO[None]:
    """Quit all windows, discarding unsaved changes (``:qall!``)."""
    return nvim_command('qall!')
def nvim_api() -> NvimIO[NvimApi]:
    """Yield the ``NvimApi`` instance currently held in the NvimIO state."""
    return NvimIOSuspend.cons(State.get().map(NvimIOPure))
def nvim_pid() -> NvimIO[int]:
    """Return the neovim process id via the vim function ``getpid``."""
    return nvim_call_tpe(int, 'getpid')
__all__ = ('plugin_name', 'api_info', 'channel_id', 'rpcrequest', 'rpcrequest_current', 'nvim_quit', 'nvim_api',
'nvim_pid',)
| 1,106 | 0 | 182 |
dadd2715e447ed38d24d7d73015964802b5bb91d | 2,670 | py | Python | Linked Lists/add_two_numbers_two.py | fredricksimi/leetcode | f6352c26914ca77f915f5994746ecf0b36efc89b | [
"MIT"
] | null | null | null | Linked Lists/add_two_numbers_two.py | fredricksimi/leetcode | f6352c26914ca77f915f5994746ecf0b36efc89b | [
"MIT"
] | null | null | null | Linked Lists/add_two_numbers_two.py | fredricksimi/leetcode | f6352c26914ca77f915f5994746ecf0b36efc89b | [
"MIT"
] | 1 | 2021-12-05T12:27:46.000Z | 2021-12-05T12:27:46.000Z | """
Add Two Numbers II: Leetcode 445
You are given two non-empty linked lists representing two non-negative integers.
The most significant digit comes first and each of their nodes contain a single digit. Add the two numbers and return it as a linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
Follow up:
What if you cannot modify the input lists? In other words, reversing the lists is not allowed.
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
# 0(max(n+m)) time | 0(n+m) space
"""
Example:
Input: (7 -> 2 -> 4 -> 3) + (5 -> 6 -> 4)
Output: 7 -> 8 -> 0 -> 7
input:
[7,2,4,3]
[5,6,4]
[9,8,7,6,6,7,8,9]
[9,8,7,6,6,7,8,9]
[1,2,3,4,5,5,6,9]
[1,2,3,4,5,5,6,9]
output:
[7,8,0,7]
[7,8,0,7]
[1,9,7,5,3,3,5,7,8]
[2,4,6,9,1,1,3,8]
[1,5]
"""
| 26.969697 | 138 | 0.527715 | """
Add Two Numbers II: Leetcode 445
You are given two non-empty linked lists representing two non-negative integers.
The most significant digit comes first and each of their nodes contain a single digit. Add the two numbers and return it as a linked list.
You may assume the two numbers do not contain any leading zero, except the number 0 itself.
Follow up:
What if you cannot modify the input lists? In other words, reversing the lists is not allowed.
"""
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
# 0(max(n+m)) time | 0(n+m) space
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two numbers stored most-significant-digit-first in lists.

        Digits are pushed onto stacks so they can be consumed least
        significant first; each result digit is prepended so the output
        is again most-significant-digit first.

        Time: O(max(n, m)). Space: O(n + m) for the stacks.
        """
        def digits(node: ListNode) -> list:
            # Collect a list's digit values, most significant first.
            out = []
            while node:
                out.append(node.val)
                node = node.next
            return out

        stack_one = digits(l1)
        stack_two = digits(l2)

        head = None  # current most-significant node of the result
        carry = 0
        # Keep going while either list has digits or a carry remains
        # (handles e.g. 99 + 99 producing an extra leading digit).
        while stack_one or stack_two or carry:
            total = carry
            if stack_one:
                total += stack_one.pop()
            if stack_two:
                total += stack_two.pop()
            carry, digit = divmod(total, 10)
            node = ListNode(digit)
            node.next = head
            head = node
        return head
"""
Example:
Input: (7 -> 2 -> 4 -> 3) + (5 -> 6 -> 4)
Output: 7 -> 8 -> 0 -> 7
input:
[7,2,4,3]
[5,6,4]
[9,8,7,6,6,7,8,9]
[9,8,7,6,6,7,8,9]
[1,2,3,4,5,5,6,9]
[1,2,3,4,5,5,6,9]
output:
[7,8,0,7]
[7,8,0,7]
[1,9,7,5,3,3,5,7,8]
[2,4,6,9,1,1,3,8]
[1,5]
"""
| 1,678 | -6 | 48 |
8f27e1b6b94aefcfd70d084f82b2e0b4e27cc1c0 | 1,896 | py | Python | networkx/watts.py | Yili0616/graph-analytics-comparison | 55ea2458e487885f95fef411cce0521eeb322882 | [
"Apache-2.0"
] | null | null | null | networkx/watts.py | Yili0616/graph-analytics-comparison | 55ea2458e487885f95fef411cce0521eeb322882 | [
"Apache-2.0"
] | null | null | null | networkx/watts.py | Yili0616/graph-analytics-comparison | 55ea2458e487885f95fef411cce0521eeb322882 | [
"Apache-2.0"
] | null | null | null | import networkx as nx
import time as t
# Generating Watts_strogatz_graph using networkx.
# Four parameters:
# n (int) – The number of nodes
# k (int) – Each node is joined with its k nearest neighbors in a ring topology.
# p (float) – The probability of rewiring each edge
# seed (int, optional) – Seed for random number generator (default=None)
time1 = t.time()
G = nx.watts_strogatz_graph(100000,200,0.5)
time2 = t.time()
print "1st: watts_strogatz_graph 100000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(100000,200,0.5)
time2 = t.time()
print "2nd: watts_strogatz_graph 100000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(100000,200,0.5)
time2 = t.time()
print "3rd: watts_strogatz_graph 100000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(100000,200,0.5)
time2 = t.time()
print "4th: watts_strogatz_graph 100000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(100000,200,0.5)
time2 = t.time()
print "5th: watts_strogatz_graph 100000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(1000000,200,0.5)
time2 = t.time()
print "1st: watts_strogatz_graph 1000000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(1000000,200,0.5)
time2 = t.time()
print "2nd: watts_strogatz_graph 1000000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(1000000,200,0.5)
time2 = t.time()
print "3rd: watts_strogatz_graph 1000000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(1000000,200,0.5)
time2 = t.time()
print "4th: watts_strogatz_graph 1000000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(1000000,200,0.5)
time2 = t.time()
print "5th: watts_strogatz_graph 1000000,200"
print time2-time1 | 21.303371 | 81 | 0.700949 | import networkx as nx
import time as t
# NOTE(review): this duplicate of the benchmark relies on ``networkx``
# already being imported as ``nx``; its own import line was garbled by the
# dataset extraction on the preceding line — confirm before running.
# Generating Watts_strogatz_graph using networkx.
# Four parameters:
# n (int) – The number of nodes
# k (int) – Each node is joined with its k nearest neighbors in a ring topology.
# p (float) – The probability of rewiring each edge
# seed (int, optional) – Seed for random number generator (default=None)
# --- five timed runs at n=100000, k=200, p=0.5 ---
time1 = t.time()
G = nx.watts_strogatz_graph(100000,200,0.5)
time2 = t.time()
print "1st: watts_strogatz_graph 100000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(100000,200,0.5)
time2 = t.time()
print "2nd: watts_strogatz_graph 100000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(100000,200,0.5)
time2 = t.time()
print "3rd: watts_strogatz_graph 100000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(100000,200,0.5)
time2 = t.time()
print "4th: watts_strogatz_graph 100000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(100000,200,0.5)
time2 = t.time()
print "5th: watts_strogatz_graph 100000,200"
print time2-time1
# --- five timed runs at n=1000000, k=200, p=0.5 ---
time1 = t.time()
G = nx.watts_strogatz_graph(1000000,200,0.5)
time2 = t.time()
print "1st: watts_strogatz_graph 1000000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(1000000,200,0.5)
time2 = t.time()
print "2nd: watts_strogatz_graph 1000000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(1000000,200,0.5)
time2 = t.time()
print "3rd: watts_strogatz_graph 1000000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(1000000,200,0.5)
time2 = t.time()
print "4th: watts_strogatz_graph 1000000,200"
print time2-time1
time1 = t.time()
G = nx.watts_strogatz_graph(1000000,200,0.5)
time2 = t.time()
print "5th: watts_strogatz_graph 1000000,200"
print "5th: watts_strogatz_graph 1000000,200"
print time2-time1 | 0 | 0 | 0 |
25b176845f58f23df75e0f18d4e7e713f0525afe | 7,273 | py | Python | dnp3stalker_serial.py | cutaway-security/dnp3stalker | 251d3827e9ce301fa6a21f98435c991ed6640eb8 | [
"MIT"
] | 2 | 2022-01-31T09:33:31.000Z | 2022-02-10T05:24:40.000Z | dnp3stalker_serial.py | cutaway-security/dnp3stalker | 251d3827e9ce301fa6a21f98435c991ed6640eb8 | [
"MIT"
] | null | null | null | dnp3stalker_serial.py | cutaway-security/dnp3stalker | 251d3827e9ce301fa6a21f98435c991ed6640eb8 | [
"MIT"
] | null | null | null | ###############################
# Import Python modules
###############################
import sys,os
# NOTE: Uncomment these lines if you are putting the modules in the local directory
#sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'pyserial.serial'))
#sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'crcmod'))
import serial
import crcmod.predefined
import time
####################
# DNP Command Setup
# NOTE: Use strings in hex where you can to be consistent with
# bytes.fromhex(str) for functions.
####################
src_address = 1023
dst_address = 1
SRC_ADDR = src_address.to_bytes(2,'little')
DST_ADDR = dst_address.to_bytes(2,'little')
DNP_HEADER = '0564'
####################
# Helper Functions DNP Commands
####################
# Generate DNP3 CRC
# Build Header Packet
# Build Object Packet
####################
# Full DNP Commands
####################
# Data Link Layer Control Codes
## Producer
DLLCC_P_ACK = '80'
DLLCC_P_NACK = '81'
DLLCC_P_LINK_STATUS = '8B'
DLLCC_P_NOT_SUPPORTED = '8F'
DLLCC_P_RESET_LINK_STATES = 'C0'
DLLCC_P_UNCONFIRMED_USER_DATA = 'C4'
DLLCC_P_REQUEST_LINK_STATUS = 'C9'
DLLCC_P_TEST_LINK_STATES = 'D2'
DLLCC_P_CONFIRMED_USER_DATA_D = 'D3'
DLLCC_P_CONFIRMED_USER_DATA_F = 'F3'
## Consumer
DLLCC_O_ACK = '00'
DLLCC_O_NACK = '01'
DLLCC_O_LINK_STATUS = 'OF'
DLLCC_O_NOT_SUPPORTED = '0F'
DLLCC_O_RESET_LINK_STATES = '40'
DLLCC_O_UNCONFIRMED_USER_DATA = '44'
DLLCC_O_REQUEST_LINK_STATUS = '49'
DLLCC_O_TEST_LINK_STATES = '52'
DLLCC_O_CONFIRMED_USER_DATA_D = '53'
DLLCC_O_CONFIRMED_USER_DATA_F = '73'
# Function Codes
FC_CONFIRM = '00'
FC_READ = '01'
FC_WRITE = '02'
FC_SELECT = '03'
FC_OPERATOR = '04'
FC_DIR_OPERATE = '05'
FC_DIR_OPERATE_NO_RESP = '06'
FC_FREEZE = '07'
FC_FREEZE_NO_RESP = '08'
FC_FREEZE_CLEAR = '09'
FC_FREEZE_CLEAR_NO_RESP = '0A'
FC_FREEZE_AT_TIME = '0B'
FC_FREEZE_AT_TIME_NO_RESP = '0C'
FC_COLD_RESTART = '0D'
FC_WARM_RESTART = '0E'
FC_INIT_DATA = '0F'
FC_INIT_APP = '10'
FC_START_APP = '11'
FC_STOP_APP = '12'
FC_SAVE_CONFIG = '13'
FC_ENABLE_UNSOL = '14'
FC_DISABLE_UNSOL = '15'
FC_ASSIGN_CLASS = '16'
FC_DELAY_MEASURE = '17'
FC_RECORD_TIME = '18'
FC_OPEN_FILE = '19'
FC_CLOSE_FILE = '1A'
FC_DELETE_FILE = '1B'
FC_FILE_INFO = '1C'
FC_AUTH_FILE = '1D'
FC_ABORT_FILE = '1E'
FC_ACTIVATE_CONFIG = '1F'
FC_AUTH_REQ = '20'
FC_AUTH_REQ_NO_ACK = '21'
FC_RESP = '81'
FC_UNSOL_RESP = '82'
FC_AUTH_RESP = '83'
TCAC_FIRST_FIN = 'C0C0'
# Broadcast Commands
COLD_RESTART_BROADCAST = '056408C4FFFFFFFF4451C0C00D9C86'
LINK_STATUS_BROADCAST = '056405C9FFFFFFFF46C9'
# Build commands
LINK_STATUS_DIRECT = build_dnp_header(DNP_HEADER,src_address,dst_address,DLLCC_P_REQUEST_LINK_STATUS)
RESET_LINK_STATE_DIRECT = build_dnp_header(DNP_HEADER,src_address,dst_address,DLLCC_P_RESET_LINK_STATES)
TEST_LINK_STATE_DIRECT = build_dnp_header(DNP_HEADER,src_address,dst_address,DLLCC_P_TEST_LINK_STATES)
UNCONFIRMED_USER_DATA = build_dnp_header(DNP_HEADER,src_address,dst_address,DLLCC_P_UNCONFIRMED_USER_DATA)
COLD_RESTART_OBJ = build_dnp_object(TCAC_FIRST_FIN + FC_COLD_RESTART)
WARM_RESTART_OBJ = build_dnp_object(TCAC_FIRST_FIN + FC_WARM_RESTART)
# Wrapper for sending broadcast messages
# s = open serial port
# cmd = string of hex bytes
# Wrapper for sending direct messages
# s = open serial port
# cmd = byte string built from build_dnp_header function
# cmd = byte string built from build_dnp_object
###############
# Setup Serial
###############
port = '/dev/ttyUSB0'
baudrate = 19200
timeout = 1
bytesize = 8
stopbits = serial.STOPBITS_ONE
serialPort = serial.Serial(port=port, baudrate=baudrate,
bytesize=bytesize, timeout=timeout, stopbits=stopbits)
response = b''
print('Starting DNP3 Stalker. Cntl-C to stop sending commands.\n')
while True:
try:
if len(sys.argv) < 2:
print(' Provide a command. Read the code.\n')
break
if sys.argv[1] == 'COLD_BROADCAST': send_broadcast(serialPort, COLD_RESTART_BROADCAST)
if sys.argv[1] == 'LINK_BROADCAST': send_broadcast(serialPort, LINK_STATUS_BROADCAST)
if sys.argv[1] == 'LINK_STAT': send_direct(serialPort, LINK_STATUS_DIRECT)
if sys.argv[1] == 'COLD_RESTART': send_direct(serialPort, UNCONFIRMED_USER_DATA, obj=COLD_RESTART_OBJ)
if sys.argv[1] == 'WARM_RESTART': send_direct(serialPort, UNCONFIRMED_USER_DATA, obj=WARM_RESTART_OBJ)
time.sleep(1)
# TODO: Remove old methods
'''
serialPort.write(bytes.fromhex(COLD_RESTART_BROADCAST))
time.sleep(1)
response = serialPort.read(size=200)
if response: print(response)
time.sleep(1)
serialPort.write(build_dnp_data_header(DNP_HEADER,src_address,dst_address,'C9'))
print("%s"%(build_dnp_data_header(DNP_HEADER,src_address,dst_address,'C9').hex()))
time.sleep(1)
response = serialPort.read(size=200)
if response: print(response)
time.sleep(1)
'''
except KeyboardInterrupt:
break
serialPort.close()
| 34.799043 | 137 | 0.637976 | ###############################
# Import Python modules
###############################
import sys,os
# NOTE: Uncomment these lines if you are putting the modules in the local directory
#sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'pyserial.serial'))
#sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'crcmod'))
import serial
import crcmod.predefined
import time
####################
# DNP Command Setup
# NOTE: Use strings in hex where you can to be consistent with
# bytes.fromhex(str) for functions.
####################
src_address = 1023
dst_address = 1
SRC_ADDR = src_address.to_bytes(2,'little')
DST_ADDR = dst_address.to_bytes(2,'little')
DNP_HEADER = '0564'
####################
# Helper Functions DNP Commands
####################
# Generate DNP3 CRC
def gen_crc(data):
    """Return the 2-byte little-endian CRC-16/DNP of ``data`` (bytes)."""
    # Import DNP3 CRC
    # NOTE: Normal use of CRCMOD and CRCCHECK did not generate the CRC correctly
    # This is the ONLY method that generated the CRC correctly
    # Online CRC Check: https://www.lammertbies.nl/comm/info/crc-calculation
    crcdnp = crcmod.predefined.mkCrcFun('crc-16-dnp')
    return crcdnp(data).to_bytes(2,'little')
# Build Header Packet
def build_dnp_header(head,src,dst,cntl):
    """Build a DNP3 data-link header with its CRC appended.

    head: start bytes as a hex string (e.g. '0564')
    src/dst: integer link addresses, encoded 2 bytes little-endian
    cntl: data-link control byte as a hex string
    """
    # Length will have to be updated if additional data is placed in packet
    plen = 5
    packet = bytes.fromhex(head) + plen.to_bytes(1,'little') + bytes.fromhex(cntl) + dst.to_bytes(2,'little') + src.to_bytes(2,'little')
    crc = gen_crc(packet)
    return packet + crc
# Build Object Packet
def build_dnp_object(data):
    """Append the CRC-16/DNP to a hex-encoded DNP3 object block.

    data: hex string of the block's bytes (e.g. transport control +
          application control + function code).
    Returns the raw bytes followed by their 2-byte little-endian CRC.
    """
    # The unused ``plen = 5`` local from the copied header builder was
    # removed; object blocks carry no length byte of their own.
    packet = bytes.fromhex(data)
    return packet + gen_crc(packet)
####################
# Full DNP Commands
####################
# Data Link Layer Control Codes
## Producer
DLLCC_P_ACK = '80'
DLLCC_P_NACK = '81'
DLLCC_P_LINK_STATUS = '8B'
DLLCC_P_NOT_SUPPORTED = '8F'
DLLCC_P_RESET_LINK_STATES = 'C0'
DLLCC_P_UNCONFIRMED_USER_DATA = 'C4'
DLLCC_P_REQUEST_LINK_STATUS = 'C9'
DLLCC_P_TEST_LINK_STATES = 'D2'
DLLCC_P_CONFIRMED_USER_DATA_D = 'D3'
DLLCC_P_CONFIRMED_USER_DATA_F = 'F3'
## Consumer
DLLCC_O_ACK = '00'
DLLCC_O_NACK = '01'
DLLCC_O_LINK_STATUS = 'OF'
DLLCC_O_NOT_SUPPORTED = '0F'
DLLCC_O_RESET_LINK_STATES = '40'
DLLCC_O_UNCONFIRMED_USER_DATA = '44'
DLLCC_O_REQUEST_LINK_STATUS = '49'
DLLCC_O_TEST_LINK_STATES = '52'
DLLCC_O_CONFIRMED_USER_DATA_D = '53'
DLLCC_O_CONFIRMED_USER_DATA_F = '73'
# Function Codes
FC_CONFIRM = '00'
FC_READ = '01'
FC_WRITE = '02'
FC_SELECT = '03'
FC_OPERATOR = '04'
FC_DIR_OPERATE = '05'
FC_DIR_OPERATE_NO_RESP = '06'
FC_FREEZE = '07'
FC_FREEZE_NO_RESP = '08'
FC_FREEZE_CLEAR = '09'
FC_FREEZE_CLEAR_NO_RESP = '0A'
FC_FREEZE_AT_TIME = '0B'
FC_FREEZE_AT_TIME_NO_RESP = '0C'
FC_COLD_RESTART = '0D'
FC_WARM_RESTART = '0E'
FC_INIT_DATA = '0F'
FC_INIT_APP = '10'
FC_START_APP = '11'
FC_STOP_APP = '12'
FC_SAVE_CONFIG = '13'
FC_ENABLE_UNSOL = '14'
FC_DISABLE_UNSOL = '15'
FC_ASSIGN_CLASS = '16'
FC_DELAY_MEASURE = '17'
FC_RECORD_TIME = '18'
FC_OPEN_FILE = '19'
FC_CLOSE_FILE = '1A'
FC_DELETE_FILE = '1B'
FC_FILE_INFO = '1C'
FC_AUTH_FILE = '1D'
FC_ABORT_FILE = '1E'
FC_ACTIVATE_CONFIG = '1F'
FC_AUTH_REQ = '20'
FC_AUTH_REQ_NO_ACK = '21'
FC_RESP = '81'
FC_UNSOL_RESP = '82'
FC_AUTH_RESP = '83'
TCAC_FIRST_FIN = 'C0C0'
# Broadcast Commands
COLD_RESTART_BROADCAST = '056408C4FFFFFFFF4451C0C00D9C86'
LINK_STATUS_BROADCAST = '056405C9FFFFFFFF46C9'
# Build commands
LINK_STATUS_DIRECT = build_dnp_header(DNP_HEADER,src_address,dst_address,DLLCC_P_REQUEST_LINK_STATUS)
RESET_LINK_STATE_DIRECT = build_dnp_header(DNP_HEADER,src_address,dst_address,DLLCC_P_RESET_LINK_STATES)
TEST_LINK_STATE_DIRECT = build_dnp_header(DNP_HEADER,src_address,dst_address,DLLCC_P_TEST_LINK_STATES)
UNCONFIRMED_USER_DATA = build_dnp_header(DNP_HEADER,src_address,dst_address,DLLCC_P_UNCONFIRMED_USER_DATA)
COLD_RESTART_OBJ = build_dnp_object(TCAC_FIRST_FIN + FC_COLD_RESTART)
WARM_RESTART_OBJ = build_dnp_object(TCAC_FIRST_FIN + FC_WARM_RESTART)
# Wrapper for sending broadcast messages
def send_broadcast(s, cmd):
    """Transmit a broadcast DNP3 frame and echo any reply.

    s: an open serial port
    cmd: the frame as a string of hex bytes
    """
    s.write(bytes.fromhex(cmd))
    time.sleep(1)
    response = s.read(size=200)
    if response:
        print(response)
    time.sleep(1)
# Wrapper for sending direct messages
# s = open serial port
# cmd = byte string built from build_dnp_header function
# obj = byte string built from build_dnp_object (optional payload)
def send_direct(s, cmd, obj=b''):
    """Send a directed DNP3 frame, optionally with an object payload.

    When ``obj`` is supplied (already CRC'd, from build_dnp_object) the
    header's length byte is patched and the header CRC recomputed before
    transmission; any reply within the read timeout is printed.
    """
    # If there are DNP3 objects, update the length byte
    # NOTE: DNP3 objects should be completely formed with CRC
    len_index = 2
    if obj:
        # using a bytearray might be more understandable
        # Compute new length byte and remove CRC from header
        # New length = old length + object bytes minus the object's own
        # 2 CRC bytes; cmd[:-2] (via the slice below) drops the stale
        # header CRC appended by build_dnp_header.
        cmd = cmd[:len_index] + (cmd[len_index] + (len(obj) - 2)).to_bytes(1,'little') + cmd[len_index + 1:-2]
        # Recompute CRC and update command
        crc = gen_crc(cmd)
        cmd = cmd + crc
    s.write(cmd + obj)
    time.sleep(1)
    r = s.read(size=200)
    if r: print(r)
    time.sleep(1)
###############
# Setup Serial
###############
port = '/dev/ttyUSB0'
baudrate = 19200
timeout = 1      # seconds; bounds each serialPort.read() call
bytesize = 8
stopbits = serial.STOPBITS_ONE
serialPort = serial.Serial(port=port, baudrate=baudrate,
                           bytesize=bytesize, timeout=timeout, stopbits=stopbits)
response = b''
print('Starting DNP3 Stalker. Cntl-C to stop sending commands.\n')
# Dispatch on the first CLI argument; the chosen command is re-sent once per
# loop iteration until the user interrupts with Ctrl-C.
while True:
    try:
        if len(sys.argv) < 2:
            print(' Provide a command. Read the code.\n')
            break
        if sys.argv[1] == 'COLD_BROADCAST': send_broadcast(serialPort, COLD_RESTART_BROADCAST)
        if sys.argv[1] == 'LINK_BROADCAST': send_broadcast(serialPort, LINK_STATUS_BROADCAST)
        if sys.argv[1] == 'LINK_STAT': send_direct(serialPort, LINK_STATUS_DIRECT)
        if sys.argv[1] == 'COLD_RESTART': send_direct(serialPort, UNCONFIRMED_USER_DATA, obj=COLD_RESTART_OBJ)
        if sys.argv[1] == 'WARM_RESTART': send_direct(serialPort, UNCONFIRMED_USER_DATA, obj=WARM_RESTART_OBJ)
        time.sleep(1)
        # TODO: Remove old methods
        # (dead code kept as an inert string literal below)
        '''
        serialPort.write(bytes.fromhex(COLD_RESTART_BROADCAST))
        time.sleep(1)
        response = serialPort.read(size=200)
        if response: print(response)
        time.sleep(1)
        serialPort.write(build_dnp_data_header(DNP_HEADER,src_address,dst_address,'C9'))
        print("%s"%(build_dnp_data_header(DNP_HEADER,src_address,dst_address,'C9').hex()))
        time.sleep(1)
        response = serialPort.read(size=200)
        if response: print(response)
        time.sleep(1)
        '''
    except KeyboardInterrupt:
        break
serialPort.close()
| 1,544 | 0 | 110 |
3f3d53962a5ae34a3575bee65e251ef65db6c287 | 1,720 | py | Python | Backend/core/security/auth.py | TheDescend/elevatorbot | 0909ec9ba213480bdf7f790c3d115dd8c4f3ae17 | [
"MIT"
] | null | null | null | Backend/core/security/auth.py | TheDescend/elevatorbot | 0909ec9ba213480bdf7f790c3d115dd8c4f3ae17 | [
"MIT"
] | 41 | 2022-01-12T11:10:40.000Z | 2022-03-22T09:47:25.000Z | Backend/core/security/auth.py | TheDescend/elevatorbot | 0909ec9ba213480bdf7f790c3d115dd8c4f3ae17 | [
"MIT"
] | null | null | null | from datetime import timedelta
from typing import Optional
from fastapi import HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import jwt
from passlib.context import CryptContext
# defining algorithms
from Shared.functions.helperFunctions import get_now_with_tz
from Shared.functions.readSettingsFile import get_setting
_SECRET_KEY = None
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
CREDENTIALS_EXCEPTION = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
# get the secret key from a file if exists, otherwise generate one
async def get_secret_key():
    """Return the secret key (setting ``SECRET``) used to sign jwt tokens."""
    secret = get_setting("SECRET")
    return secret
# define auth schemes
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/auth/token")
def verify_password(plain_password: str, hashed_password: str) -> bool:
    """Check *plain_password* against the stored hash *hashed_password*."""
    is_valid = pwd_context.verify(plain_password, hashed_password)
    return is_valid
def get_password_hash(plain_password: str) -> str:
    """Return the bcrypt hash of *plain_password*."""
    hashed = pwd_context.hash(plain_password)
    return hashed
async def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
    """Create a jwt token to authenticate with.

    Args:
        data: Claims to embed in the token (copied, not mutated).
        expires_delta: Optional token lifetime; defaults to 15 minutes.

    Returns:
        The encoded jwt as a string.
    """
    to_encode = data.copy()
    # Explicit None check so a caller-supplied timedelta(0) is honoured
    # instead of silently falling back to the 15-minute default (the old
    # truthiness test treated timedelta(0) as "not given").
    if expires_delta is not None:
        expire = get_now_with_tz() + expires_delta
    else:
        # NOTE(review): module defines ACCESS_TOKEN_EXPIRE_MINUTES = 30 but the
        # default here is 15 — confirm which is intended before unifying.
        expire = get_now_with_tz() + timedelta(minutes=15)
    to_encode.update({"exp": expire})
    encoded_jwt = jwt.encode(to_encode, await get_secret_key(), algorithm=ALGORITHM)
    return encoded_jwt
| 29.655172 | 92 | 0.758721 | from datetime import timedelta
from typing import Optional
from fastapi import HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from jose import jwt
from passlib.context import CryptContext
# defining algorithms
from Shared.functions.helperFunctions import get_now_with_tz
from Shared.functions.readSettingsFile import get_setting
_SECRET_KEY = None
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 30
CREDENTIALS_EXCEPTION = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
# get the secret key from a file if exists, otherwise generate one
async def get_secret_key():
"""Get the secret key used to create a jwt token"""
return get_setting("SECRET")
# define auth schemes
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/auth/token")
def verify_password(plain_password: str, hashed_password: str) -> bool:
"""Make sure the hashed password is correct"""
return pwd_context.verify(plain_password, hashed_password)
def get_password_hash(plain_password: str) -> str:
"""Hash the password"""
return pwd_context.hash(plain_password)
async def create_access_token(data: dict, expires_delta: Optional[timedelta] = None) -> str:
"""Create a jwt token to authenticate with"""
to_encode = data.copy()
if expires_delta:
expire = get_now_with_tz() + expires_delta
else:
expire = get_now_with_tz() + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, await get_secret_key(), algorithm=ALGORITHM)
return encoded_jwt
| 0 | 0 | 0 |
a184d6d309bb029c17bb383b819675849e631154 | 2,912 | py | Python | apps/2d/euler/test_exact2/run_error.py | dcseal/finess | 766e583ae9e84480640c7c3b3c157bf40ab87fe4 | [
"BSD-3-Clause"
] | null | null | null | apps/2d/euler/test_exact2/run_error.py | dcseal/finess | 766e583ae9e84480640c7c3b3c157bf40ab87fe4 | [
"BSD-3-Clause"
] | null | null | null | apps/2d/euler/test_exact2/run_error.py | dcseal/finess | 766e583ae9e84480640c7c3b3c157bf40ab87fe4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
from __future__ import with_statement
from contextlib import closing
from subprocess import call, Popen, PIPE
import os
from math import log,sqrt
import numpy as np
def main( ):
    '''Write some help documentation here
    '''
    # Walks output_000, output_001, ... folders from a refinement study,
    # computes L1/Linf errors of the density field against the stored exact
    # solution, and prints log2 error ratios (i.e. observed convergence order).
    print "# leading comments can be given a '#' character"
    my_dictionary = {}
    old_err = i = 0
    old_err2 = 0
    while( 1 ):
        directory_num = my_dictionary['dir_num'] = i
        folder = (os.getcwd() + '/output_%(dir_num)03i/') % my_dictionary
        # print folder
        if( not os.path.exists(folder) ):
            print 'did not find folder: %s' % folder
            break
        my_dictionary['curr_folder'] = folder
        # we want to do:
        # data = open('dogpack.data','w')
        # print >> data, dogpack_data_template % { 'mx': mx_now, 'ts_method': ts_method}
        # data.close()
        # and we avoid the .close() (even in case of exception) with 'with':
        directory_num = i
        try:
            # q0000.dat holds the exact solution, q0001.dat the computed one;
            # the leading entry (time header) is skipped with [1:].
            qex = np.loadtxt(folder + "/q0000.dat")[1:]
            qapp = np.loadtxt(folder + "/q0001.dat")[1:]
        except IOError:
            print('''Did not find the data file.
            Please Wait for simulation to finish running.''')
            break
        # 5 conserved quantities stacked in one flat array; keep density only.
        qlength = len(qex)/5
        m = sqrt(qlength)          # grid is m x m
        dx = dy = 10.0/m           # domain is 10 x 10
        print 'm = %(mm)d' % {'mm':m}
        qex = qex[:qlength]        # only density for this error
        qapp = qapp[:qlength]      # only density
        diff = qex - qapp
        new_err = sum(abs(diff)) * dx * dy /100.0
        new_err2 = max(abs(diff))  # / max(abs(qex))
        r1 = 'L1-error = %(new).3e; ' % {'old': old_err, 'new' : new_err}
        if( old_err > 0 and new_err > 0 ):
            result = r1 + ' log2(ratio) = %(rat).3f' % \
                {'rat' : log( (old_err/new_err), 2) }
        else:
            # NOTE(review): if new_err == 0 this division raises
            # ZeroDivisionError — confirm intended for the first iteration.
            result = r1 + ' log2(ratio) = %(rat).3f' % \
                {'old' : old_err, 'new' : new_err, 'rat' : (old_err/new_err) }
        r2 = 'Linf-error = %(new).3e; ' % {'old': old_err2, 'new' : new_err2}
        if( old_err2 > 0 and new_err2 > 0 ):
            result2 = r2 + ' log2(ratio) = %(rat).3f' % \
                {'rat' : log( (old_err2/new_err2), 2) }
        else:
            result2 = r2 + ' log2(ratio) = %(rat).3f' % \
                {'old' : old_err2, 'new' : new_err2, 'rat' : (old_err2/new_err2) }
        # This is exactly the format I want:
        #{\normalsize $25$} & {\normalsize $1.747\times 10^{-4}$} & {\normalsize --} & {\normalsize $8.292\times 10^{-5}$} & {\normalsize --} \\
        print result
        print result2
        old_err = new_err
        old_err2 = new_err2
        i = i + 1
if __name__ == '__main__':
    # optparse is only needed when run as a script; the usage string embeds
    # main()'s docstring after the -h flag.
    import optparse
    parser = optparse.OptionParser(
        usage='''%%prog (-h |
    %s''' % main.__doc__)
    opts, args = parser.parse_args()
    main( )
| 30.652632 | 144 | 0.519231 | #!/usr/bin/env python
from __future__ import with_statement
from contextlib import closing
from subprocess import call, Popen, PIPE
import os
from math import log,sqrt
import numpy as np
def main( ):
'''Write some help documentation here
'''
print "# leading comments can be given a '#' character"
my_dictionary = {}
old_err = i = 0
old_err2 = 0
while( 1 ):
directory_num = my_dictionary['dir_num'] = i
folder = (os.getcwd() + '/output_%(dir_num)03i/') % my_dictionary
# print folder
if( not os.path.exists(folder) ):
print 'did not find folder: %s' % folder
break
my_dictionary['curr_folder'] = folder
# we want to do:
# data = open('dogpack.data','w')
# print >> data, dogpack_data_template % { 'mx': mx_now, 'ts_method': ts_method}
# data.close()
# and we avoid the .close() (even in case of exception) with 'with':
directory_num = i
try:
qex = np.loadtxt(folder + "/q0000.dat")[1:]
qapp = np.loadtxt(folder + "/q0001.dat")[1:]
except IOError:
print('''Did not find the data file.
Please Wait for simulation to finish running.''')
break
qlength = len(qex)/5
m = sqrt(qlength)
dx = dy = 10.0/m
print 'm = %(mm)d' % {'mm':m}
qex = qex[:qlength] # only density for this error
qapp = qapp[:qlength] # only density
diff = qex - qapp
new_err = sum(abs(diff)) * dx * dy /100.0
new_err2 = max(abs(diff)) # / max(abs(qex))
r1 = 'L1-error = %(new).3e; ' % {'old': old_err, 'new' : new_err}
if( old_err > 0 and new_err > 0 ):
result = r1 + ' log2(ratio) = %(rat).3f' % \
{'rat' : log( (old_err/new_err), 2) }
else:
result = r1 + ' log2(ratio) = %(rat).3f' % \
{'old' : old_err, 'new' : new_err, 'rat' : (old_err/new_err) }
r2 = 'Linf-error = %(new).3e; ' % {'old': old_err2, 'new' : new_err2}
if( old_err2 > 0 and new_err2 > 0 ):
result2 = r2 + ' log2(ratio) = %(rat).3f' % \
{'rat' : log( (old_err2/new_err2), 2) }
else:
result2 = r2 + ' log2(ratio) = %(rat).3f' % \
{'old' : old_err2, 'new' : new_err2, 'rat' : (old_err2/new_err2) }
# This is exactly the format I want:
#{\normalsize $25$} & {\normalsize $1.747\times 10^{-4}$} & {\normalsize --} & {\normalsize $8.292\times 10^{-5}$} & {\normalsize --} \\
print result
print result2
old_err = new_err
old_err2 = new_err2
i = i + 1
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser(
usage='''%%prog (-h |
%s''' % main.__doc__)
opts, args = parser.parse_args()
main( )
| 0 | 0 | 0 |
657a7dd18dc78bde9c450dd82ec8401b84f053e5 | 2,150 | py | Python | regression-tests/sparktkregtests/testcases/frames/entropy_test.py | lewisc/spark-tk | 5548fc925b5c278263cbdebbd9e8c7593320c2f4 | [
"ECL-2.0",
"Apache-2.0"
] | 34 | 2016-05-20T22:26:05.000Z | 2022-01-21T12:55:13.000Z | regression-tests/sparktkregtests/testcases/frames/entropy_test.py | aayushidwivedi01/spark-tk-old | fcf25f86498ac416cce77de0db4cf0aa503d20ac | [
"ECL-2.0",
"Apache-2.0"
] | 70 | 2016-06-28T01:11:21.000Z | 2021-03-15T21:40:01.000Z | regression-tests/sparktkregtests/testcases/frames/entropy_test.py | aayushidwivedi01/spark-tk-old | fcf25f86498ac416cce77de0db4cf0aa503d20ac | [
"ECL-2.0",
"Apache-2.0"
] | 34 | 2016-04-21T22:25:22.000Z | 2020-10-06T09:23:43.000Z | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Test Shannon entropy calculations """
import unittest
import math
from sparktkregtests.lib import sparktk_test
if __name__ == '__main__':
unittest.main()
| 34.126984 | 75 | 0.626047 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Test Shannon entropy calculations """
import unittest
import math
from sparktkregtests.lib import sparktk_test
class EntropyTest(sparktk_test.SparkTKTestCase):
    """Tests frame.entropy() against analytically known Shannon entropies."""

    def test_entropy_coin_flip(self):
        """ Get entropy on balanced coin flip. """
        # initialize data and expected result:
        # 10 heads + 10 tails is a uniform distribution over 2 outcomes,
        # whose Shannon entropy (natural log) is ln(2).
        frame_load = 10 * [['H'], ['T']]
        expected = math.log(2)
        # create the frame
        frame = self.context.frame.create(frame_load,
                                          schema=[("data", str)])
        # call the entropy function
        computed_entropy = frame.entropy("data")
        # test that we get the expected result
        self.assertAlmostEqual(computed_entropy,
                               expected, delta=.001)

    def test_entropy_exponential(self):
        """ Get entropy on exponential distribution. """
        # values 0,1,2,4 weighted 1:2:4:8
        frame_load = [[0, 1], [1, 2], [2, 4], [4, 8]]
        # Expected result from an on-line entropy calculator in base 2,
        # converted to nats by multiplying with ln(2)
        expected = 1.640223928941852 * math.log(2)
        # create frame
        frame = self.context.frame.create(frame_load,
                                          schema=[("data", int),
                                                  ("weight", int)])
        # call the entropy function to calculate
        computed_entropy = frame.entropy("data", "weight")
        # compare our sparktk result with the expected result
        self.assertAlmostEqual(computed_entropy, expected)
if __name__ == '__main__':
unittest.main()
| 0 | 1,335 | 23 |
8119679152b7a4909c61b5933ecdc065990a71d0 | 91 | py | Python | torauth/utils/base64_to_hex.py | tonlabs/tor-service | 1d7e15c20277202927e9869f73094fec7077bd38 | [
"Apache-2.0"
] | 4 | 2021-01-25T08:22:57.000Z | 2022-02-01T20:39:00.000Z | torauth/utils/base64_to_hex.py | tonlabs/tor-service | 1d7e15c20277202927e9869f73094fec7077bd38 | [
"Apache-2.0"
] | 1 | 2021-03-31T19:09:08.000Z | 2021-04-05T10:20:06.000Z | torauth/utils/base64_to_hex.py | tonlabs/tor-service | 1d7e15c20277202927e9869f73094fec7077bd38 | [
"Apache-2.0"
] | null | null | null | import base64
| 15.166667 | 44 | 0.769231 | import base64
def base64_to_hex(base64str):
return base64.b64decode(base64str).hex()
| 53 | 0 | 23 |
36b90b315ef4fd8a8f9e070a1d37c1baa1d4aca4 | 476 | py | Python | qcloudsdkmonitor/UnbindAlarmRuleReceiversRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | qcloudsdkmonitor/UnbindAlarmRuleReceiversRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | qcloudsdkmonitor/UnbindAlarmRuleReceiversRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
| 29.75 | 91 | 0.710084 | # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class UnbindAlarmRuleReceiversRequest(Request):
    """Request object for the monitor API action ``UnbindAlarmRuleReceivers``."""

    def __init__(self):
        # (service, version, action, endpoint) routing arguments for the base Request
        super(UnbindAlarmRuleReceiversRequest, self).__init__(
            'monitor', 'qcloudcliV1', 'UnbindAlarmRuleReceivers', 'monitor.api.qcloud.com')

    def get_alarmRuleId(self):
        # Returns None when the parameter has not been set yet
        return self.get_params().get('alarmRuleId')

    def set_alarmRuleId(self, alarmRuleId):
        self.add_param('alarmRuleId', alarmRuleId)
| 279 | 26 | 104 |
6a5ef23248d839ff460760cc621d107dff8127a8 | 569 | py | Python | dl_osm_from_extents.py | jamaps/shell_scripts | 439dae46e8c9b6f8e950ac442bc8a4dd477eff9b | [
"MIT"
] | 9 | 2016-10-22T18:37:18.000Z | 2021-07-15T23:36:33.000Z | dl_osm_from_extents.py | jamaps/shell_scripts | 439dae46e8c9b6f8e950ac442bc8a4dd477eff9b | [
"MIT"
] | null | null | null | dl_osm_from_extents.py | jamaps/shell_scripts | 439dae46e8c9b6f8e950ac442bc8a4dd477eff9b | [
"MIT"
] | 3 | 2016-10-24T18:39:27.000Z | 2020-07-05T15:30:20.000Z | import subprocess
# e.g.
dl_osm_from_extents(-77,-78,45,46)
| 29.947368 | 114 | 0.606327 | import subprocess
def dl_osm_from_extents(xmax, xmin, ymax, ymin):
# overpass url for grabbing data
url = 'http://overpass-api.de/api/map?bbox=' + str(xmin) + ',' + str(ymin) + ',' + str(xmax) + ',' + str(ymax)
# send the request
subprocess.call(["wget", url])
# temp name, since it returns it as this string
temp_name = 'map?bbox=' + str(xmin) + ',' + str(ymin) + ',' + str(xmax) + ',' + str(ymax)
# rename to map.osm.xml for future use!
subprocess.call(["mv", temp_name, "map.osm.xml"])
# e.g.
dl_osm_from_extents(-77,-78,45,46)
| 485 | 0 | 23 |
463bdc1f0421a7a16b4da0c0f5f38e2480fbd25e | 300 | py | Python | pythonProject/03al91Problema_Parametros_mutaveis_em_funcao/Problema_Parametros_mutaveis_em_funcao2.py | D-Wolter/PycharmProjects | c8d6144efa30261bff72a3e0414a0d80f6730f9b | [
"MIT"
] | null | null | null | pythonProject/03al91Problema_Parametros_mutaveis_em_funcao/Problema_Parametros_mutaveis_em_funcao2.py | D-Wolter/PycharmProjects | c8d6144efa30261bff72a3e0414a0d80f6730f9b | [
"MIT"
] | null | null | null | pythonProject/03al91Problema_Parametros_mutaveis_em_funcao/Problema_Parametros_mutaveis_em_funcao2.py | D-Wolter/PycharmProjects | c8d6144efa30261bff72a3e0414a0d80f6730f9b | [
"MIT"
] | null | null | null |
clientes1 = lista_de_clientes(["joao", 'maria', 'jose'])
clientes2 = lista_de_clientes(["dani", 'tiago', 'luana'])
print(clientes1)
print(clientes2) | 23.076923 | 57 | 0.7 |
def lista_de_clientes(clientes_iteravel, lista=None):
if lista is None:
lista = []
lista.extend(clientes_iteravel)
return lista
clientes1 = lista_de_clientes(["joao", 'maria', 'jose'])
clientes2 = lista_de_clientes(["dani", 'tiago', 'luana'])
print(clientes1)
print(clientes2) | 126 | 0 | 23 |
0f1c07ead9fb19310cd22f74ff01be5ee8def92e | 1,555 | py | Python | autonetkit/config.py | sysbot/autonetkit | eb91ee4cb15cc40b81d8d1a23059c1cddde5540f | [
"BSD-3-Clause"
] | 1 | 2015-11-08T07:26:26.000Z | 2015-11-08T07:26:26.000Z | autonetkit/config.py | sysbot/autonetkit | eb91ee4cb15cc40b81d8d1a23059c1cddde5540f | [
"BSD-3-Clause"
] | null | null | null | autonetkit/config.py | sysbot/autonetkit | eb91ee4cb15cc40b81d8d1a23059c1cddde5540f | [
"BSD-3-Clause"
] | null | null | null | import pkg_resources
import ConfigParser
from configobj import ConfigObj, flatten_errors
import os
import validate
validator = validate.Validator()
import os.path
# from http://stackoverflow.com/questions/4028904
ank_user_dir = os.path.join(os.path.expanduser("~"), ".autonetkit")
#NOTE: this only gets loaded once package-wide if imported as import autonetkit.config
settings = load_config()
| 35.340909 | 89 | 0.691961 | import pkg_resources
import ConfigParser
from configobj import ConfigObj, flatten_errors
import os
import validate
validator = validate.Validator()
import os.path
# from http://stackoverflow.com/questions/4028904
ank_user_dir = os.path.join(os.path.expanduser("~"), ".autonetkit")
def load_config():
    """Load and validate ANK settings.

    Merges, in increasing precedence: package defaults (configspec.cfg),
    ~/.autonetkit/autonetkit.cfg, ./autonetkit.cfg, and the file named by the
    AUTONETKIT_CFG environment variable. Exits on invalid keys.
    """
    # NOTE(review): this RawConfigParser is immediately overwritten by the
    # ConfigObj below and appears to be dead code.
    settings = ConfigParser.RawConfigParser()
    spec_file = pkg_resources.resource_filename(__name__,"/config/configspec.cfg")
    settings = ConfigObj(configspec=spec_file, encoding='UTF8')
    # User's ANK settings
    user_config_file = os.path.join(ank_user_dir, "autonetkit.cfg")
    settings.merge(ConfigObj(user_config_file))
    # ANK settings in current directory
    settings.merge(ConfigObj("autonetkit.cfg"))
    # ANK settings specified by environment variable
    try:
        ankcfg = os.environ['AUTONETKIT_CFG']
        settings.merge(ConfigObj(ankcfg))
    except KeyError:
        pass
    # Validate the merged settings against the configspec
    results = settings.validate(validator)
    if results != True:
        for (section_list, key, _) in flatten_errors(settings, results):
            if key is not None:
                print "Error loading configuration file:"
                print 'Invalid key "%s" in section "%s"' % (key, ', '.join(section_list))
                raise SystemExit
            else:
                # ignore missing sections - use defaults
                #print 'The following section was missing:%s ' % ', '.join(section_list)
                pass
    return settings
#NOTE: this only gets loaded once package-wide if imported as import autonetkit.config
settings = load_config()
| 1,135 | 0 | 23 |
e80e8324faaea50666d94075ac23360053e2264a | 2,145 | py | Python | tests/modules/test_layers.py | caodoanh2001/uit-mmf | 39e80d179557981e13bc0809fd2e3081893cf8fa | [
"BSD-3-Clause"
] | 44 | 2020-12-10T07:36:11.000Z | 2022-03-01T10:45:31.000Z | tests/modules/test_layers.py | caodoanh2001/uit-mmf | 39e80d179557981e13bc0809fd2e3081893cf8fa | [
"BSD-3-Clause"
] | 11 | 2021-05-12T09:41:27.000Z | 2022-03-02T08:48:04.000Z | tests/modules/test_layers.py | HAWLYQ/Qc-TextCap | 60359f6083b89b442c383dc7eee888e7fbf0c65f | [
"BSD-3-Clause"
] | 8 | 2021-01-10T11:47:57.000Z | 2021-12-25T11:34:37.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
import random
import operator
import functools
import numpy as np
import pythia.modules.layers as layers
| 33 | 97 | 0.647552 | # Copyright (c) Facebook, Inc. and its affiliates.
import unittest
import torch
import random
import operator
import functools
import numpy as np
import pythia.modules.layers as layers
class TestModuleLayers(unittest.TestCase):
    """Unit tests for pythia.modules.layers (ConvNet, Flatten, UnFlatten)."""

    def setUp(self):
        # Fixed seed makes the randn inputs (and ConvNet init) reproducible,
        # which the exact-value checks in test_conv_net rely on.
        torch.manual_seed(1234)

    def test_conv_net(self):
        conv_net = layers.ConvNet(150, 75, 3)
        input_tensor = torch.randn(4, 150, 64, 64)
        output = conv_net(input_tensor)
        # spatial dims halve (64 -> 32); channels 150 -> 75
        expected_size = torch.Size((4, 75, 32, 32))
        self.assertEqual(output.size(), expected_size)
        # Since seed is fix we can check some of tensor values
        np.testing.assert_almost_equal(output[0][0][0][0].item(), 0.149190, decimal=5)
        np.testing.assert_almost_equal(output[3][74][31][31].item(), -0.25199, decimal=5)

    def test_flatten(self):
        flatten = layers.Flatten()
        # Test 3 dim: all non-batch dims collapse into one
        input_tensor = torch.randn(5, 6, 10)
        expected_size = torch.Size((5, 60))
        actual_size = flatten(input_tensor).size()
        self.assertEqual(actual_size, expected_size)
        # Test 1 dim: already flat, shape unchanged
        input_tensor = torch.randn(5)
        expected_size = torch.Size((5,))
        actual_size = flatten(input_tensor).size()
        self.assertEqual(actual_size, expected_size)
        # Test 6 dim
        # NOTE(review): range(7) actually yields a 7-dim tensor, not 6 —
        # harmless for the assertion, but the comment is off by one.
        size_list = [random.randint(2, 4) for _ in range(7)]
        expected_size = torch.Size((size_list[0], functools.reduce(operator.mul, size_list[1:])))
        input_tensor = torch.randn(*size_list)
        actual_size = flatten(input_tensor).size()
        self.assertEqual(actual_size, expected_size)

    def test_unflatten(self):
        unflatten = layers.UnFlatten()
        # Test 2 dim to 3 dim
        input_tensor = torch.randn(5, 60)
        expected_size = torch.Size((5, 6, 10))
        actual_size = unflatten(input_tensor, sizes=[6, 10]).size()
        self.assertEqual(actual_size, expected_size)
        # Test 1 dim: empty sizes list leaves the shape unchanged
        input_tensor = torch.randn(5)
        expected_size = torch.Size((5,))
        actual_size = unflatten(input_tensor, sizes=[]).size()
        self.assertEqual(expected_size, actual_size)
| 1,805 | 21 | 130 |
26e2dd7178926dbec1325dd0b367ed4bdae58ea3 | 1,538 | py | Python | advance/getters_setters_example.py | leonhmi75/learning-materials | 7342bf14e41ee2d1bf1b0b9b52f626318597a75e | [
"MIT"
] | 1 | 2019-05-01T05:25:22.000Z | 2019-05-01T05:25:22.000Z | advance/getters_setters_example.py | leon-lei/learning-materials | 7342bf14e41ee2d1bf1b0b9b52f626318597a75e | [
"MIT"
] | null | null | null | advance/getters_setters_example.py | leon-lei/learning-materials | 7342bf14e41ee2d1bf1b0b9b52f626318597a75e | [
"MIT"
] | null | null | null | # Example code from Aaron Hall StackOverflow response
# https://stackoverflow.com/questions/2627002/whats-the-pythonic-way-to-use-getters-and-setters/36943813#36943813
foo = Protective()
foo.protected_value = 35
print(foo.__dict__)
foo.protected_value = 200 # raises ValueError
del foo.protected_value # raises AttributeError
# Another example from Python Cookbook
| 29.018868 | 113 | 0.680754 | # Example code from Aaron Hall StackOverflow response
# https://stackoverflow.com/questions/2627002/whats-the-pythonic-way-to-use-getters-and-setters/36943813#36943813
class Protective(object):
    """Example of a validated attribute implemented with ``property``.

    ``protected_value`` accepts only integral values in [0, 100], stores them
    as ``int``, and refuses deletion.
    """

    def __init__(self, start_protected_value=0):
        # Route through the property setter so validation applies here too.
        self.protected_value = start_protected_value

    @property
    def protected_value(self):
        """The guarded integer value."""
        return self._protected_value

    @protected_value.setter
    def protected_value(self, value):
        # Reject anything that is not integral (e.g. 3.5, or non-numeric).
        if value != int(value):
            raise TypeError("protected_value must be an integer")
        # Guard clause: out-of-range values are rejected before storing.
        if not (0 <= value <= 100):
            raise ValueError("protected_value must be " +
                             "between 0 and 100 inclusive")
        self._protected_value = int(value)

    @protected_value.deleter
    def protected_value(self):
        raise AttributeError("do not delete, protected_value can be set to 0")
# Demo of the Protective property.
# NOTE(review): the last two statements are intentional failures; execution
# stops at the ValueError below, so the deleter line is never reached.
foo = Protective()
foo.protected_value = 35
print(foo.__dict__)
foo.protected_value = 200 # raises ValueError
del foo.protected_value # raises AttributeError
# Another example from Python Cookbook
class Person:
    """Example of enforcing an attribute's type via ``property``."""

    def __init__(self, first_name):
        # Assignment goes through the setter below, so the type check
        # also applies at construction time.
        self.first_name = first_name

    @property
    def first_name(self):
        """The person's first name; guaranteed to be a ``str``."""
        return self._first_name

    @first_name.setter
    def first_name(self, value):
        if isinstance(value, str):
            self._first_name = value
        else:
            raise TypeError('Expect a string')

    @first_name.deleter
    def first_name(self):
        raise AttributeError("Can't delete attribute")
| 781 | 342 | 45 |
5da644f3d5d76448a91a620d3097b463f3d39801 | 944 | py | Python | gen_spectrogram.py | Zakobian/WD_gas_disk_imaging | b8bda209e541b442f44fdb6109de8f2f72ec38cf | [
"MIT"
] | null | null | null | gen_spectrogram.py | Zakobian/WD_gas_disk_imaging | b8bda209e541b442f44fdb6109de8f2f72ec38cf | [
"MIT"
] | null | null | null | gen_spectrogram.py | Zakobian/WD_gas_disk_imaging | b8bda209e541b442f44fdb6109de8f2f72ec38cf | [
"MIT"
] | null | null | null | import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib as mpl
from astropy.visualization import simple_norm
from scipy.integrate import simps
# Generate fake data
from scipy.stats.kde import gaussian_kde
from lightcurve import generate_lightcurve
###
### Plot data from instruments on graphs
###
# Spectral lines and the instruments whose spectra are overlaid per element.
elems = ['SiII', 'MgII']
inst_names = ['MIKE1', 'MIKE2', 'Xshooter']
data = []  # collected (wavelength, flux) pairs, one per (element, instrument)
for i, elem in enumerate(elems):
    fig = plt.figure(i)
    ax = fig.add_subplot(1, 1, 1)
    for j, inst_name in enumerate(inst_names):
        # BUG FIX: the module is imported as `import numpy` (no `as np`),
        # so the previous `np.loadtxt` raised NameError.
        x, y = numpy.loadtxt('data/'+elem+'_'+inst_name+'.csv', delimiter=',', unpack=True)
        data.append((x, y))
        # Normalise the continuum-subtracted profile to unit area
        area = simps(y-1, x)
        y = (y-1)/area
        print(simps(y, x))  # sanity check: should print ~1.0 after normalisation
        ax.plot(x, y, linewidth=1, label=inst_name)
    ax.legend()
    plt.xlabel("Wavelength")
    plt.ylabel("Normalized flux")
    plt.title(elem)
    fig.savefig('figures/'+elem+'.png')
| 23.6 | 85 | 0.65572 | import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib as mpl
from astropy.visualization import simple_norm
from scipy.integrate import simps
# Generate fake data
from scipy.stats.kde import gaussian_kde
from lightcurve import generate_lightcurve
###
### Plot data from instruments on graphs
###
elems=['SiII','MgII']
inst_names=['MIKE1','MIKE2','Xshooter']
data=[]
for i,elem in enumerate(elems):
fig = plt.figure(i)
ax = fig.add_subplot(1, 1, 1)
for j,inst_name in enumerate(inst_names):
x,y=np.loadtxt('data/'+elem+'_'+inst_name+'.csv', delimiter=',', unpack=True)
data.append((x,y))
area = simps(y-1,x)
y=(y-1)/area
print(simps(y,x))
ax.plot(x,y, linewidth=1,label=inst_name)
ax.legend()
plt.xlabel("Wavelength")
plt.ylabel("Normalized flux")
plt.title(elem)
fig.savefig('figures/'+elem+'.png')
| 0 | 0 | 0 |
e83d69d71a5804eec73427f1611fbef51b38accf | 2,600 | py | Python | applications/tapkee/swissroll_embedding.py | ShankarNara/shogun | 8ab196de16b8d8917e5c84770924c8d0f5a3d17c | [
"BSD-3-Clause"
] | 2,753 | 2015-01-02T11:34:13.000Z | 2022-03-25T07:04:27.000Z | applications/tapkee/swissroll_embedding.py | ShankarNara/shogun | 8ab196de16b8d8917e5c84770924c8d0f5a3d17c | [
"BSD-3-Clause"
] | 2,404 | 2015-01-02T19:31:41.000Z | 2022-03-09T10:58:22.000Z | applications/tapkee/swissroll_embedding.py | ShankarNara/shogun | 8ab196de16b8d8917e5c84770924c8d0f5a3d17c | [
"BSD-3-Clause"
] | 1,156 | 2015-01-03T01:57:21.000Z | 2022-03-26T01:06:28.000Z | import numpy
numpy.random.seed(40)
# Point colours and 3-D coordinates of the swissroll toy data set
tt = numpy.genfromtxt('../../data/toy/swissroll_color.dat',unpack=True).T
X = numpy.genfromtxt('../../data/toy/swissroll.dat',unpack=True).T
N = X.shape[1]
# Each entry: (configured shogun converter instance, subplot label)
converters = []
from shogun import LocallyLinearEmbedding
lle = LocallyLinearEmbedding()
lle.set_k(9)
converters.append((lle, "LLE with k=%d" % lle.get_k()))
from shogun import MultidimensionalScaling
mds = MultidimensionalScaling()
converters.append((mds, "Classic MDS"))
lmds = MultidimensionalScaling()
lmds.set_landmark(True)
lmds.set_landmark_number(20)
converters.append((lmds,"Landmark MDS with %d landmarks" % lmds.get_landmark_number()))
from shogun import Isomap
cisomap = Isomap()
cisomap.set_k(9)
converters.append((cisomap,"Isomap with k=%d" % cisomap.get_k()))
from shogun import DiffusionMaps
from shogun import GaussianKernel
dm = DiffusionMaps()
dm.set_t(2)
dm.set_width(1000.0)
converters.append((dm,"Diffusion Maps with t=%d, sigma=%.1f" % (dm.get_t(),dm.get_width())))
from shogun import HessianLocallyLinearEmbedding
hlle = HessianLocallyLinearEmbedding()
hlle.set_k(6)
converters.append((hlle,"Hessian LLE with k=%d" % (hlle.get_k())))
from shogun import LocalTangentSpaceAlignment
ltsa = LocalTangentSpaceAlignment()
ltsa.set_k(6)
converters.append((ltsa,"LTSA with k=%d" % (ltsa.get_k())))
from shogun import LaplacianEigenmaps
le = LaplacianEigenmaps()
le.set_k(20)
le.set_tau(100.0)
converters.append((le,"Laplacian Eigenmaps with k=%d, tau=%d" % (le.get_k(),le.get_tau())))
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
new_mpl = False
# Newer matplotlib exposes 3-D axes via add_subplot(projection='3d');
# fall back to constructing Axes3D directly on older versions.
try:
    swiss_roll_fig = fig.add_subplot(3,3,1, projection='3d')
    new_mpl = True
except:
    figure = plt.figure()
    swiss_roll_fig = Axes3D(figure)
# Reference plot: the original 3-D swissroll, coloured by manifold position
swiss_roll_fig.scatter(X[0], X[1], X[2], s=10, c=tt, cmap=plt.cm.Spectral)
swiss_roll_fig._axis3don = False
plt.suptitle('Swissroll embedding',fontsize=9)
plt.subplots_adjust(hspace=0.4)
from shogun import RealFeatures
# Run every converter and draw its 2-D embedding in the subplot grid
for (i, (converter, label)) in enumerate(converters):
    # Reload X each pass: converters may consume/modify the feature matrix
    X = numpy.genfromtxt('../../data/toy/swissroll.dat',unpack=True).T
    features = RealFeatures(X)
    converter.set_target_dim(2)
    converter.parallel.set_num_threads(1)
    new_feats = converter.embed(features).get_feature_matrix()
    # Grid layout depends on which matplotlib code path was taken above
    if not new_mpl:
        embedding_subplot = fig.add_subplot(4,2,i+1)
    else:
        embedding_subplot = fig.add_subplot(3,3,i+2)
    embedding_subplot.scatter(new_feats[0],new_feats[1], c=tt, cmap=plt.cm.Spectral)
    plt.axis('tight')
    plt.xticks([]), plt.yticks([])
    plt.title(label,fontsize=9)
    print converter.get_name(), 'done'
plt.show()
| 29.213483 | 92 | 0.752692 | import numpy
# Demo: embed the 3D swissroll toy dataset into 2D with several shogun
# dimension-reduction converters and plot each embedding side by side.
# NOTE(review): this is Python 2 code (bare `print` statement near the end).
numpy.random.seed(40)
# Per-point colour values and the 3 x N swissroll coordinates.
tt = numpy.genfromtxt('../../data/toy/swissroll_color.dat',unpack=True).T
X = numpy.genfromtxt('../../data/toy/swissroll.dat',unpack=True).T
N = X.shape[1]
# Each entry is (converter instance, plot title).
converters = []
from shogun import LocallyLinearEmbedding
lle = LocallyLinearEmbedding()
lle.set_k(9)
converters.append((lle, "LLE with k=%d" % lle.get_k()))
from shogun import MultidimensionalScaling
mds = MultidimensionalScaling()
converters.append((mds, "Classic MDS"))
lmds = MultidimensionalScaling()
lmds.set_landmark(True)
lmds.set_landmark_number(20)
converters.append((lmds,"Landmark MDS with %d landmarks" % lmds.get_landmark_number()))
from shogun import Isomap
cisomap = Isomap()
cisomap.set_k(9)
converters.append((cisomap,"Isomap with k=%d" % cisomap.get_k()))
from shogun import DiffusionMaps
from shogun import GaussianKernel
dm = DiffusionMaps()
dm.set_t(2)
dm.set_width(1000.0)
converters.append((dm,"Diffusion Maps with t=%d, sigma=%.1f" % (dm.get_t(),dm.get_width())))
from shogun import HessianLocallyLinearEmbedding
hlle = HessianLocallyLinearEmbedding()
hlle.set_k(6)
converters.append((hlle,"Hessian LLE with k=%d" % (hlle.get_k())))
from shogun import LocalTangentSpaceAlignment
ltsa = LocalTangentSpaceAlignment()
ltsa.set_k(6)
converters.append((ltsa,"LTSA with k=%d" % (ltsa.get_k())))
from shogun import LaplacianEigenmaps
le = LaplacianEigenmaps()
le.set_k(20)
le.set_tau(100.0)
converters.append((le,"Laplacian Eigenmaps with k=%d, tau=%d" % (le.get_k(),le.get_tau())))
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
new_mpl = False
# Newer matplotlib supports projection='3d' on add_subplot; older releases
# need a separate figure wrapped in Axes3D (bare except is pre-existing).
try:
    swiss_roll_fig = fig.add_subplot(3,3,1, projection='3d')
    new_mpl = True
except:
    figure = plt.figure()
    swiss_roll_fig = Axes3D(figure)
swiss_roll_fig.scatter(X[0], X[1], X[2], s=10, c=tt, cmap=plt.cm.Spectral)
swiss_roll_fig._axis3don = False
plt.suptitle('Swissroll embedding',fontsize=9)
plt.subplots_adjust(hspace=0.4)
from shogun import RealFeatures
# Run every converter on a fresh copy of the data and plot its 2D embedding.
for (i, (converter, label)) in enumerate(converters):
    X = numpy.genfromtxt('../../data/toy/swissroll.dat',unpack=True).T
    features = RealFeatures(X)
    converter.set_target_dim(2)
    converter.parallel.set_num_threads(1)
    new_feats = converter.embed(features).get_feature_matrix()
    # Subplot grid layout differs between old and new matplotlib paths.
    if not new_mpl:
        embedding_subplot = fig.add_subplot(4,2,i+1)
    else:
        embedding_subplot = fig.add_subplot(3,3,i+2)
    embedding_subplot.scatter(new_feats[0],new_feats[1], c=tt, cmap=plt.cm.Spectral)
    plt.axis('tight')
    plt.xticks([]), plt.yticks([])
    plt.title(label,fontsize=9)
    print converter.get_name(), 'done'
plt.show()
| 0 | 0 | 0 |
2e64c5515018f3358910aafdbe4a411c6ed9c861 | 858 | py | Python | Home/views.py | indoriyasboyz/E-commerce | a71e7d043899769e48992216bebebc4b43d647ca | [
"MIT"
] | null | null | null | Home/views.py | indoriyasboyz/E-commerce | a71e7d043899769e48992216bebebc4b43d647ca | [
"MIT"
] | null | null | null | Home/views.py | indoriyasboyz/E-commerce | a71e7d043899769e48992216bebebc4b43d647ca | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views.generic import TemplateView
from .models import Slider
| 1.769072 | 63 | 0.324009 | from django.shortcuts import render
from django.views.generic import TemplateView
from .models import Slider
class Homeview(TemplateView):
    """Render the landing page, adding every Slider row to the context."""

    template_name = 'index.html'
    context_object_name = 'object_list'

    def get_context_data(self, **kwargs):
        """Extend the base template context with all ``Slider`` objects."""
        context = super(Homeview, self).get_context_data(**kwargs)
        context['slider'] = Slider.objects.all()
        return context
| 146 | 108 | 23 |
1af80653b19b06e6367f7637b1dd35f5b63644d5 | 1,616 | py | Python | tests/test_geomconv.py | nelsyeung/teptools | 90a8cde2793e509b30c6fca0c3f64320855cf7c6 | [
"MIT"
] | null | null | null | tests/test_geomconv.py | nelsyeung/teptools | 90a8cde2793e509b30c6fca0c3f64320855cf7c6 | [
"MIT"
] | null | null | null | tests/test_geomconv.py | nelsyeung/teptools | 90a8cde2793e509b30c6fca0c3f64320855cf7c6 | [
"MIT"
] | null | null | null | """Test geomconv script."""
import os
import pytest
import geomconv
fixtures_dir = os.path.join('tests', 'fixtures')
@pytest.fixture
def chdir_fixtures(request):
    """Change the directory to the fixtures dir and back to the root directory
    after finished."""
    cwd = os.getcwd()
    os.chdir(fixtures_dir)

    # BUG FIX: ``fin`` was registered below without ever being defined,
    # so using this fixture raised NameError. The finalizer restores the
    # original working directory once the dependent test finishes.
    def fin():
        os.chdir(cwd)

    request.addfinalizer(fin)
def test_main_single(capsys):
    """Supplying a single outfile should print out the correct geomconv."""
    # Input file and the golden output it is expected to produce.
    outfile = os.path.join(fixtures_dir, 'one.out')
    expected_file = os.path.join(fixtures_dir, 'one_expected.geomconv')
    geomconv.main([outfile], 'emptyrc')
    captured_out, _ = capsys.readouterr()
    with open(expected_file, 'r') as handle:
        expected_text = handle.read()
    assert captured_out == expected_text
@pytest.mark.parametrize('outfile', [
    [],
    ['*.out']
])
def test_main_globbing(outfile, capsys, chdir_fixtures):
    """Supplying a glob pattern should also get the correct file."""
    # chdir_fixtures moves us into the fixtures dir, so relative globs
    # (and the default no-argument behaviour) resolve there.
    geomconv.main(outfile, 'emptyrc')
    out, err = capsys.readouterr()
    with open('one_expected.geomconv', 'r') as f:
        expected = f.read()
    assert out == expected
def test_side_view(capsys):
    """Supplying two outfile should print out the two outputs side-by-side."""
    # Two inputs plus the golden side-by-side rendering.
    paths = [os.path.join(fixtures_dir, name) for name in ('one.out', 'two.in')]
    expected_path = os.path.join(fixtures_dir, 'side_view_expected.geomconv')
    geomconv.main(paths)
    captured_out, _ = capsys.readouterr()
    with open(expected_path, 'r') as handle:
        assert captured_out == handle.read()
| 26.064516 | 78 | 0.664604 | """Test geomconv script."""
import os
import pytest
import geomconv
fixtures_dir = os.path.join('tests', 'fixtures')
@pytest.fixture
def chdir_fixtures(request):
    """Change the directory to the fixtures dir and back to the root directory
    after finished."""
    cwd = os.getcwd()
    os.chdir(fixtures_dir)
    def fin():
        # Finalizer: restore the original working directory after the test.
        os.chdir(cwd)
    request.addfinalizer(fin)
def test_main_single(capsys):
    """Supplying a single outfile should print out the correct geomconv."""
    # One input file and the golden output it should produce.
    outfile = os.path.join(fixtures_dir, 'one.out')
    expected_file = os.path.join(fixtures_dir, 'one_expected.geomconv')
    geomconv.main([outfile], 'emptyrc')
    out, err = capsys.readouterr()
    with open(expected_file, 'r') as f:
        expected = f.read()
    assert out == expected
@pytest.mark.parametrize('outfile', [
    [],
    ['*.out']
])
def test_main_globbing(outfile, capsys, chdir_fixtures):
    """Supplying a glob pattern should also get the correct file."""
    geomconv.main(outfile, 'emptyrc')
    captured_out, _ = capsys.readouterr()
    # chdir_fixtures placed us inside the fixtures dir, so the golden file
    # is addressable with a bare relative path.
    with open('one_expected.geomconv', 'r') as handle:
        expected_text = handle.read()
    assert captured_out == expected_text
def test_side_view(capsys):
    """Supplying two outfile should print out the two outputs side-by-side."""
    # Two input files; the expected fixture holds their side-by-side merge.
    outfiles = [os.path.join(fixtures_dir, 'one.out'),
                os.path.join(fixtures_dir, 'two.in')]
    expected_file = os.path.join(fixtures_dir, 'side_view_expected.geomconv')
    geomconv.main(outfiles)
    out, err = capsys.readouterr()
    with open(expected_file, 'r') as f:
        expected = f.read()
    assert out == expected
| 11 | 0 | 27 |
5c1c7b61eb9ef5ce9b6c6559d457293eecd0559e | 284 | py | Python | #097 - Um print especial.py | Lucas-HMSC/curso-python3 | b6506d508107c9a43993a7b5795ee39fc3b7c79d | [
"MIT"
] | null | null | null | #097 - Um print especial.py | Lucas-HMSC/curso-python3 | b6506d508107c9a43993a7b5795ee39fc3b7c79d | [
"MIT"
] | null | null | null | #097 - Um print especial.py | Lucas-HMSC/curso-python3 | b6506d508107c9a43993a7b5795ee39fc3b7c79d | [
"MIT"
] | null | null | null |
def escreva(txt):
    """Print *txt* inset inside a frame of '~' characters.

    The frame is 4 characters wider than the text.
    """
    tam = len(txt) + 4
    print('~' * tam)
    print(f'  {txt}')
    print('~' * tam)


# BUG FIX: the calls below referenced ``escreva`` although no definition
# existed in this copy, raising NameError; the helper above restores it.
escreva('Olá, mundo!')
escreva('Eu sou o Lucas :)')
escreva('Estou aprendendo Python')
escreva('Com o Professor Guanabara')
escreva('No CursoEmVideo')
| 20.285714 | 36 | 0.588028 | def escreva(txt):
tam = (len(txt) + 4)
print('~' * tam)
#print(' ',txt,' ')
print(f' {txt}')
print('~' * tam)
# Demonstrate the framed-print helper with a few sample messages.
escreva('Olá, mundo!')
escreva('Eu sou o Lucas :)')
escreva('Estou aprendendo Python')
escreva('Com o Professor Guanabara')
escreva('No CursoEmVideo')
| 109 | 0 | 22 |
e77de7cf684719b33f16a6e9ac67126172e5133e | 9,035 | py | Python | rlgraph/components/helpers/segment_tree.py | RLGraph/RLGraph | 428fc136a9a075f29a397495b4226a491a287be2 | [
"Apache-2.0"
] | 290 | 2018-07-29T15:30:57.000Z | 2022-03-19T02:46:53.000Z | rlgraph/components/helpers/segment_tree.py | RLGraph/RLGraph | 428fc136a9a075f29a397495b4226a491a287be2 | [
"Apache-2.0"
] | 76 | 2018-10-19T08:42:01.000Z | 2020-05-03T08:34:21.000Z | rlgraph/components/helpers/segment_tree.py | RLGraph/RLGraph | 428fc136a9a075f29a397495b4226a491a287be2 | [
"Apache-2.0"
] | 41 | 2018-10-30T07:05:05.000Z | 2022-03-01T08:28:24.000Z | # Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from rlgraph import get_backend
if get_backend() == "tf":
import tensorflow as tf
class SegmentTree(object):
"""
TensorFlow Segment tree for prioritized replay.
"""
def __init__(
self,
storage_variable,
capacity=1048
):
"""
Helper to represent a segment tree in pure TensorFlow.
Args:
storage_variable (tf.Variable): TensorFlow variable to use for storage.
capacity (int): Capacity of the segment tree.
"""
self.values = storage_variable
self.capacity = capacity
def insert(self, index, element, insert_op=None):
"""
Inserts an element into the segment tree by determining
its position in the tree.
Args:
index (int): Insertion index.
element (any): Element to insert.
insert_op (Union(tf.add, tf.minimum, tf, maximum)): Insert operation on the tree.
"""
insert_op = insert_op or tf.add
index += self.capacity
# Use a TensorArray to collect updates to the segment tree, then perform them all at once.
index_updates = tf.TensorArray(
dtype=tf.int32,
infer_shape=False,
size=1,
dynamic_size=True,
clear_after_read=False
)
element_updates = tf.TensorArray(
dtype=tf.float32,
infer_shape=False,
size=1,
dynamic_size=True,
clear_after_read=False
)
index_updates = index_updates.write(index=0, value=index)
element_updates = element_updates.write(index=0, value=element)
# Search and update values while index >=1
loop_update_index = tf.div(x=index, y=2)
# Return the TensorArrays containing the updates.
loop_update_index, index_updates, element_updates, _ = tf.while_loop(
cond=cond,
body=insert_body,
loop_vars=[loop_update_index, index_updates, element_updates, 1],
parallel_iterations=1,
back_prop=False
)
indices = index_updates.stack()
updates = element_updates.stack()
assignment = tf.scatter_update(ref=self.values, indices=indices, updates=updates)
with tf.control_dependencies(control_inputs=[assignment]):
return tf.no_op()
def get(self, index):
"""
Reads an item from the segment tree.
Args:
index (int):
Returns: The element.
"""
return self.values[self.capacity + index]
def index_of_prefixsum(self, prefix_sum):
"""
Identifies the highest index which satisfies the condition that the sum
over all elements from 0 till the index is <= prefix_sum.
Args:
prefix_sum .float): Upper bound on prefix we are allowed to select.
Returns:
int: Index/indices satisfying prefix sum condition.
"""
assert_ops = list()
# 0 <= prefix_sum <= sum(priorities)
priority_sum = tf.reduce_sum(input_tensor=self.values, axis=0)
# priority_sum_tensor = tf.fill(dims=tf.shape(prefix_sum), value=priority_sum)
assert_ops.append(tf.Assert(
condition=tf.less_equal(x=prefix_sum, y=priority_sum),
data=[prefix_sum]
))
# Vectorized loop -> initialize all indices matching elements in prefix-sum,
index = 1
with tf.control_dependencies(control_inputs=assert_ops):
index, _ = tf.while_loop(cond=cond, body=search_body, loop_vars=[index, prefix_sum])
return index - self.capacity
def reduce(self, start, limit, reduce_op=None):
"""
Applies an operation to specified segment.
Args:
start (int): Start index to apply reduction to.
limit (end): End index to apply reduction to.
reduce_op (Union(tf.add, tf.minimum, tf.maximum)): Reduce op to apply.
Returns:
Number: Result of reduce operation
"""
reduce_op = reduce_op or tf.add
# Init result with neutral element of reduce op.
# Note that all of these are commutative reduce ops.
if reduce_op == tf.add:
result = 0.0
elif reduce_op == tf.minimum:
result = float('inf')
elif reduce_op == tf.maximum:
result = float('-inf')
else:
raise ValueError("Unsupported reduce OP. Support ops are [tf.add, tf.minimum, tf.maximum]")
start += self.capacity
limit += self.capacity
_, _, result = tf.while_loop(cond=cond, body=reduce_body, loop_vars=(start, limit, result))
return result
    def get_min_value(self):
        """
        Returns min value of storage variable.
        """
        # Min-reduction over the full leaf range [0, capacity - 1].
        return self.reduce(0, self.capacity - 1, reduce_op=tf.minimum)
    def get_sum(self):
        """
        Returns sum value of storage variable.
        """
        # Sum-reduction over the full leaf range [0, capacity - 1].
        return self.reduce(0, self.capacity - 1, reduce_op=tf.add)
| 35.431373 | 111 | 0.591588 | # Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from rlgraph import get_backend
if get_backend() == "tf":
import tensorflow as tf
class SegmentTree(object):
"""
TensorFlow Segment tree for prioritized replay.
"""
    def __init__(
        self,
        storage_variable,
        capacity=1048
    ):
        """
        Helper to represent a segment tree in pure TensorFlow.

        Args:
            storage_variable (tf.Variable): TensorFlow variable to use for storage.
            capacity (int): Capacity of the segment tree.
        """
        # NOTE(review): other methods index values up to 2 * capacity + 1
        # (see insert/get), so the variable must hold internal nodes and
        # leaves -- confirm sizing against callers.
        self.values = storage_variable
        self.capacity = capacity
    def insert(self, index, element, insert_op=None):
        """
        Inserts an element into the segment tree by determining
        its position in the tree.
        Args:
            index (int): Insertion index.
            element (any): Element to insert.
            insert_op (Union(tf.add, tf.minimum, tf, maximum)): Insert operation on the tree.
        """
        insert_op = insert_op or tf.add
        # Leaves live at [capacity, 2 * capacity); shift into leaf space.
        index += self.capacity
        # Use a TensorArray to collect updates to the segment tree, then perform them all at once.
        index_updates = tf.TensorArray(
            dtype=tf.int32,
            infer_shape=False,
            size=1,
            dynamic_size=True,
            clear_after_read=False
        )
        element_updates = tf.TensorArray(
            dtype=tf.float32,
            infer_shape=False,
            size=1,
            dynamic_size=True,
            clear_after_read=False
        )
        # Seed the update lists with the leaf write itself.
        index_updates = index_updates.write(index=0, value=index)
        element_updates = element_updates.write(index=0, value=element)
        # Search and update values while index >=1
        loop_update_index = tf.div(x=index, y=2)
        def insert_body(loop_update_index, index_updates, element_updates, call_index):
            # This is the index we just updated.
            prev_index = index_updates.read(call_index - 1)
            prev_val = element_updates.read(call_index - 1)
            update_val = tf.where(
                condition=tf.greater(x=prev_index % 2, y=0),
                # Previous index was odd because of loop init -> 2 * index + 1 is in element_updates,
                # 2 * index is in variable values
                x=insert_op(x=self.values[2 * loop_update_index],
                            y=prev_val),
                # Previous index was even -> 2 * index is in element updates, 2 * index + 1 in variable values.
                y=insert_op(x=prev_val,
                            y=self.values[2 * loop_update_index + 1])
            )
            index_updates = index_updates.write(call_index, loop_update_index)
            element_updates = element_updates.write(call_index, update_val)
            return tf.div(x=loop_update_index, y=2), index_updates, element_updates, call_index + 1
        def cond(loop_update_index, index_updates, element_updates, call_index):
            # Walk parent nodes up to the root (index 1).
            return loop_update_index >= 1
        # Return the TensorArrays containing the updates.
        loop_update_index, index_updates, element_updates, _ = tf.while_loop(
            cond=cond,
            body=insert_body,
            loop_vars=[loop_update_index, index_updates, element_updates, 1],
            parallel_iterations=1,
            back_prop=False
        )
        # Apply every collected (index, value) pair in one scatter op.
        indices = index_updates.stack()
        updates = element_updates.stack()
        assignment = tf.scatter_update(ref=self.values, indices=indices, updates=updates)
        with tf.control_dependencies(control_inputs=[assignment]):
            return tf.no_op()
def get(self, index):
"""
Reads an item from the segment tree.
Args:
index (int):
Returns: The element.
"""
return self.values[self.capacity + index]
    def index_of_prefixsum(self, prefix_sum):
        """
        Identifies the highest index which satisfies the condition that the sum
        over all elements from 0 till the index is <= prefix_sum.
        Args:
            prefix_sum (float): Upper bound on prefix we are allowed to select.
        Returns:
            int: Index/indices satisfying prefix sum condition.
        """
        assert_ops = list()
        # 0 <= prefix_sum <= sum(priorities)
        priority_sum = tf.reduce_sum(input_tensor=self.values, axis=0)
        # priority_sum_tensor = tf.fill(dims=tf.shape(prefix_sum), value=priority_sum)
        assert_ops.append(tf.Assert(
            condition=tf.less_equal(x=prefix_sum, y=priority_sum),
            data=[prefix_sum]
        ))
        # Vectorized loop -> initialize all indices matching elements in prefix-sum,
        # starting the walk at the root node (index 1).
        index = 1
        def search_body(index, prefix_sum):
            # Is the value at position 2 * index > prefix sum?
            compare_value = self.values[2 * index]
            def update_prefix_sum_fn(index, prefix_sum):
                # 'Use up' values in this segment, then jump to next.
                prefix_sum -= self.values[2 * index]
                return 2 * index + 1, prefix_sum
            index, prefix_sum = tf.cond(
                pred=compare_value > prefix_sum,
                # If over prefix sum, jump index.
                true_fn=lambda: (2 * index, prefix_sum),
                # Else adjust prefix sum until done.
                false_fn=lambda: update_prefix_sum_fn(index, prefix_sum)
            )
            return index, prefix_sum
        def cond(index, prefix_sum):
            # Stop once the walk reaches the leaf layer.
            return index < self.capacity
        with tf.control_dependencies(control_inputs=assert_ops):
            index, _ = tf.while_loop(cond=cond, body=search_body, loop_vars=[index, prefix_sum])
        return index - self.capacity
    def reduce(self, start, limit, reduce_op=None):
        """
        Applies an operation to specified segment.
        Args:
            start (int): Start index to apply reduction to.
            limit (int): End index to apply reduction to.
            reduce_op (Union(tf.add, tf.minimum, tf.maximum)): Reduce op to apply.
        Returns:
            Number: Result of reduce operation
        """
        reduce_op = reduce_op or tf.add
        # Init result with neutral element of reduce op.
        # Note that all of these are commutative reduce ops.
        if reduce_op == tf.add:
            result = 0.0
        elif reduce_op == tf.minimum:
            result = float('inf')
        elif reduce_op == tf.maximum:
            result = float('-inf')
        else:
            raise ValueError("Unsupported reduce OP. Support ops are [tf.add, tf.minimum, tf.maximum]")
        # Shift both bounds into the leaf portion of the tree.
        start += self.capacity
        limit += self.capacity
        def reduce_body(start, limit, result):
            start_mod = tf.mod(x=start, y=2)
            def update_start_fn(start, result):
                # start is odd: fold its leaf into the result, step right.
                result = reduce_op(x=result, y=self.values[start])
                start += 1
                return start, result
            start, result = tf.cond(
                pred=tf.equal(x=start_mod, y=0),
                true_fn=lambda: (start, result),
                false_fn=lambda: update_start_fn(start, result)
            )
            end_mod = tf.mod(x=limit, y=2)
            def update_limit_fn(limit, result):
                # limit is odd: step left and fold that leaf into the result.
                limit -= 1
                result = reduce_op(x=result, y=self.values[limit])
                return limit, result
            limit, result = tf.cond(
                pred=tf.equal(x=end_mod, y=0),
                true_fn=lambda: (limit, result),
                false_fn=lambda: update_limit_fn(limit, result)
            )
            # Move both bounds up one tree level.
            return tf.div(x=start, y=2), tf.div(x=limit, y=2), result
        def cond(start, limit, result):
            return start < limit
        _, _, result = tf.while_loop(cond=cond, body=reduce_body, loop_vars=(start, limit, result))
        return result
    def get_min_value(self):
        """
        Returns min value of storage variable.
        """
        # Convenience wrapper: min-reduce the full leaf range.
        return self.reduce(0, self.capacity - 1, reduce_op=tf.minimum)
    def get_sum(self):
        """
        Returns sum value of storage variable.
        """
        # Convenience wrapper: sum-reduce the full leaf range.
        return self.reduce(0, self.capacity - 1, reduce_op=tf.add)
| 2,954 | 0 | 186 |
6734482244ba9249543e08f2dfed0a1ef77bbf57 | 1,828 | py | Python | plugins/costume_loader_pkg/__init__.py | AathmanT/qhana-plugin-runner | 206f9fa646e5b47bacf95a3b9be7e2b72576c9f1 | [
"Apache-2.0"
] | null | null | null | plugins/costume_loader_pkg/__init__.py | AathmanT/qhana-plugin-runner | 206f9fa646e5b47bacf95a3b9be7e2b72576c9f1 | [
"Apache-2.0"
] | 1 | 2021-09-02T07:56:23.000Z | 2021-09-03T11:46:41.000Z | plugins/costume_loader_pkg/__init__.py | AathmanT/qhana-plugin-runner | 206f9fa646e5b47bacf95a3b9be7e2b72576c9f1 | [
"Apache-2.0"
] | 2 | 2021-10-12T13:50:57.000Z | 2022-03-27T12:12:23.000Z | # Copyright 2021 QHAna plugin runner contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from flask.app import Flask
from qhana_plugin_runner.api.util import SecurityBlueprint
from qhana_plugin_runner.util.plugins import QHAnaPluginBase, plugin_identifier
# Identifier pieces used to register the plugin and name its blueprint.
_plugin_name = "costume-loader"
__version__ = "v0.1.0"
_identifier = plugin_identifier(_plugin_name, __version__)
COSTUME_LOADER_BLP = SecurityBlueprint(
    _identifier,  # blueprint name
    __name__,  # module import name!
    description="Costume loader API.",
    template_folder="costume_loader_templates",
)


# BUG FIX: the plugin class was missing from this copy although the
# routes import below explicitly requires CostumeLoader to be defined
# first; restored here.
class CostumeLoader(QHAnaPluginBase):
    """QHAna plugin exposing the costume loader API blueprint."""

    name = _plugin_name
    version = __version__

    def __init__(self, app: Optional[Flask]) -> None:
        super().__init__(app)

    def get_api_blueprint(self):
        """Return the blueprint that carries this plugin's API routes."""
        return COSTUME_LOADER_BLP

    def get_requirements(self) -> str:
        """Return additional pip requirements this plugin needs at runtime."""
        return "mysql-connector-python~=8.0.26"


try:
    # It is important to import the routes **after** COSTUME_LOADER_BLP and CostumeLoader are defined, because they are
    # accessed as soon as the routes are imported.
    import plugins.costume_loader_pkg.routes
except ImportError:
    # When running `poetry run flask install`, importing the routes will fail, because the dependencies are not
    # installed yet.
    pass
| 32.642857 | 119 | 0.754923 | # Copyright 2021 QHAna plugin runner contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from flask.app import Flask
from qhana_plugin_runner.api.util import SecurityBlueprint
from qhana_plugin_runner.util.plugins import QHAnaPluginBase, plugin_identifier
# Identifier pieces used to register the plugin and name its blueprint.
_plugin_name = "costume-loader"
__version__ = "v0.1.0"
_identifier = plugin_identifier(_plugin_name, __version__)
COSTUME_LOADER_BLP = SecurityBlueprint(
    _identifier,  # blueprint name
    __name__,  # module import name!
    description="Costume loader API.",
    template_folder="costume_loader_templates",
)
class CostumeLoader(QHAnaPluginBase):
    """QHAna plugin exposing the costume loader API blueprint."""
    name = _plugin_name
    version = __version__
    def __init__(self, app: Optional[Flask]) -> None:
        super().__init__(app)
    def get_api_blueprint(self):
        """Return the blueprint that carries this plugin's API routes."""
        return COSTUME_LOADER_BLP
    def get_requirements(self) -> str:
        """Return additional pip requirements this plugin needs at runtime."""
        return "mysql-connector-python~=8.0.26"
try:
    # It is important to import the routes **after** COSTUME_LOADER_BLP and CostumeLoader are defined, because they are
    # accessed as soon as the routes are imported.
    import plugins.costume_loader_pkg.routes
except ImportError:
    # When running `poetry run flask install`, importing the routes will fail, because the dependencies are not
    # installed yet.
    # Deliberately ignored so installation can proceed before deps exist.
    pass
| 160 | 147 | 23 |
51f5c2d34695eb25b016f247877ce1ad459946c9 | 313 | py | Python | salesforce/models.py | Atri10/Convin-Assignment | da46ce9944979a7d1c534e2df58e9e60b9dca10e | [
"MIT"
] | null | null | null | salesforce/models.py | Atri10/Convin-Assignment | da46ce9944979a7d1c534e2df58e9e60b9dca10e | [
"MIT"
] | null | null | null | salesforce/models.py | Atri10/Convin-Assignment | da46ce9944979a7d1c534e2df58e9e60b9dca10e | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
| 31.3 | 74 | 0.731629 | from django.db import models
# Create your models here.
class SalesForceUsers(models.Model):
    """Django model holding Salesforce account fields (name, type, phone)."""
    # NOTE(review): field names use Salesforce-style CamelCase rather than
    # PEP 8 snake_case; renaming would require a schema migration.
    AccountName = models.CharField(max_length=300, blank=True, null=True)
    Type = models.CharField(max_length=300, blank=True, null=True)
    Phone = models.CharField(max_length=20, blank=True, null=True)
| 0 | 226 | 25 |
eec3e9967167c77b8afccb411419b4c52c44290f | 1,400 | py | Python | database work/insert_users.py | tudor-bujdei-leonte/Foodents | 86ce25afc30330718d1308b66a80e869285f1fae | [
"RSA-MD"
] | 1 | 2021-04-16T22:16:06.000Z | 2021-04-16T22:16:06.000Z | database work/insert_users.py | aditya-5/Foodents | 81cb55c364b00b35e215bd488de186ff000dad82 | [
"RSA-MD"
] | null | null | null | database work/insert_users.py | aditya-5/Foodents | 81cb55c364b00b35e215bd488de186ff000dad82 | [
"RSA-MD"
] | null | null | null | from random import choice, randint
# Load the word and name pools used to fabricate fake user rows.
words = []
names = []
with open("EnglishWords.txt", "r") as f:
    line = f.readline().strip()
    while line:
        words.append(line)
        line = f.readline().strip()
with open("names.txt", "r") as f:
    line = f.readline().strip()
    while line:
        names.append(line)
        line = f.readline().strip()
# Emit 100 INSERT statements with randomised credentials.
# FIX: dropped five dead ``x = ""`` initialisations that were immediately
# overwritten; random call order (and hence output) is unchanged.
with open("sql_insert.txt", "w") as f:
    for i in range(0, 100):
        # Username: two random words, optionally capitalised and suffixed.
        username = choice(words) + choice(words)
        if randint(0, 1):
            username = username[0].upper() + username[1:]
        if randint(0, 1):
            username += str(randint(0, 100))
        # Password: three random words (each maybe upper-cased) plus digits.
        pw1 = choice(words)
        if randint(0, 1):
            pw1 = pw1.upper()
        pw2 = choice(words)
        if randint(0, 1):
            pw2 = pw2.upper()
        pw3 = choice(words)
        if randint(0, 1):
            pw3 = pw3.upper()
        password = pw1 + pw2 + pw3 + str(randint(0, 1000))
        first_name = choice(names)
        last_name = choice(names)
        email = first_name.lower() + "." + last_name.lower() + "@student.manchester.ac.uk"
        # NOTE(review): values are interpolated into SQL without escaping;
        # a word/name containing ' would corrupt the generated statement.
        sql = f"INSERT INTO USERS (username, password, email, first_name, last_name) VALUES ('{username}', '{password}', '{email}', '{first_name}', '{last_name}');"
f.write(sql) | 28.571429 | 164 | 0.529286 | from random import choice, randint
# Load the word and name pools used to fabricate fake user rows.
words = []
names = []
with open("EnglishWords.txt", "r") as f:
    line = f.readline().strip()
    while line:
        words.append(line)
        line = f.readline().strip()
with open("names.txt", "r") as f:
    line = f.readline().strip()
    while line:
        names.append(line)
        line = f.readline().strip()
# Emit 100 INSERT statements with randomised credentials.
with open("sql_insert.txt", "w") as f:
    for i in range(0, 100):
        username = ""
        password = ""
        email = ""
        first_name = ""
        last_name = ""
        # Username: two random words, optionally capitalised and suffixed.
        username = choice(words) + choice(words)
        if randint(0, 1):
            username = username[0].upper() + username[1:]
        if randint(0, 1):
            username += str(randint(0, 100))
        # Password: three random words (each maybe upper-cased) plus digits.
        pw1 = choice(words)
        if randint(0, 1):
            pw1 = pw1.upper()
        pw2 = choice(words)
        if randint(0, 1):
            pw2 = pw2.upper()
        pw3 = choice(words)
        if randint(0, 1):
            pw3 = pw3.upper()
        password = pw1 + pw2 + pw3 + str(randint(0, 1000))
        first_name = choice(names)
        last_name = choice(names)
        email = first_name.lower() + "." + last_name.lower() + "@student.manchester.ac.uk"
        # NOTE(review): values are interpolated into SQL without escaping;
        # a word/name containing ' would corrupt the generated statement.
        sql = f"INSERT INTO USERS (username, password, email, first_name, last_name) VALUES ('{username}', '{password}', '{email}', '{first_name}', '{last_name}');"
        f.write(sql)
d040a94d8f01cc00b64ad0dd10b0c9e1eb519282 | 42 | py | Python | __init__.py | AhMeD-PS4/idk | a88fe65da042f4fd9467e9f97882fafdee2b887d | [
"MIT"
] | null | null | null | __init__.py | AhMeD-PS4/idk | a88fe65da042f4fd9467e9f97882fafdee2b887d | [
"MIT"
] | null | null | null | __init__.py | AhMeD-PS4/idk | a88fe65da042f4fd9467e9f97882fafdee2b887d | [
"MIT"
] | null | null | null | import os
import discord
import requests
| 10.5 | 15 | 0.833333 | import os
import discord
import requests
| 0 | 0 | 0 |
43d5905f58986d9b09e2032d779e2cb6c323250c | 1,927 | py | Python | slotserver/slot_service.py | h-dub/slotserver | a4067fb4f756fd5bc7681b36a233647b367780dd | [
"MIT"
] | null | null | null | slotserver/slot_service.py | h-dub/slotserver | a4067fb4f756fd5bc7681b36a233647b367780dd | [
"MIT"
] | null | null | null | slotserver/slot_service.py | h-dub/slotserver | a4067fb4f756fd5bc7681b36a233647b367780dd | [
"MIT"
] | null | null | null | # Copyright (c) 2020 Hugh Wade
# SPDX-License-Identifier: MIT
import slotserver.slot_repository as sr
# Upper bounds guarding against oversized identifiers/payloads.
MAX_ID_LEN = 1024
MAX_DATA_LEN = 1024 * 8
# Limits on how many slots/subslots one batched read may request.
MAX_BATCH_SLOTS = 10
MAX_BATCH_SUBSLOTS = 10
class SlotOverflowException(Exception):
    '''
    Raised when something is bigger than allowed
    '''
    pass
class SlotUnderflowException(Exception):
    '''
    Raised when something is smaller than allowed
    '''
    pass
class SlotConsumerService():
    '''
    Read only interface to slot data.
    Enforces size constraints that mitigate DOS attack vectors.
    '''
    def __init__(self, repo: sr.SlotRepositoryInterface):
        # BUG FIX: get_slotdata reads self.repo, but this copy had no
        # __init__ assigning it, so every call raised AttributeError.
        self.repo = repo

    def get_slotdata(self, slot_ids: object, subslot_ids: object) -> object:
        '''
        Get data for a set of slot/subslots.
        Returned as a Dictionary of Dictionaries: data[slot_id][subslot_id]
        '''
        if(len(slot_ids) > MAX_BATCH_SLOTS or
           len(subslot_ids) > MAX_BATCH_SUBSLOTS):
            raise SlotOverflowException()
        if(len(slot_ids) == 0 or
           len(subslot_ids) == 0):
            raise SlotUnderflowException()
        results = {}
        for slot_id in slot_ids:
            results[slot_id] = {}
            for subslot_id in subslot_ids:
                results[slot_id][subslot_id] = \
                    self.repo.get(slot_id, subslot_id, False)
        # BUG FIX: the return was indented inside the slot loop, truncating
        # the result to the first slot; return only after all slots fill.
        return results
class SlotProducerService():
    '''
    Write only interface to slot data.
    Enforces size constraints that mitigate DOS attack vectors.
    '''
    # BUG FIX: this copy declared the class with no methods at all, making
    # it unusable; the constructor and write path are restored below.
    def __init__(self, repo: sr.SlotRepositoryInterface):
        # Backing repository that performs the actual writes.
        self.repo = repo

    def update_slot(self, slot_id: str, subslot_id: str, data: str) -> None:
        '''
        Store data under (slot_id, subslot_id), rejecting oversized input.
        '''
        if(len(slot_id) > MAX_ID_LEN or
           len(subslot_id) > MAX_ID_LEN or
           len(data) > MAX_DATA_LEN):
            raise SlotOverflowException()
        self.repo.upsert(slot_id, subslot_id, data)
| 26.39726 | 76 | 0.636222 | # Copyright (c) 2020 Hugh Wade
# SPDX-License-Identifier: MIT
import slotserver.slot_repository as sr
# Upper bounds guarding against oversized identifiers/payloads.
MAX_ID_LEN = 1024
MAX_DATA_LEN = 1024 * 8
# Limits on how many slots/subslots one batched read may request.
MAX_BATCH_SLOTS = 10
MAX_BATCH_SUBSLOTS = 10
class SlotOverflowException(Exception):
    '''
    Raised when something is bigger than allowed
    '''
    pass
class SlotUnderflowException(Exception):
    '''
    Raised when something is smaller than allowed
    '''
    pass
class SlotConsumerService():
    '''
    Read only interface to slot data.
    Enforces size constraints that mitigate DOS attack vectors.
    '''
    def __init__(self, repo: sr.SlotRepositoryInterface):
        self.repo = repo

    def get_slotdata(self, slot_ids: object, subslot_ids: object) -> object:
        '''
        Get data for a set of slot/subslots.
        Returned as a Dictionary of Dictionaries: data[slot_id][subslot_id]
        '''
        # Reject oversized batches first, then empty batches.
        if (len(slot_ids) > MAX_BATCH_SLOTS or
                len(subslot_ids) > MAX_BATCH_SUBSLOTS):
            raise SlotOverflowException()
        if len(slot_ids) == 0 or len(subslot_ids) == 0:
            raise SlotUnderflowException()
        # Nested comprehension mirrors the data[slot_id][subslot_id] shape.
        return {
            slot_id: {
                subslot_id: self.repo.get(slot_id, subslot_id, False)
                for subslot_id in subslot_ids
            }
            for slot_id in slot_ids
        }
class SlotProducerService():
    '''
    Write only interface to slot data.
    Enforces size constraints that mitigate DOS attack vectors.
    '''
    def __init__(self, repo: sr.SlotRepositoryInterface):
        # Backing repository that performs the actual writes.
        self.repo = repo
    def update_slot(self, slot_id: str, subslot_id: str, data: str) -> None:
        '''
        Store data under (slot_id, subslot_id), rejecting oversized input.
        '''
        if(len(slot_id) > MAX_ID_LEN or
           len(subslot_id) > MAX_ID_LEN or
           len(data) > MAX_DATA_LEN):
            raise SlotOverflowException()
        self.repo.upsert(slot_id, subslot_id, data)
| 381 | 0 | 81 |
b3c2833d5b2bc7ec563b2cbef6bf17779c7a1e7f | 2,540 | py | Python | src/asphalt/exceptions/reporters/sentry.py | asphalt-framework/asphalt-exceptions | d47211cf38025b6cdbce2c2c6a35a9b68cc8d717 | [
"Apache-2.0"
] | 1 | 2017-10-30T04:28:21.000Z | 2017-10-30T04:28:21.000Z | src/asphalt/exceptions/reporters/sentry.py | asphalt-framework/asphalt-sentry | 7a750c8cf9700b04549ecf32dacea6c63594a58a | [
"Apache-2.0"
] | null | null | null | src/asphalt/exceptions/reporters/sentry.py | asphalt-framework/asphalt-sentry | 7a750c8cf9700b04549ecf32dacea6c63594a58a | [
"Apache-2.0"
] | null | null | null | from __future__ import annotations
import logging
from typing import Any, Dict, Sequence, Union
import sentry_sdk
from asphalt.core import Context, resolve_reference
from sentry_sdk.integrations import Integration
from typeguard import check_argument_types
from asphalt.exceptions.api import ExceptionReporter
logger = logging.getLogger(__name__)
class SentryExceptionReporter(ExceptionReporter):
    """
    Reports exceptions using the Sentry_ service.
    To use this backend, install asphalt-exceptions with the ``sentry`` extra.
    All keyword arguments are directly passed to :func:`sentry_sdk.init`.
    The following defaults are set for the client arguments:
    * environment: "development" or "production", depending on the ``__debug__`` flag
    Integrations can be added via the ``integrations`` option which is a list where each item is
    either an object that implements the :class:`sentry_sdk.integrations.Integration` interface,
    or a dictionary where the ``type`` key is a module:varname reference to a class implementing
    the aforementioned interface. The ``args`` key, when present, should be a sequence that is
    passed to the integration as positional arguments, while the ``kwargs`` key, when present,
    should be a mapping of keyword arguments to their values.
    The extras passed to this backend are passed to :func:`sentry_sdk.capture_exception` as keyword
    arguments.
    For more information, see the `Sentry SDK documentation`_.
    .. _Sentry: https://sentry.io/
    .. _Sentry SDK documentation: https://docs.sentry.io/platforms/python/
    """

    # BUG FIX: this copy declared only the docstring, so the reporter never
    # initialised the Sentry SDK; the constructor is restored below.
    # NOTE(review): the upstream class may define further methods (e.g. the
    # actual report hook) that are outside this file chunk -- confirm.
    def __init__(
        self, integrations: Sequence[Union[Integration, Dict[str, Any]]] = (), **options
    ) -> None:
        check_argument_types()
        options.setdefault("environment", "development" if __debug__ else "production")
        integrations_: list[Integration] = []
        for integration in integrations:
            if isinstance(integration, dict):
                # Dict form: {"type": "module:Class", "args": [...], "kwargs": {...}}
                integration_class = resolve_reference(integration["type"])
                integration = integration_class(
                    *integration.get("args", ()), **integration.get("kwargs", {})
                )
            integrations_.append(integration)
        sentry_sdk.init(integrations=integrations_, **options)
| 36.811594 | 99 | 0.701969 | from __future__ import annotations
import logging
from typing import Any, Dict, Sequence, Union
import sentry_sdk
from asphalt.core import Context, resolve_reference
from sentry_sdk.integrations import Integration
from typeguard import check_argument_types
from asphalt.exceptions.api import ExceptionReporter
logger = logging.getLogger(__name__)
class SentryExceptionReporter(ExceptionReporter):
    """
    Reports exceptions using the Sentry_ service.

    To use this backend, install asphalt-exceptions with the ``sentry`` extra.

    All keyword arguments are passed straight through to :func:`sentry_sdk.init`,
    with one default filled in:

    * environment: "development" or "production", depending on the ``__debug__`` flag

    Each item of ``integrations`` is either a ready-made
    :class:`sentry_sdk.integrations.Integration` object, or a dict whose ``type``
    key is a module:varname reference to such a class; the optional ``args``
    sequence and ``kwargs`` mapping are used to instantiate it.

    The extras passed to this backend are forwarded to
    :func:`sentry_sdk.capture_exception` as keyword arguments.

    For more information, see the `Sentry SDK documentation`_.

    .. _Sentry: https://sentry.io/
    .. _Sentry SDK documentation: https://docs.sentry.io/platforms/python/
    """

    def __init__(
        self, integrations: Sequence[Union[Integration, Dict[str, Any]]] = (), **options
    ) -> None:
        check_argument_types()
        options.setdefault("environment", "development" if __debug__ else "production")

        def materialize(spec: Union[Integration, Dict[str, Any]]) -> Integration:
            # Dict specs name the integration class and its constructor arguments.
            if isinstance(spec, dict):
                integration_class = resolve_reference(spec["type"])
                return integration_class(
                    *spec.get("args", ()), **spec.get("kwargs", {})
                )
            return spec

        sentry_sdk.init(
            integrations=[materialize(spec) for spec in integrations], **options
        )

    def report_exception(
        self,
        ctx: Context,
        exception: BaseException,
        message: str,
        extra: dict[str, Any],
    ) -> None:
        """Capture *exception* in Sentry, passing *extra* as keyword arguments."""
        sentry_sdk.capture_exception(exception, **extra)
| 879 | 0 | 54 |
c407f72c37addaef08c0b14cfb40c53b7b43e992 | 1,782 | py | Python | base/send_email.py | medivhXu/AT-M | e1c215ae95085d1be24a7566fd365eb6bfae5e53 | [
"Apache-2.0"
] | 1 | 2019-06-05T08:53:47.000Z | 2019-06-05T08:53:47.000Z | base/send_email.py | medivhXu/AT-M | e1c215ae95085d1be24a7566fd365eb6bfae5e53 | [
"Apache-2.0"
] | null | null | null | base/send_email.py | medivhXu/AT-M | e1c215ae95085d1be24a7566fd365eb6bfae5e53 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# encoding: utf-8
"""
@author: Medivh Xu
@file: send_email.py
@time: 2020-03-04 21:27
"""
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
| 38.73913 | 104 | 0.601571 | #!/usr/bin/env python3
# encoding: utf-8
"""
@author: Medivh Xu
@file: send_email.py
@time: 2020-03-04 21:27
"""
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
def smtp_email(sender, receivers, password, smtp_server, port, html=None, attachment=None,
               subject="***来自PythonUI自动化***"):
    """Send a UI-automation report email over SMTP-over-SSL.

    Args:
        sender: sender address (also used as the login user).
        receivers: list of recipient addresses.
        password: SMTP password for ``sender``.
        smtp_server: SMTP host name.
        port: SMTP SSL port.
        html: optional extra HTML fragment appended to the body.
        attachment: a single file path, or a list/tuple of paths, to attach.
        subject: email subject (UTF-8 encoded header).

    Returns:
        True if the message was handed to the SMTP server, False on any failure.
    """
    message = MIMEMultipart()
    mail_msg = "<h1>UI自动化测试报告</h1><p>您提交的UI自动化测试已经测试完毕,附件中存放您的测试报告和测试日志.</p><p>"
    message['From'] = sender
    message['To'] = ','.join(receivers)
    message['Subject'] = Header(subject, 'utf-8')
    message.attach(MIMEText(mail_msg, 'html', 'utf-8'))
    if html:
        message.attach(MIMEText(html, 'html', 'utf-8'))
    if attachment:
        # Normalize to a list so single paths and sequences share one code path
        # (the original duplicated this whole branch).
        paths = list(attachment) if isinstance(attachment, (list, tuple)) else [attachment]
        for fp in paths:
            # NOTE(review): files are read as UTF-8 text, so only text
            # attachments (reports/logs) are supported; binary files would
            # need to be opened in 'rb' mode.
            with open(fp, 'r', encoding='utf-8') as f:
                att = MIMEText(f.read(), 'base64', 'utf-8')
            att["Content-Type"] = 'application/octet-stream'
            att["Content-Disposition"] = 'attachment; filename={}'.format(fp.split('/')[-1])
            message.attach(att)
    try:
        smtp_obj = smtplib.SMTP_SSL(smtp_server, port)
        smtp_obj.login(sender, password)
        smtp_obj.sendmail(sender, receivers, message.as_string())
        return True
    except Exception:
        # Best-effort send: any SMTP/auth/network failure is reported as False.
        return False
| 1,601 | 0 | 23 |
a3d0f94fdf45ea8e8217312d2911c7668070c62d | 7,177 | py | Python | alveus/layers/LayerEsnReservoir.py | levifussell/Alveus | 730f06d39dfd3f761cfecc4cc2834d79a11f3845 | [
"MIT"
] | 2 | 2018-04-14T19:04:00.000Z | 2019-03-22T23:11:32.000Z | alveus/layers/LayerEsnReservoir.py | levifussell/alveus | 730f06d39dfd3f761cfecc4cc2834d79a11f3845 | [
"MIT"
] | null | null | null | alveus/layers/LayerEsnReservoir.py | levifussell/alveus | 730f06d39dfd3f761cfecc4cc2834d79a11f3845 | [
"MIT"
] | null | null | null | import numpy as np
import numpy.linalg as la
from collections import deque
from .LayerReservoir import LayerReservoir
"""
Notes (from scholarpedia):
-The SPECTRAL RADIUS of the reservoir weights codetermines:
(1): (?)
(2): amount of nonlinear interaction of input components through time
(larger spectral radius ==> longer-range interactions)
-INPUT SCALING codetermines the degree of nonlinearity of the reservoir dynamics. Examples:
(1): very small input amplitudes ==> reservoir behaves almost like linear medium.
(2): very large input amplitudes ==> drives the reservoir neurons to the saturation of the
sigmoid, and a binary switching dynamic results.
-OUTPUT FEEDBACK SCALING determines the extent to which the trained ESN has an autonomous
generation component.
(1): no output feedback: ESN unable to generate predictions for future time steps.
(2): nonzero output feedbacl: danger of dynamical instability.
-CONNECTIVITY/SPARSITY of reservoir weight matrix:
(1) todo
"""
class LayerEsnReservoir(LayerReservoir):
"""
(args):
input_size : input signal is input_size dimensions.
num_units : reservoir has num_units units.
idx : unique ID of the reservoir (default=None) -- good for debug/multiple reservoirs
echo_param : leaky rate of the reservoir units
activation : activation function of the reservoir units (default=tanh)
debug : when True, this will print live information (default=False)
(description): reservoir class. Extend this class to create different reservoirs
"""
def info(self):
"""
(args): None
(description):
Print live info about the reservoir
"""
out = u'Reservoir(num_units=%d, input_size=%d, output_size=%d, \u03B5=%.2f)\n' % (self.num_units, self.input_size, self.output_size, self.echo_param)
out += 'W_res - spec_scale: %.2f, %s init\n' % (self.spectral_scale, self.W_res_init_strategy)
out += 'W_in - scale: %.2f, %s init' % (self.input_weights_scale, self.W_in_init_strategy)
return out
def forward(self, x):
"""
Forward propagate input signal u(n) (at time n) through reservoir.
x: input_size-dimensional input vector
"""
super(LayerEsnReservoir, self).forward(x)
assert self.ins_init, "Res. input weights not yet initialized (ID=%d)." % self.idx
assert self.res_init, "Res. recurrent weights not yet initialized (ID=%d)." % self.idx
# prob =
# dropped = (np.random.rand(*np.shape(self.W_res)) < prob).astype(float)
# mask_n = (np.random.rand(self.num_units,1) > self.drop_probability).astype(float)
# print("V", np.repeat(mask_n, self.num_units, axis=1))
# print("H", np.repeat(mask_n.T, self.num_units, axis=0))
# mask_v = np.repeat(mask_n, self.num_units, axis=1)
# dropped = mask_v * mask_v.T
in_to_res = np.dot(self.W_in, x).squeeze()
self.prev_in_to_res = np.copy(in_to_res)
res_to_res = np.dot(self.state.reshape(1, -1), self.W_res)
self.prev_res_to_res = np.copy(res_to_res)
# Equation (1) in "Formalism and Theory" of Scholarpedia page
self.prev_state = np.copy(self.state)
self.state = (1. - self.echo_param) * self.state + self.echo_param * self.activation(in_to_res + res_to_res)
# self.signals.append(self.state[:self.num_to_store].tolist())
#if self.output_size == self.num_units:
output = self.state.squeeze()
#else:
# return the reservoir state appended to the input
#output = np.hstack((self.state.squeeze(), x))
return output
| 43.23494 | 157 | 0.616414 | import numpy as np
import numpy.linalg as la
from collections import deque
from .LayerReservoir import LayerReservoir
"""
Notes (from scholarpedia):
-The SPECTRAL RADIUS of the reservoir weights codetermines:
(1): (?)
(2): amount of nonlinear interaction of input components through time
(larger spectral radius ==> longer-range interactions)
-INPUT SCALING codetermines the degree of nonlinearity of the reservoir dynamics. Examples:
(1): very small input amplitudes ==> reservoir behaves almost like linear medium.
(2): very large input amplitudes ==> drives the reservoir neurons to the saturation of the
sigmoid, and a binary switching dynamic results.
-OUTPUT FEEDBACK SCALING determines the extent to which the trained ESN has an autonomous
generation component.
(1): no output feedback: ESN unable to generate predictions for future time steps.
(2): nonzero output feedbacl: danger of dynamical instability.
-CONNECTIVITY/SPARSITY of reservoir weight matrix:
(1) todo
"""
class LayerEsnReservoir(LayerReservoir):
    """
    Echo State Network reservoir layer.

    (args):
        input_size  : input signal is input_size dimensions.
        num_units   : reservoir has num_units units.
        output_size : dimensionality of the layer output (default: num_units).
        echo_param  : leaky rate of the reservoir units.
        idx         : unique ID of the reservoir (default=None) -- good for debug/multiple reservoirs
        activation  : activation function of the reservoir units (default=tanh)
        debug       : when True, this will print live information (default=False)

    (description): reservoir class. Extend this class to create different reservoirs
    """

    def __init__(self, input_size, num_units, output_size=None, echo_param=0.6, idx=None,
                 activation=np.tanh, debug=False):
        # Output defaults to the full reservoir state when not specified.
        if output_size is None:
            output_size = num_units
        super(LayerEsnReservoir, self).__init__(input_size, output_size, num_units)
        self.echo_param = echo_param
        self.activation = activation
        self.idx = idx  # <- can assign reservoir a unique ID for debugging
        self.debug = debug

        # reservoir-to-reservoir weights (not yet initialized) and unit states
        self.W_res = np.zeros((self.num_units, self.num_units))
        self.state = np.zeros(self.num_units)

        # These are set by initialize_input_weights() / initialize_reservoir().
        # (The original assigned self.sparsity twice; once is enough.)
        self.spectral_scale = None
        self.W_res_init_strategy = None
        self.sparsity = None

        # Rolling store of recent reservoir states (for inspection/debugging).
        self.max_signal_store = 100
        self.signals = deque(maxlen=self.max_signal_store)
        self.num_to_store = 50

        self.ins_init = False
        self.res_init = False
        self.drop_probability = 0.5

        # Previous-step activations, populated by forward().
        self.prev_in_to_res = None
        self.prev_res_to_res = None
        # BUGFIX: this was misspelled `prev_sate`; forward() writes `prev_state`.
        self.prev_state = None

    def info(self):
        """
        (args): None
        (description):
            Print live info about the reservoir
        """
        out = u'Reservoir(num_units=%d, input_size=%d, output_size=%d, \u03B5=%.2f)\n' % (self.num_units, self.input_size, self.output_size, self.echo_param)
        out += 'W_res - spec_scale: %.2f, %s init\n' % (self.spectral_scale, self.W_res_init_strategy)
        out += 'W_in - scale: %.2f, %s init' % (self.input_weights_scale, self.W_in_init_strategy)
        return out

    def initialize_reservoir(self, strategy='uniform', **kwargs):
        """Create and scale the recurrent weight matrix W_res.

        Keyword options:
            spectral_scale : target spectral radius of W_res (default 1.0).
            strategy       : 'uniform' | 'binary' | 'gaussian'.  BUGFIX: the
                             positional `strategy` argument was previously
                             ignored; kwargs['strategy'] still takes
                             precedence for backward compatibility.
            sparsity       : fraction of connections kept (default 1.0).
            offset         : value subtracted from raw weights before
                             sparsifying (default 0.5).
        """
        self.spectral_scale = kwargs.get('spectral_scale', 1.0)
        self.W_res_init_strategy = kwargs.get('strategy', strategy)
        self.sparsity = kwargs.get('sparsity', 1.0)
        offset = kwargs.get('offset', 0.5)

        if self.W_res_init_strategy == 'binary':
            self.W_res = (np.random.rand(self.num_units, self.num_units) > 0.5).astype(float)
        elif self.W_res_init_strategy == 'uniform':
            self.W_res = np.random.rand(self.num_units, self.num_units)
        elif self.W_res_init_strategy == 'gaussian':
            self.W_res = np.random.randn(self.num_units, self.num_units)
        else:
            raise ValueError('unknown res. weight init strategy %s' %
                             self.W_res_init_strategy)

        # Zero out a random fraction of the connections, then rescale so the
        # spectral radius equals spectral_scale.
        sparsity_matrix = (np.random.rand(self.num_units, self.num_units) < self.sparsity).astype(float)
        self.W_res -= offset
        self.W_res *= sparsity_matrix
        self.W_res /= max(abs(la.eig(self.W_res)[0]))
        self.W_res *= self.spectral_scale
        self.res_init = True

    def reset(self):
        """Clear the reservoir state (initialized weights are kept)."""
        super(LayerEsnReservoir, self).reset()
        self.state = np.zeros(self.num_units)

    def forward(self, x):
        """
        Forward propagate input signal u(n) (at time n) through reservoir.

        x: input_size-dimensional input vector
        """
        super(LayerEsnReservoir, self).forward(x)
        assert self.ins_init, "Res. input weights not yet initialized (ID=%d)." % self.idx
        assert self.res_init, "Res. recurrent weights not yet initialized (ID=%d)." % self.idx

        in_to_res = np.dot(self.W_in, x).squeeze()
        self.prev_in_to_res = np.copy(in_to_res)
        res_to_res = np.dot(self.state.reshape(1, -1), self.W_res)
        self.prev_res_to_res = np.copy(res_to_res)

        # Leaky-integrator update -- Equation (1) in "Formalism and Theory"
        # of the Scholarpedia ESN page.
        self.prev_state = np.copy(self.state)
        self.state = (1. - self.echo_param) * self.state + self.echo_param * self.activation(in_to_res + res_to_res)

        # Output is the (squeezed) reservoir state.
        return self.state.squeeze()
return output
| 3,265 | 0 | 82 |
cdca8ebb5d916df9ffc113cb00bdb25dd2304953 | 3,997 | py | Python | server side/classify_text.py | yinhaoxiao/EGT-Hackathon-GWU-Team | ef5ba5042c8690f3c297099125d2e763ab36a7eb | [
"RSA-MD"
] | null | null | null | server side/classify_text.py | yinhaoxiao/EGT-Hackathon-GWU-Team | ef5ba5042c8690f3c297099125d2e763ab36a7eb | [
"RSA-MD"
] | null | null | null | server side/classify_text.py | yinhaoxiao/EGT-Hackathon-GWU-Team | ef5ba5042c8690f3c297099125d2e763ab36a7eb | [
"RSA-MD"
] | null | null | null | import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
import pickle
import re
word2int_filepath = "./train data/word2int.p"
| 37.35514 | 93 | 0.630973 | import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
import pickle
import re
word2int_filepath = "./train data/word2int.p"
def weight_variable(shape):
    """Return a TF variable initialized from a truncated normal (stddev 0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))
def bias_variable(shape):
    """Return a TF bias variable initialized to the constant 0.1."""
    return tf.Variable(tf.constant(0.1, shape=shape))
def lstm_cell(num_units):
    """Build a basic LSTM cell that reuses variables from the current scope."""
    reuse = tf.get_variable_scope().reuse
    return tf.contrib.rnn.BasicLSTMCell(num_units, reuse=reuse)
def RNN(x, timestamps, num_neurons, weights, biases):
    """Run a single-layer static LSTM over ``x`` and project the final output."""
    steps = tf.unstack(x, timestamps, 1)
    # Renamed from `lstm_cell` to avoid shadowing the module-level helper.
    cell = rnn.BasicLSTMCell(num_neurons, forget_bias=1.0)
    outputs, _states = rnn.static_rnn(cell, steps, dtype=tf.float32)
    return tf.matmul(outputs[-1], weights) + biases
def multi_RNN(x, timestamps, num_neurons, weights, biases, num_layers):
    """Run a ``num_layers``-deep static LSTM over ``x`` and project the final output."""
    steps = tf.unstack(x, timestamps, 1)
    cells = [tf.contrib.rnn.BasicLSTMCell(num_neurons) for _ in range(num_layers)]
    stacked = tf.contrib.rnn.MultiRNNCell(cells)
    outputs, _states = rnn.static_rnn(stacked, steps, dtype=tf.float32)
    return tf.matmul(outputs[-1], weights) + biases
class TextClassifier:
    """Run inference with a pre-trained fully-connected + LSTM text classifier.

    Posts are cleaned, tokenized, mapped to integer ids with a pickled
    vocabulary, padded/truncated to 60 tokens, and fed through a restored
    TensorFlow 1.x graph.  The softmax output is stored in ``self.pred``
    and exposed via :meth:`get_prediction`.
    """

    def __init__(self, posts_list):
        # Build a fresh graph so repeated instantiation doesn't accumulate nodes.
        tf.reset_default_graph()
        self.pred = None

        # word -> integer id vocabulary produced at training time.
        with open(word2int_filepath, 'rb') as f:
            word2int_index = pickle.load(f)

        # --- convert each post to a fixed-length vector of word ids ---
        max_tokens = 60  # input length the model was trained with
        text_x = []
        for post_string in posts_list:
            post_string = re.sub('[^A-Za-z0-9 @]+', '', post_string)
            # Drop mentions (tokens containing '@') and empty tokens.
            # BUGFIX: the original removed items from the list while iterating
            # it, which skipped the token following every removal.
            text_words = [w for w in post_string.split(' ')
                          if w != "" and "@" not in w]
            # Pad/truncate to max_tokens; unknown words map to id 0.
            row = [word2int_index.get(text_words[i], 0) if i < len(text_words) else 0
                   for i in range(max_tokens)]
            text_x.append(row)

        # --- rebuild the inference graph ---
        text_x = np.array(text_x)
        # BUGFIX: print() call form -- the original `print x` statement is a
        # SyntaxError under Python 3 (output is unchanged).
        print(text_x.shape)
        num_input = 5
        # BUGFIX: floor division; true division yields a float under Python 3
        # and breaks the reshape/placeholder shapes below.
        timestamps = text_x.shape[1] // 5
        num_neurons = timestamps
        num_classifications = 2
        text_x = text_x.reshape(text_x.shape[0], timestamps, num_input)
        x_placeholder = tf.placeholder("float", shape=[None, timestamps, num_input])
        # Kept for parity with the training graph; unused at inference time.
        y_placeholder = tf.placeholder("float", shape=[None, num_classifications])

        #### fully connected layers ####
        rnn_W_fc1 = weight_variable([timestamps, timestamps])
        rnn_b_fc1 = bias_variable([timestamps])
        rnn_h_flat = tf.reshape(x_placeholder, [-1, timestamps])  # flat into 1 dimension
        rnn_h_fc1 = tf.nn.relu(tf.matmul(rnn_h_flat, rnn_W_fc1) + rnn_b_fc1)
        rnn_W_fc2 = weight_variable([timestamps, timestamps])
        rnn_b_fc2 = bias_variable([timestamps])
        rnn_h_fc2 = tf.nn.relu(tf.matmul(rnn_h_fc1, rnn_W_fc2) + rnn_b_fc2)
        rnn_W_fc3 = weight_variable([timestamps, timestamps])
        rnn_b_fc3 = bias_variable([timestamps])
        rnn_h_fc3 = tf.nn.relu(tf.matmul(rnn_h_fc2, rnn_W_fc3) + rnn_b_fc3)
        rnn_h_fc3 = tf.reshape(rnn_h_fc3, [-1, timestamps, num_input])

        ####### LSTM layers for label #########
        w_lstm1 = weight_variable([num_neurons, num_classifications])
        b_lstm1 = bias_variable([num_classifications])
        prediction = tf.nn.softmax(RNN(rnn_h_fc3, timestamps, num_neurons, w_lstm1, b_lstm1))

        with tf.Session() as sess:
            saver = tf.train.Saver()
            saver.restore(sess, './train_model.ckpt')
            print("Model Restored")  # BUGFIX: Python 3 print-call form
            self.pred = prediction.eval({x_placeholder: text_x}, sess)

    def get_prediction(self):
        """Return the softmax predictions computed in __init__ (None on failure)."""
        return self.pred
| 3,657 | 0 | 191 |
349db6311e27ac288850009f0ce6afa6c7bc2019 | 5,626 | py | Python | src/huntsman/drp/lsst_tasks.py | danjampro/huntsman-drp | 9470c03b87991fbe09e194470f28e8b45785c206 | [
"MIT"
] | null | null | null | src/huntsman/drp/lsst_tasks.py | danjampro/huntsman-drp | 9470c03b87991fbe09e194470f28e8b45785c206 | [
"MIT"
] | null | null | null | src/huntsman/drp/lsst_tasks.py | danjampro/huntsman-drp | 9470c03b87991fbe09e194470f28e8b45785c206 | [
"MIT"
] | null | null | null | import os
import subprocess
from lsst.pipe.tasks.ingest import IngestTask
from lsst.utils import getPackageDir
from lsst.meas.algorithms import IngestIndexedReferenceTask
# from lsst.pipe.drivers.constructCalibs import BiasTask, FlatTask
from huntsman.drp.utils import date_to_ymd
def ingest_raw_data(filename_list, butler_directory, mode="link", ignore_ingested=False):
    """Ingest raw exposure files into the butler repository.

    The default mode "link" links rather than copies files; already-ingested
    files are skipped only when ``ignore_ingested`` is True.
    """
    ingest_task = IngestTask().prepareTask(
        root=butler_directory, mode=mode, ignoreIngested=ignore_ingested
    )
    ingest_task.ingestFiles(filename_list)
def ingest_reference_catalogue(butler_directory, filenames, output_directory=None):
    """Convert reference-catalogue files and ingest them into the repository.

    ``output_directory`` defaults to the butler directory itself.
    """
    if output_directory is None:
        output_directory = butler_directory

    config_file = os.path.join(getPackageDir("obs_huntsman"), "config",
                               "ingestSkyMapperReference.py")
    # Load the task config (kept for parity with the original call sequence;
    # it also validates the config file up front).
    config = IngestIndexedReferenceTask.ConfigClass()
    config.load(config_file)

    # Convert the files into the correct format and place them into the repository.
    IngestIndexedReferenceTask.parseAndRun(args=[
        butler_directory,
        "--configfile", config_file,
        "--output", output_directory,
        "--clobber-config",
        *filenames,
    ])
def ingest_master_biases(calib_date, butler_directory, calib_directory, rerun, validity=1000):
    """
    Ingest the master bias of a given date.
    """
    date_str = date_to_ymd(calib_date)
    # For some reason we have to provide the config explicitly.
    config_file = os.path.join(getPackageDir("obs_huntsman"), "config", "ingestBiases.py")
    cmd = (
        f"ingestCalibs.py {butler_directory}"
        # TODO - Remove hard-coded directory structure
        f" {butler_directory}/rerun/{rerun}/calib/bias/{date_str}/*/*.fits"
        f" --validity {validity}"
        f" --calib {calib_directory} --mode=link"
        " --config clobber=True"
        f" --configfile {config_file}"
    )
    subprocess.check_output(cmd, shell=True)
def ingest_master_flats(calib_date, butler_directory, calib_directory, rerun, validity=1000):
    """
    Ingest the master flat of a given date.
    """
    date_str = date_to_ymd(calib_date)
    # For some reason we have to provide the config explicitly.
    config_file = os.path.join(getPackageDir("obs_huntsman"), "config", "ingestFlats.py")
    cmd = (
        f"ingestCalibs.py {butler_directory}"
        # TODO - Remove hard-coded directory structure
        f" {butler_directory}/rerun/{rerun}/calib/flat/{date_str}/*/*.fits"
        f" --validity {validity}"
        f" --calib {calib_directory} --mode=link"
        " --config clobber=True"
        f" --configfile {config_file}"
    )
    subprocess.check_output(cmd, shell=True)
def constructBias(calib_date, exptime, ccd, butler_directory, calib_directory, rerun, data_ids,
                  nodes=1, procs=1):
    """Build a master bias from the given visit ids via constructBias.py."""
    date_str = date_to_ymd(calib_date)
    visits = '^'.join(format(data_id) for data_id in data_ids)
    cmd = (
        f"constructBias.py {butler_directory} --rerun {rerun}"
        f" --calib {calib_directory}"
        f" --id visit={visits}"
        " dataType='bias'"
        f" expTime={exptime}"
        f" ccd={ccd}"
        f" --nodes {nodes} --procs {procs}"
        f" --calibId expTime={exptime} calibDate={date_str}"
    )
    subprocess.check_output(cmd, shell=True)
def constructFlat(calib_date, filter_name, ccd, butler_directory, calib_directory, rerun, data_ids,
                  nodes=1, procs=1):
    """Build a master flat for one filter from the given visit ids."""
    date_str = date_to_ymd(calib_date)
    visits = '^'.join(format(data_id) for data_id in data_ids)
    cmd = (
        f"constructFlat.py {butler_directory} --rerun {rerun}"
        f" --calib {calib_directory}"
        f" --id visit={visits}"
        " dataType='flat'"
        f" filter={filter_name}"
        f" ccd={ccd}"
        f" --nodes {nodes} --procs {procs}"
        f" --calibId filter={filter_name} calibDate={date_str}"
    )
    subprocess.check_output(cmd, shell=True)
def processCcd(butler_directory, calib_directory, rerun, filter_name, dataType='science'):
    """Process ingested exposures."""
    cmd = (f"processCcd.py {butler_directory} --rerun {rerun}"
           f" --id dataType={dataType} filter={filter_name}"
           f" --calib {calib_directory}")
    subprocess.check_output(cmd, shell=True)
def makeDiscreteSkyMap(butler_directory='DATA', rerun='processCcdOutputs:coadd'):
    """Create a sky map that covers processed exposures."""
    cmd = (f"makeDiscreteSkyMap.py {butler_directory} --id --rerun {rerun} "
           "--config skyMap.projection='TAN'")
    subprocess.check_output(cmd, shell=True)
def makeCoaddTempExp(filter, butler_directory='DATA', calib_directory='DATA/CALIB',
                     rerun='coadd'):
    """Warp exposures onto sky map."""
    cmd = (f"makeCoaddTempExp.py {butler_directory} --rerun {rerun} "
           f"--selectId filter={filter} --id filter={filter} tract=0 "
           "patch=0,0^0,1^0,2^1,0^1,1^1,2^2,0^2,1^2,2 "
           "--config doApplyUberCal=False")
    print(f'The command is: {cmd}')
    subprocess.check_output(cmd, shell=True)
def assembleCoadd(filter, butler_directory='DATA', calib_directory='DATA/CALIB',
                  rerun='coadd'):
    """Assemble the warped exposures into a coadd"""
    cmd = (f"assembleCoadd.py {butler_directory} --rerun {rerun} "
           f"--selectId filter={filter} --id filter={filter} tract=0 "
           "patch=0,0^0,1^0,2^1,0^1,1^1,2^2,0^2,1^2,2")
    print(f'The command is: {cmd}')
    subprocess.check_output(cmd, shell=True)
| 37.013158 | 99 | 0.672414 | import os
import subprocess
from lsst.pipe.tasks.ingest import IngestTask
from lsst.utils import getPackageDir
from lsst.meas.algorithms import IngestIndexedReferenceTask
# from lsst.pipe.drivers.constructCalibs import BiasTask, FlatTask
from huntsman.drp.utils import date_to_ymd
def ingest_raw_data(filename_list, butler_directory, mode="link", ignore_ingested=False):
"""
"""
# Create the ingest task
task = IngestTask()
task = task.prepareTask(root=butler_directory, mode=mode, ignoreIngested=ignore_ingested)
# Ingest the files
task.ingestFiles(filename_list)
def ingest_reference_catalogue(butler_directory, filenames, output_directory=None):
"""
"""
if output_directory is None:
output_directory = butler_directory
# Load the config file
pkgdir = getPackageDir("obs_huntsman")
config_file = os.path.join(pkgdir, "config", "ingestSkyMapperReference.py")
config = IngestIndexedReferenceTask.ConfigClass()
config.load(config_file)
# Convert the files into the correct format and place them into the repository
args = [butler_directory,
"--configfile", config_file,
"--output", output_directory,
"--clobber-config",
*filenames]
IngestIndexedReferenceTask.parseAndRun(args=args)
def ingest_master_biases(calib_date, butler_directory, calib_directory, rerun, validity=1000):
"""
Ingest the master bias of a given date.
"""
calib_date = date_to_ymd(calib_date)
cmd = f"ingestCalibs.py {butler_directory}"
# TODO - Remove hard-coded directory structure
cmd += f" {butler_directory}/rerun/{rerun}/calib/bias/{calib_date}/*/*.fits"
cmd += f" --validity {validity}"
cmd += f" --calib {calib_directory} --mode=link"
# For some reason we have to provide the config explicitly
config_file = os.path.join(getPackageDir("obs_huntsman"), "config", "ingestBiases.py")
cmd += " --config clobber=True"
cmd += f" --configfile {config_file}"
subprocess.check_output(cmd, shell=True)
def ingest_master_flats(calib_date, butler_directory, calib_directory, rerun, validity=1000):
"""
Ingest the master flat of a given date.
"""
calib_date = date_to_ymd(calib_date)
cmd = f"ingestCalibs.py {butler_directory}"
# TODO - Remove hard-coded directory structure
cmd += f" {butler_directory}/rerun/{rerun}/calib/flat/{calib_date}/*/*.fits"
cmd += f" --validity {validity}"
cmd += f" --calib {calib_directory} --mode=link"
# For some reason we have to provide the config explicitly
config_file = os.path.join(getPackageDir("obs_huntsman"), "config", "ingestFlats.py")
cmd += " --config clobber=True"
cmd += f" --configfile {config_file}"
subprocess.check_output(cmd, shell=True)
def constructBias(calib_date, exptime, ccd, butler_directory, calib_directory, rerun, data_ids,
nodes=1, procs=1):
"""
"""
calib_date = date_to_ymd(calib_date)
cmd = f"constructBias.py {butler_directory} --rerun {rerun}"
cmd += f" --calib {calib_directory}"
cmd += f" --id visit={'^'.join([f'{id}' for id in data_ids])}"
cmd += " dataType='bias'"
cmd += f" expTime={exptime}"
cmd += f" ccd={ccd}"
cmd += f" --nodes {nodes} --procs {procs}"
cmd += f" --calibId expTime={exptime} calibDate={calib_date}"
subprocess.check_output(cmd, shell=True)
def constructFlat(calib_date, filter_name, ccd, butler_directory, calib_directory, rerun, data_ids,
nodes=1, procs=1):
"""
"""
calib_date = date_to_ymd(calib_date)
cmd = f"constructFlat.py {butler_directory} --rerun {rerun}"
cmd += f" --calib {calib_directory}"
cmd += f" --id visit={'^'.join([f'{id}' for id in data_ids])}"
cmd += " dataType='flat'"
cmd += f" filter={filter_name}"
cmd += f" ccd={ccd}"
cmd += f" --nodes {nodes} --procs {procs}"
cmd += f" --calibId filter={filter_name} calibDate={calib_date}"
subprocess.check_output(cmd, shell=True)
def processCcd(butler_directory, calib_directory, rerun, filter_name, dataType='science'):
"""Process ingested exposures."""
cmd = f"processCcd.py {butler_directory} --rerun {rerun}"
cmd += f" --id dataType={dataType} filter={filter_name}"
cmd += f" --calib {calib_directory}"
subprocess.check_output(cmd, shell=True)
def makeDiscreteSkyMap(butler_directory='DATA', rerun='processCcdOutputs:coadd'):
"""Create a sky map that covers processed exposures."""
cmd = f"makeDiscreteSkyMap.py {butler_directory} --id --rerun {rerun} "
cmd += f"--config skyMap.projection='TAN'"
subprocess.check_output(cmd, shell=True)
def makeCoaddTempExp(filter, butler_directory='DATA', calib_directory='DATA/CALIB',
rerun='coadd'):
"""Warp exposures onto sky map."""
cmd = f"makeCoaddTempExp.py {butler_directory} --rerun {rerun} "
cmd += f"--selectId filter={filter} --id filter={filter} tract=0 "
cmd += f"patch=0,0^0,1^0,2^1,0^1,1^1,2^2,0^2,1^2,2 "
cmd += f"--config doApplyUberCal=False"
print(f'The command is: {cmd}')
subprocess.check_output(cmd, shell=True)
def assembleCoadd(filter, butler_directory='DATA', calib_directory='DATA/CALIB',
rerun='coadd'):
"""Assemble the warped exposures into a coadd"""
cmd = f"assembleCoadd.py {butler_directory} --rerun {rerun} "
cmd += f"--selectId filter={filter} --id filter={filter} tract=0 "
cmd += f"patch=0,0^0,1^0,2^1,0^1,1^1,2^2,0^2,1^2,2"
print(f'The command is: {cmd}')
subprocess.check_output(cmd, shell=True)
| 0 | 0 | 0 |
2062e70b3770134312574bef452369f5028b0f14 | 9,973 | py | Python | bot.py | jack-davidson/tcubed-bot | f2a957f53c9a40300f33d7eae678b0dacba97382 | [
"MIT"
] | null | null | null | bot.py | jack-davidson/tcubed-bot | f2a957f53c9a40300f33d7eae678b0dacba97382 | [
"MIT"
] | null | null | null | bot.py | jack-davidson/tcubed-bot | f2a957f53c9a40300f33d7eae678b0dacba97382 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import discord
import json
import requests
import math
client = discord.Client()
sessions = []
session_id = 0
API_HOST = "99.189.77.224"
API_PORT = "8000"
with open("discord_token", "r") as f:
token = f.read()
f.close
turns = {'O': Player.O, 'E': Player.E, 'X': Player.X}
reverse_turns = {value: key for (key, value) in turns.items()}
# convert string to Player
# convert Player to string
# format http uri with host and port
# make json get request
# make an api request to tcubed api and return best move given board and player
# ttt main function (process args etc)
@client.event
@client.event
client.run(token)
| 28.658046 | 129 | 0.539557 | #!/usr/bin/python3
import discord
import json
import requests
import math
client = discord.Client()
sessions = []
session_id = 0
API_HOST = "99.189.77.224"
API_PORT = "8000"
with open("discord_token", "r") as f:
token = f.read()
f.close
class Player:
    """Board-cell values: X and O are the players, E marks an empty cell."""
    O = -1
    E = 0
    X = 1


# Letter <-> Player lookup tables (reverse_turns is derived so they stay in sync).
turns = {'O': Player.O, 'E': Player.E, 'X': Player.X}
reverse_turns = {value: key for (key, value) in turns.items()}


# convert string to Player
def deserialize_turn(player: str) -> int:
    """Map a board letter ('X'/'O'/'E') to its Player constant."""
    return turns[player]


# convert Player to string
def serialize_turn(player: 'Player') -> str:
    """Map a Player constant back to its board letter."""
    return reverse_turns[player]


def deserialize_board(board: str) -> list[list[Player]]:
    """Parse a flat row-major board string into a square matrix of Player values.

    BUGFIX: uses math.isqrt instead of int(math.sqrt(...)) so the side length
    is computed exactly (no float rounding).
    """
    side = math.isqrt(len(board))
    return [[deserialize_turn(cell) for cell in board[i:i + side]]
            for i in range(0, len(board), side)]


def serialize_board(board_matrix) -> str:
    """Flatten a board matrix back into its row-major string form."""
    # str.join avoids the quadratic string concatenation of the original.
    return "".join(serialize_turn(cell) for row in board_matrix for cell in row)
# format http uri with host and port
def format_uri(host, port):
    """Build a plain-HTTP base URI from host and port strings."""
    return f"http://{host}:{port}"
# make json get request
# make json get request
def api_request(uri):
    """GET `uri` and decode the response body as JSON."""
    response = requests.get(uri)
    return json.loads(response.content)
# make an api request to tcubed api and return best move given board and player
def best_move(board, player):
    """Ask the tcubed API for the best move for `player` on `board`."""
    endpoint = "/board/" + board + "/player/" + player
    return api_request(format_uri(API_HOST, API_PORT) + endpoint)
class Session:
    """A single tic-tac-toe game between an owner and a guest (or the bot).

    Instances register themselves in the module-level ``sessions`` list and
    update the module-level ``session_id`` to index the newest session.
    """

    class MoveAlreadyTakenError(Exception):
        """Raised when a move targets a cell that is already occupied."""

    def __init__(self, board_string, guest="unkown", owner="unkown",
                 bot=False):
        global session_id
        self.board_matrix = deserialize_board(board_string)
        self.player = Player.X  # X always moves first
        self.owner = owner
        self.guest = guest
        self.bot = bot
        sessions.append(self)
        # NOTE(review): session_id is module-global, so it always tracks the
        # most recently created session rather than this particular one.
        session_id = len(sessions) - 1

    def next_player(self):
        """Switch turns (Player.X <-> Player.O)."""
        self.player = -self.player

    def move(self, row, col):
        """Place the current player's mark at (row, col) and pass the turn.

        Raises:
            Session.MoveAlreadyTakenError: if the cell is already occupied.
        """
        if self.board_matrix[row][col] != Player.E:
            raise Session.MoveAlreadyTakenError
        self.board_matrix[row][col] = self.player
        self.next_player()

    def moves_left(self):
        """Return True while at least one empty cell remains."""
        return any(cell == Player.E
                   for row in self.board_matrix for cell in row)

    def __str__(self):
        # Render the board: occupied cells show their mark, empty cells show
        # their 1-based position number (what players type to move there).
        board_string = ""
        cell_number = 1
        for row in self.board_matrix:
            for cell in row:
                board_string += " "
                if cell != Player.E:
                    board_string += serialize_turn(cell)
                else:
                    board_string += str(cell_number)
                cell_number += 1
            board_string += "\n"
        return "```toml\n" \
               f"[session id: {str(session_id)}] [player: " \
               f"{serialize_turn(self.player)}]\n\n" \
               f"{board_string}```"

    def evaluate(self) -> int:
        """Score the board: 1 if X has a winning line, -1 if O does, else 0.

        BUGFIX: return annotation corrected from bool to int (the method
        returns -1/0/1).  The original copy-pasted row/column/diagonal checks
        are replaced by a single pass over all eight lines, checked in the
        same order (rows, columns, main diagonal, anti-diagonal).
        """
        b = self.board_matrix
        lines = [b[r] for r in range(3)]                          # rows
        lines += [[b[r][c] for r in range(3)] for c in range(3)]  # columns
        lines.append([b[i][i] for i in range(3)])                 # main diagonal
        lines.append([b[i][2 - i] for i in range(3)])             # anti-diagonal
        for line in lines:
            if line[0] == line[1] == line[2]:
                if line[0] == Player.X:
                    return 1
                if line[0] == Player.O:
                    return -1
        return 0
async def message_log(message, msg, err=False):
    """Send *msg* to the channel as a diff code block (``-`` error, ``+`` info).

    Fix: the original conditional expression had the wrong precedence —
    ``"```diff\\n" + "-" if err else "+" + ...`` parsed as
    ``("```diff\\n" + "-") if err else ("+" + ...)``, so error messages were
    sent without the message text and without the closing back-ticks.
    """
    prefix = "-" if err else "+"
    await message.channel.send("```diff\n" + prefix + " " + msg + "```")
async def new(message, args, bot=False):
    """Create a new game session owned by the author; *bot* selects the
    bot's Player value when playing against the bot."""
    session = Session("E" * 9, owner=str(message.author))
    if bot is False:
        # Human opponent: the first mentioned user joins as guest.
        session.guest = str(message.mentions[0])
    else:
        session.guest = client.user
        session.bot = bot
    await message.channel.send(str(session))
async def move(message, args):
    """Apply the move given in args[2] (cell number 1-9) to the active session.

    Validates that the author participates in the session, applies the move,
    announces a win, and — when playing against the bot — queries the tcubed
    API for the bot's reply move.
    """
    global session_id
    k = 1  # running cell number 1-9, mapped to (row, col) by the loops below
    board = sessions[session_id]
    if str(message.author) not in [board.owner, board.guest]:
        await message_log(message,
                          f"allowed users of session {session_id} "
                          f"are: {board.owner} and {board.guest}```",
                          err=True)
        return
    if not board.moves_left():
        await message.channel.send("there are no moves left")
        sessions.pop(session_id)
        return
    for i in range(3):
        for j in range(3):
            if k == int(args[2]):
                try:
                    board.move(i, j)
                    win = board.evaluate()
                    # NOTE(review): evaluate() returns an int (1/-1/0) but is
                    # compared by identity against Player.E; unless Player.E
                    # is the same object as the returned value this branch's
                    # truth value is suspect — confirm Player's definition.
                    if win is not Player.E:
                        await message.channel.send(str(board))
                        await message.channel.send(f"```diff\n+{message.author} wins!```")
                        await message_log(message, f"{message.author} wins!")
                        sessions.pop(session_id)
                        return
                except Session.MoveAlreadyTakenError:
                    await message.channel.send(
                        "```diff\n-move already taken by "
                        f"{serialize_turn(board.board_matrix[i][j])}```"
                    )
                    return
            k += 1
    if board.bot is not False:
        # Playing against the bot: show the board, then make the bot's move.
        await message.channel.send(str(board))
        if not board.moves_left():
            await message.channel.send("there are no moves left")
            return
        await message.channel.send("I'm thinking gimme a sec")
        board.move(*best_move(serialize_board(board.board_matrix),
                              serialize_turn(board.player)))
        win = board.evaluate()
        # NOTE(review): same suspicious identity comparison as above.
        if win is not Player.E:
            await message.channel.send(str(board))
            await message.channel.send(f"```diff\n+{board.guest} wins!```")
            await message.channel.send("gg")
            sessions.pop(session_id)
            return
    await message.channel.send(str(board))
async def select(message, args):
    """Switch the active session to the requested id, then list sessions."""
    global session_id
    requested = int(args[2])
    if requested <= len(sessions) - 1:
        session_id = requested
    await list_sessions(message)
async def list_sessions(message):
    """Send an overview of all sessions, highlighting the active one."""
    parts = ["```toml\n[boards]:\n"]
    for index, session in enumerate(sessions):
        if index == session_id:
            parts.append(f"\t[session_id: {index}] [player 1 (X): "
                         f"{session.owner}] [player 2 (O): {session.guest}]\n")
        else:
            parts.append(f"\t session_id: {index} | player 1 (X): "
                         f"{session.owner} | player 2 (O): {session.guest}\n")
    parts.append("```")
    await message.channel.send("".join(parts))
async def ttt_help(message):
    """Send the project README as a markdown code block.

    Fix: dropped the redundant ``f.close()`` that followed the ``with``
    block — the context manager already closes the file.
    """
    with open("README.md", "r") as f:
        await message.channel.send("```md\nREADME.md\n\n" + f.read() + "```")
async def usage(message):
    """Send the short usage banner for the ttt command."""
    banner = ("```Usage: ttt COMMAND [ARGS] ..."
              "\nTry 'ttt help' for more information."
              "\nTry 'ttt license' for license information."
              "```")
    await message.channel.send(banner)
# NOTE: shadows the ``license`` builtin; the name is kept because ttt()
# dispatches to it by this name.
async def license(message):
    """Send the contents of the LICENSE file as a code block.

    Fix: dropped the redundant ``f.close()`` after the ``with`` block.
    """
    with open("LICENSE", "r") as f:
        await message.channel.send("```\n" + f.read() + "```")
# ttt main function (process args etc)
async def ttt(message, args):
    """Dispatch the ttt subcommand named in args[1] to its handler."""
    command = args[1]
    if command == "help":
        await ttt_help(message)
    elif command == "license":
        await license(message)
    elif command == "move":
        await move(message, args)
    elif command == "select":
        await select(message, args)
    elif command == "new":
        if len(args) == 3:
            # A mention of the bot itself starts a game against the bot.
            if message.mentions[0] == client.user:
                await new(message, args, bot=Player.O)
            else:
                await new(message, args)
        else:
            await message.channel.send("```diff\n-please mention your "
                                       "opponent after your command: 'ttt new "
                                       "@user'```")
    elif command == "list":
        await list_sessions(message)
    elif command == "remove":
        sessions.pop(int(args[2]))
        await message.channel.send("```diff\n+successfully removed session 0 "
                                   f"{int(args[2])}```")
    elif command == "print":
        await message.channel.send(str(sessions[session_id]))
@client.event
async def on_ready():
    # Discord gateway connection established; log the bot's identity.
    print(f"[Connected]: ({client.user})")
@client.event
async def on_message(message):
    """Entry point for every incoming message; routes "ttt ..." commands."""
    # Ignore the bot's own messages to avoid feedback loops.
    if message.author == client.user:
        return
    # Bare "ttt" with no arguments: show the usage banner.
    if message.content == "ttt":
        await usage(message)
        return
    args = message.content.split()
    if args[0] == "ttt":
        print(f"[New Connection] {message.author}")
        await ttt(message, args)
# Start the Discord client (blocks until shutdown).
client.run(token)
| 8,580 | 263 | 452 |
0a8bf2e0f4e8cf8222bca21d94e9261add7f8e30 | 6,148 | py | Python | tools/codeformat.py | petrkr/micropython | 4371c971e3dfb743388ccb493c137a25aa9cdd35 | [
"MIT"
] | 79 | 2019-02-07T09:04:50.000Z | 2022-02-20T06:54:44.000Z | tools/codeformat.py | BigCircleLaw/micropython | 383adb654cfd4b818240ba197fdf25166401c343 | [
"MIT"
] | 100 | 2019-05-16T09:25:23.000Z | 2021-09-20T07:46:54.000Z | tools/codeformat.py | BigCircleLaw/micropython | 383adb654cfd4b818240ba197fdf25166401c343 | [
"MIT"
] | 25 | 2019-03-20T08:16:57.000Z | 2022-03-11T17:59:36.000Z | #!/usr/bin/env python3
#
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2020 Damien P. George
# Copyright (c) 2020 Jim Mussared
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import glob
import itertools
import os
import re
import subprocess
# Relative to top-level repo dir.
PATHS = [
# C
"extmod/*.[ch]",
"lib/netutils/*.[ch]",
"lib/timeutils/*.[ch]",
"lib/utils/*.[ch]",
"mpy-cross/*.[ch]",
"ports/*/*.[ch]",
"ports/windows/msvc/**/*.[ch]",
"py/*.[ch]",
# Python
"drivers/**/*.py",
"examples/**/*.py",
"extmod/**/*.py",
"ports/**/*.py",
"py/**/*.py",
"tools/**/*.py",
"tests/**/*.py",
]
EXCLUSIONS = [
# STM32 build includes generated Python code.
"ports/*/build*",
# gitignore in ports/unix ignores *.py, so also do it here.
"ports/unix/*.py",
# not real python files
"tests/**/repl_*.py",
# needs careful attention before applying automatic formatting
"tests/basics/*.py",
]
# Path to repo top-level dir.
TOP = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
UNCRUSTIFY_CFG = os.path.join(TOP, "tools/uncrustify.cfg")
# File extensions handled by the C and Python formatters.
C_EXTS = (
    ".c",
    ".h",
)
PY_EXTS = (".py",)
# Post-uncrustify (pattern, replacement) fixups applied line by line.
# Fix: the pattern is now a raw string — "\(" in a plain string is an
# invalid escape sequence (DeprecationWarning, SyntaxWarning on 3.12+).
FIXUP_REPLACEMENTS = ((re.compile(r"sizeof\(([a-z_]+)\) \*\(([a-z_]+)\)"), r"sizeof(\1) * (\2)"),)
if __name__ == "__main__":
main()
| 33.78022 | 97 | 0.596779 | #!/usr/bin/env python3
#
# This file is part of the MicroPython project, http://micropython.org/
#
# The MIT License (MIT)
#
# Copyright (c) 2020 Damien P. George
# Copyright (c) 2020 Jim Mussared
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import argparse
import glob
import itertools
import os
import re
import subprocess
# Relative to top-level repo dir.
PATHS = [
# C
"extmod/*.[ch]",
"lib/netutils/*.[ch]",
"lib/timeutils/*.[ch]",
"lib/utils/*.[ch]",
"mpy-cross/*.[ch]",
"ports/*/*.[ch]",
"ports/windows/msvc/**/*.[ch]",
"py/*.[ch]",
# Python
"drivers/**/*.py",
"examples/**/*.py",
"extmod/**/*.py",
"ports/**/*.py",
"py/**/*.py",
"tools/**/*.py",
"tests/**/*.py",
]
EXCLUSIONS = [
# STM32 build includes generated Python code.
"ports/*/build*",
# gitignore in ports/unix ignores *.py, so also do it here.
"ports/unix/*.py",
# not real python files
"tests/**/repl_*.py",
# needs careful attention before applying automatic formatting
"tests/basics/*.py",
]
# Path to repo top-level dir.
TOP = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
UNCRUSTIFY_CFG = os.path.join(TOP, "tools/uncrustify.cfg")
# File extensions handled by the C and Python formatters.
C_EXTS = (
    ".c",
    ".h",
)
PY_EXTS = (".py",)
# Post-uncrustify (pattern, replacement) fixups applied line by line.
# Fix: the pattern is now a raw string — "\(" in a plain string is an
# invalid escape sequence (DeprecationWarning, SyntaxWarning on 3.12+).
FIXUP_REPLACEMENTS = ((re.compile(r"sizeof\(([a-z_]+)\) \*\(([a-z_]+)\)"), r"sizeof(\1) * (\2)"),)
def list_files(paths, exclusions=None, prefix=""):
    """Expand glob *paths* under *prefix*, drop *exclusions*, return sorted list."""
    matched = set()
    for pattern in paths:
        matched |= set(glob.glob(os.path.join(prefix, pattern), recursive=True))
    for pattern in exclusions or []:
        excluded = glob.fnmatch.filter(matched, os.path.join(prefix, pattern))
        matched.difference_update(excluded)
    return sorted(matched)
def fixup_c(filename):
    """Post-process uncrustify output in *filename* in place.

    Dedents preprocessor directives (#if/#ifdef/#ifndef/#elif/#else/#endif)
    so they line up with the *following* code line instead of the previous
    one, then applies the regex fixups from FIXUP_REPLACEMENTS.
    """
    # Read file.
    with open(filename) as f:
        lines = f.readlines()
    # Write out file with fixups.
    with open(filename, "w", newline="") as f:
        # Stack of target indents for open #if blocks; -1 means "no dedent".
        dedent_stack = []
        while lines:
            # Get next line.
            l = lines.pop(0)
            # Dedent #'s to match indent of following line (not previous line).
            m = re.match(r"( +)#(if |ifdef |ifndef |elif |else|endif)", l)
            if m:
                indent = len(m.group(1))
                directive = m.group(2)
                if directive in ("if ", "ifdef ", "ifndef "):
                    l_next = lines[0]
                    indent_next = len(re.match(r"( *)", l_next).group(1))
                    if indent - 4 == indent_next and re.match(r" +(} else |case )", l_next):
                        # This #-line (and all associated ones) needs dedenting by 4 spaces.
                        l = l[4:]
                        dedent_stack.append(indent - 4)
                    else:
                        # This #-line does not need dedenting.
                        dedent_stack.append(-1)
                else:
                    if dedent_stack[-1] >= 0:
                        # This associated #-line needs dedenting to match the #if.
                        indent_diff = indent - dedent_stack[-1]
                        assert indent_diff >= 0
                        l = l[indent_diff:]
                    if directive == "endif":
                        dedent_stack.pop()
            # Apply general regex-based fixups.
            for regex, replacement in FIXUP_REPLACEMENTS:
                l = regex.sub(replacement, l)
            # Write out line.
            f.write(l)
    # Every #if must have been closed by a matching #endif.
    assert not dedent_stack, filename
def main():
    """Parse command-line options and run uncrustify/black over the tree."""
    cmd_parser = argparse.ArgumentParser(description="Auto-format C and Python files.")
    cmd_parser.add_argument("-c", action="store_true", help="Format C code only")
    cmd_parser.add_argument("-p", action="store_true", help="Format Python code only")
    cmd_parser.add_argument("files", nargs="*", help="Run on specific globs")
    args = cmd_parser.parse_args()
    # Setting only one of -c or -p disables the other. If both or neither are set, then do both.
    format_c = args.c or not args.p
    format_py = args.p or not args.c
    # Expand the globs passed on the command line, or use the default globs above.
    files = []
    if args.files:
        files = list_files(args.files)
    else:
        files = list_files(PATHS, EXCLUSIONS, TOP)
    # Extract files matching a specific language.
    def lang_files(exts):
        for file in files:
            if os.path.splitext(file)[1].lower() in exts:
                yield file
    # Run tool on N files at a time (to avoid making the command line too long).
    def batch(cmd, files, N=200):
        while True:
            file_args = list(itertools.islice(files, N))
            if not file_args:
                break
            subprocess.check_call(cmd + file_args)
    # Format C files with uncrustify.
    if format_c:
        batch(["uncrustify", "-c", UNCRUSTIFY_CFG, "-lC", "--no-backup"], lang_files(C_EXTS))
        for file in lang_files(C_EXTS):
            fixup_c(file)
    # Format Python files with black.
    if format_py:
        batch(["black", "-q", "--fast", "--line-length=99"], lang_files(PY_EXTS))
if __name__ == "__main__":
main()
| 3,645 | 0 | 69 |
7e7a73441c1d6029aaf8099421accd0e4f27ebb3 | 3,926 | py | Python | proxmox_plugin/proxmox.py | Dertosh/letsencrypt-proxmox | cff654f6e92ef371790a39985c0d945de2b8169c | [
"Apache-2.0"
] | 46 | 2016-04-06T19:15:09.000Z | 2022-01-19T19:47:43.000Z | proxmox_plugin/proxmox.py | andieandpal/letsencrypt-proxmox | cff654f6e92ef371790a39985c0d945de2b8169c | [
"Apache-2.0"
] | 1 | 2016-06-28T13:58:36.000Z | 2017-04-19T13:17:41.000Z | proxmox_plugin/proxmox.py | andieandpal/letsencrypt-proxmox | cff654f6e92ef371790a39985c0d945de2b8169c | [
"Apache-2.0"
] | 13 | 2016-07-17T04:37:25.000Z | 2019-07-26T08:23:44.000Z | """Proxmox plugin for Let's Encrypt client"""
import os
import subprocess
import logging
import zope.component
import zope.interface
from letsencrypt import interfaces
from letsencrypt.plugins import common
from letsencrypt import errors
from shutil import copyfile
logger = logging.getLogger(__name__)
| 35.690909 | 103 | 0.618186 | """Proxmox plugin for Let's Encrypt client"""
import os
import subprocess
import logging
import zope.component
import zope.interface
from letsencrypt import interfaces
from letsencrypt.plugins import common
from letsencrypt import errors
from shutil import copyfile
logger = logging.getLogger(__name__)
class ProxmoxInstaller(common.Plugin):
    """Let's Encrypt installer plugin that deploys certificates to Proxmox VE.

    Copies the private key and full chain into the PVE certificate
    directory and restarts the API proxy so the new certificate is used.
    """
    zope.interface.implements(interfaces.IInstaller)
    zope.interface.classProvides(interfaces.IPluginFactory)

    description = "Proxmox VE plugin for Let's Encrypt client"

    @classmethod
    def add_parser_arguments(cls, add):
        # --<prefix>-location: directory holding pve-ssl.key / pve-ssl.pem.
        add("location", default="/etc/pve/local", help="Location of Proxmox VE certificates.")

    def prepare(self):
        pass  # pragma: no cover

    def more_info(self):
        return "Automatically deploy SSL certificate to Proxmox VE."

    def get_all_names(self):
        # Proxmox has no vhost-style name discovery.
        return []

    def deploy_cert(self, domain, cert_path, key_path, chain_path=None, fullchain_path=None):
        """Install *key_path* and *fullchain_path* as the PVE node certificate.

        Raises:
            errors.PluginError: if no full chain path was supplied.
        """
        if not fullchain_path:
            raise errors.PluginError(
                "The proxmox plugin currently requires --fullchain-path to "
                "install a cert.")
        logger.info("Copy certificate")
        copyfile(key_path, os.path.join(self.conf("location"), "pve-ssl.key"))
        copyfile(fullchain_path, os.path.join(self.conf("location"), "pve-ssl.pem"))

    def enhance(self, domain, enhancement, options=None):
        pass  # pragma: no cover

    def supported_enhancements(self):
        return []

    def get_all_certs_keys(self):
        return []

    def save(self, title=None, temporary=False):
        pass  # pragma: no cover

    def rollback_checkpoints(self, rollback=1):
        pass  # pragma: no cover

    def recovery_routine(self):
        pass  # pragma: no cover

    def view_config_changes(self):
        pass  # pragma: no cover

    def config_test(self):
        pass  # pragma: no cover

    def restart(self):
        """Restart pveproxy via systemd when PID 1 is systemd, otherwise
        via a SysV init script."""

        def is_pid_1_systemd():
            # Detect systemd by peeking at /proc/1/cmdline.
            try:
                cmdline = open('/proc/1/cmdline', 'rb').read(7)
                return cmdline.startswith('systemd')
            except IOError:
                # Bug fix: the original returned ``false`` (a NameError).
                return False

        def execute_command(command):
            logger.info("Executing command: %s" % command)
            try:
                proc = subprocess.Popen(command)
                proc.wait()
                if proc.returncode != 0:
                    logger.error("PVE API Proxy Server restart command returned an error")
            except (OSError, ValueError):
                logger.error("Failed to execute the restart pveproxy command")

        if is_pid_1_systemd():
            logger.info("Using systemd to restart PVE API Proxy Server")
            unit_script_locations = ['/usr/lib/systemd/system/', '/etc/systemd/system/']
            pveproxy_service_names = ['pveproxy.service']
            for path in unit_script_locations:
                for name in pveproxy_service_names:
                    full_path = os.path.join(path, name)
                    if os.path.isfile(full_path):
                        logger.info("Found the PVE API Proxy Server file at %s" % full_path)
                        execute_command(['systemctl', 'restart', name])
                        return
            logger.error("Found systemd but not the PVE API Proxy Server so it could not be restarted")
        else:
            logger.info("Using init scripts and the service command to restart PVE API Proxy Server")
            init_script_names = ['pveproxy']
            for path in init_script_names:
                if os.path.isfile(os.path.join('/etc/init.d/', path)):
                    logger.info("Found the PVE API Proxy Server init script at %s" % path)
                    execute_command(['service', path, 'restart'])
                    return
            # Consistency fix: use the module logger, not logging (root logger).
            logger.error("Did not find the PVE API Proxy Server so it could not be restarted")
fa71658d6ed923439e099005e674793039a6d737 | 679 | py | Python | top_secret/preprocessors.py | trym-inc/top-secret | dd5681a682745e2fedc9e80cca99445d143d18e9 | [
"MIT"
] | null | null | null | top_secret/preprocessors.py | trym-inc/top-secret | dd5681a682745e2fedc9e80cca99445d143d18e9 | [
"MIT"
] | null | null | null | top_secret/preprocessors.py | trym-inc/top-secret | dd5681a682745e2fedc9e80cca99445d143d18e9 | [
"MIT"
] | null | null | null | import base64
import json
from decimal import Decimal
from .cast_handlers import bool_cast_handler
| 16.975 | 44 | 0.589102 | import base64
import json
from decimal import Decimal
from .cast_handlers import bool_cast_handler
def base64preprocessor(value):
    """Decode a base64-encoded secret into a text string."""
    decoded = base64.b64decode(value)
    return decoded.decode()
return base64.b32decode(value).decode()
def typed_preprocessor(value):
type, value = value.split(':', 1)
handler = {
'i': int,
'int': int,
'f': float,
'float': float,
'd': Decimal,
'decimal': Decimal,
'b': bool_cast_handler,
'bool': bool_cast_handler,
's': str,
'string': str,
'j': json.loads,
'json': json.loads,
}[type]
return handler(value)
| 507 | 0 | 69 |
7594496ca540ca580732cd8a0b0cf3b463539050 | 2,104 | py | Python | project/scripts/update_eo_version.py | polystat/c2eo | b9f30cd010ee36c4d8827f7909780f462e9b73d6 | [
"MIT"
] | 12 | 2021-08-05T12:12:09.000Z | 2022-03-08T13:33:53.000Z | project/scripts/update_eo_version.py | polystat/c2eo | b9f30cd010ee36c4d8827f7909780f462e9b73d6 | [
"MIT"
] | 26 | 2021-08-23T10:25:37.000Z | 2022-03-30T12:56:08.000Z | project/scripts/update_eo_version.py | polystat/c2eo | b9f30cd010ee36c4d8827f7909780f462e9b73d6 | [
"MIT"
] | 12 | 2021-08-17T09:20:07.000Z | 2022-03-31T13:37:28.000Z | #! /usr/bin/python3
import sys
import re as regex
# Our scripts
import tools
import settings
if __name__ == '__main__':
tools.move_to_script_dir(sys.argv[0])
main()
| 30.941176 | 91 | 0.694392 | #! /usr/bin/python3
import sys
import re as regex
# Our scripts
import tools
import settings
def main():
    """Check the configured EO version and update pom.xml files if stale."""
    tools.pprint()
    current_version = settings.get_setting('current_eo_version')
    latest_version = settings.get_setting('latest_eo_version')
    is_latest, latest_version = is_update_needed(current_version, latest_version)
    if is_latest:
        return
    pom_files = tools.search_files_by_pattern('../../', 'pom.xml', recursive=True)
    update_version_in_files(pom_files, latest_version)
    settings.set_setting('current_eo_version', latest_version)
    tools.pprint('EO version updated\n')
def is_update_needed(current_version, latest_version):
    """Compare versions; return (is_latest, effective_latest_version)."""
    compare = tools.version_compare(current_version, latest_version)
    if compare == 1:
        # Current is ahead of the recorded latest: treat current as latest.
        tools.pprint(f'Manual update latest EO version to {current_version}', status='WARN')
        return False, current_version
    if compare == 0:
        tools.pprint('We use latest EO version', status='PASS')
        tools.pprint()
        return True, latest_version
    tools.pprint(f'We use old EO version: "{current_version}"', status='WARN')
    tools.pprint('Start updating files')
    return False, latest_version
def update_version_in_files(files, latest_version):
    """Rewrite <eolang.version> declarations in *files*; return count changed."""
    tools.pprint('Updating version')
    pattern = r'<eolang\.version>.*<\/eolang\.version>'
    replacement = f'<eolang.version>{latest_version}</eolang.version>'
    changed = 0
    for file in files:
        with open(file, 'r') as f:
            data = f.read()
        match = regex.search(pattern, data)
        # Skip files with no declaration or an already-current one.
        if (not match) or (replacement in match.group()):
            continue
        with open(file, 'w') as f:
            f.write(regex.sub(pattern, replacement, data))
        changed += 1
    tools.pprint(f'{changed} files updated')
    return changed
if __name__ == '__main__':
tools.move_to_script_dir(sys.argv[0])
main()
| 1,855 | 0 | 69 |
fa75ee19b6b78380bbc91dda8cf8601d222c3af8 | 8,913 | py | Python | gridly_cli/gridly.py | gridly-spreadsheet-CMS/gridly-cli | fb0f8384096208c787dfc642ddd556c848de732e | [
"MIT"
] | null | null | null | gridly_cli/gridly.py | gridly-spreadsheet-CMS/gridly-cli | fb0f8384096208c787dfc642ddd556c848de732e | [
"MIT"
] | null | null | null | gridly_cli/gridly.py | gridly-spreadsheet-CMS/gridly-cli | fb0f8384096208c787dfc642ddd556c848de732e | [
"MIT"
] | 1 | 2021-10-03T05:55:12.000Z | 2021-10-03T05:55:12.000Z | import click
import requests
import os
import json
import questionary
from questionary import Separator, Choice, prompt
from tabulate import tabulate
import gridly_cli.api as api
from gridly_cli.utils import records_data_to_json, dump_to_json_file, dump_to_csv_file
# Default headers for direct HTTP calls to Gridly; reads GRIDLY_API_KEY
# from the environment (raises KeyError at import time when it is unset).
headers = {
    'Content-Type': 'application/json',
    'Authorization': 'ApiKey ' + str(os.environ["GRIDLY_API_KEY"])
}
@click.group()
def gridly():
    """A CLI wrapper for the API of Gridly."""
    # Root Click group; subcommands are registered via @gridly.command().
    pass
####### Grid #######
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', default=True, help='To list all Grids')
@click.option('-u', 'action', flag_value='u', help='To update Grid name')
def grid(action):
    """
    List all Grids / Update Grid name
    """
    if action == 'ls':
        db_id = choose_database()
        for item in api.get_grids(db_id):
            click.echo(item["name"])
    elif action == 'u':
        grid_id = choose_grid()
        new_name = questionary.text("New Grid name:").ask()
        api.update_grid(grid_id, {"name": new_name})
        click.echo("Your Grid has been changed")
    else:
        # Unknown action: fall back to the top-level group (shows help).
        gridly()
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', default=True)
def project(action):
    """
    List all Projects
    """
    if action == 'ls':
        for item in api.get_projects():
            click.echo(item["name"])
    else:
        gridly()
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', default=True)
def database(action):
    """
    List all Databases
    """
    if action == 'ls':
        project_id = choose_project()
        for item in api.get_databases(project_id):
            click.echo(item["name"])
    else:
        gridly()
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', help='To list all views')
@click.argument('view_id', required=False)
def view(action, view_id):
    """
    List all views / Get info of a specified view
    """
    if action == 'ls':
        grid_id = choose_grid()
        for item in api.get_views(grid_id):
            click.echo(item["name"])
    elif view_id is not None:
        details = api.get_view(view_id)
        click.echo(json.dumps(details, indent=4))
    else:
        gridly()
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', default=True)
def column(action):
    """
    List all columns of a Grid
    """
    if action == 'ls':
        grid_id = choose_grid()
        grid_info = api.get_grid(grid_id)
        rows = [[col["id"], col["name"], col["type"]]
                for col in grid_info.get("columns")]
        click.echo("Grid name: " + grid_info.get("name"))
        click.echo(tabulate(rows, headers=["Column ID", "Column Name", "Column Type"], tablefmt="grid"))
    else:
        gridly()
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', default=True, help='To list all records of a view')
@click.option('-d', 'action', flag_value='d', help='To delete records')
def record(action):
    """
    List all records of a view / Delete records
    """
    if action == 'ls':
        view_id = choose_view()
        response_columns = api.get_view(view_id)
        columns = response_columns.get("columns")
        response_records = api.get_records(view_id)
        # Set up column keys before add value to each column
        ls_cell = {}  # ls_cell is a dictionary
        for cell in response_records:
            unique_cell = cell["cells"]
            for value in unique_cell:
                ls_cell.setdefault(value["columnId"], [])
        # Map value to column
        # NOTE(review): values are grouped per column, so a record that has
        # no entry at all for some column shifts later values up within that
        # column — rows in the table may misalign; confirm API guarantees.
        for cell in response_records:
            unique_cell = cell["cells"]
            for value in unique_cell:
                if value["columnId"] in ls_cell and "value" in value:
                    ls_cell[value["columnId"]].append(value["value"])
                elif value["columnId"] in ls_cell and "value" not in value:
                    ls_cell[value["columnId"]].append("")
                else:
                    continue
        # Replace column ids with human-readable column names for display.
        for column in columns:
            if column["id"] in ls_cell:
                ls_cell[column["name"]] = ls_cell.pop(column["id"])
            else:
                continue
        click.echo(tabulate(ls_cell, headers="keys", tablefmt="grid"))
    elif action == 'd':
        view_id = choose_view()
        response_records = api.get_records(view_id)
        ls_record_id = []
        for record in response_records:
            ls_record_id.append(record["id"])
        ls_chosen_record = questionary.checkbox(
            'Select records which you want delete',
            choices=ls_record_id).ask()
        data = {
            "ids": ls_chosen_record
        }
        api.delete_records(view_id, data)
    else:
        gridly()
@gridly.command()
@click.option('-json', 'type_json', flag_value='json', default=False, help="To export to JSON file type")
@click.option('-csv', 'type_csv', flag_value='csv', default=False, help="To export to CSV file type")
@click.option('-lang', 'target', flag_value='lang', default=False, help="To export translation language columns to separate files")
@click.argument('view_id')
@click.argument('dest', type=click.Path(exists=True), default='./', required=False)
def export(type_json, type_csv , target, view_id, dest):
    """
    Export all records of a view to JSON and/or CSV files
    """
    rs_records = api.get_records(view_id)
    records = records_data_to_json(rs_records)
    lang_columns = []
    # Maps a language code (or "all") to the record set to export.
    lang_records = {}
    if target == 'lang':
        # Split per translation language: one output file per languageCode.
        view = api.get_view(view_id)
        for column in view["columns"]:
            if 'languageCode' in column:
                lang_columns.append(column["languageCode"])
        for lang in lang_columns:
            lang_records[lang] = api.split_column(records, lang)
    else:
        lang_records["all"] = records
    # NOTE(review): output path is built by string concatenation, so *dest*
    # must end with a path separator (the default './' does) — confirm.
    if type_json == 'json':
        for lang in lang_records:
            file_path = f'{dest}grid_{view_id}_{lang}.json'
            dump_to_json_file(file_path, lang_records[lang])
            click.echo(f'!!!SUCCESS exported to: {file_path}')
    if type_csv == 'csv':
        for lang in lang_records:
            file_path = f'{dest}grid_{view_id}_{lang}.csv'
            dump_to_csv_file(file_path, lang_records[lang])
            click.echo(f'!!!SUCCESS exported to: {file_path}')
if __name__ == '__main__':
gridly() | 29.032573 | 131 | 0.600359 | import click
import requests
import os
import json
import questionary
from questionary import Separator, Choice, prompt
from tabulate import tabulate
import gridly_cli.api as api
from gridly_cli.utils import records_data_to_json, dump_to_json_file, dump_to_csv_file
headers = {
'Content-Type': 'application/json',
'Authorization': 'ApiKey ' + str(os.environ["GRIDLY_API_KEY"])
}
@click.group()
def gridly():
"""A CLI wrapper for the API of Gridly."""
pass
def choose_project():
    """Prompt the user to pick a Project; return its id as a string.

    Returns "" when the chosen name matches no project (e.g. prompt aborted).
    """
    projects = api.get_projects()
    names = [item["name"] for item in projects]
    chosen = questionary.select(
        "Choose your Project:",
        choices=names).ask()
    project_id = ""
    for item in projects:
        if chosen == str(item["name"]):
            project_id = str(item["id"])
    return project_id
def choose_database():
    """Prompt for a Project, then a Database within it; return the id.

    Returns "" when the chosen name matches no database.
    """
    project_id = choose_project()
    databases = api.get_databases(project_id)
    names = [item["name"] for item in databases]
    chosen = questionary.select(
        "Choose your Database:",
        choices=names).ask()
    database_id = ""
    for item in databases:
        if chosen == str(item["name"]):
            database_id = str(item["id"])
    return database_id
def choose_grid():
    """Prompt down the hierarchy to a Grid; return its id.

    Returns "" when the chosen name matches no grid.
    """
    db_id = choose_database()
    grids = api.get_grids(db_id)
    names = [item["name"] for item in grids]
    chosen = questionary.select(
        "Choose your Grid:",
        choices=names).ask()
    grid_id = ""
    for item in grids:
        if chosen == str(item["name"]):
            grid_id = str(item["id"])
    return grid_id
def choose_view():
    """Prompt down the hierarchy to a view; return its id.

    Returns "" when the chosen name matches no view.
    """
    grid_id = choose_grid()
    views = api.get_views(grid_id)
    names = [item["name"] for item in views]
    chosen = questionary.select(
        "Choose your view:",
        choices=names).ask()
    view_id = ""
    for item in views:
        if chosen == str(item["name"]):
            view_id = str(item["id"])
    return view_id
def choose_columns(view_id):
    """Prompt for columns of *view_id* to export.

    Selecting 'All' returns the full options list (which, as in the
    original, still includes the literal 'All' entry).
    """
    view = api.get_view(view_id)
    options = ['All']
    options.extend(column["id"] for column in view["columns"])
    chosen = questionary.checkbox('Select columns to export', choices=options).ask()
    if 'All' in chosen:
        return options
    return chosen
####### Grid #######
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', default=True, help='To list all Grids')
@click.option('-u', 'action', flag_value='u', help='To update Grid name')
def grid(action):
"""
List all Grids / Update Grid name
"""
if action == 'ls':
db_id = choose_database()
response = api.get_grids(db_id)
for grid in response:
click.echo(grid["name"])
elif action == 'u':
grid_id = choose_grid()
grid_name = questionary.text("New Grid name:").ask()
data = {
"name": grid_name
}
api.update_grid(grid_id, data)
click.echo("Your Grid has been changed")
else:
gridly()
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', default=True)
def project(action):
"""
List all Projects
"""
if action == 'ls':
response = api.get_projects()
for project in response:
click.echo(project["name"])
else:
gridly()
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', default=True)
def database(action):
"""
List all Databases
"""
if action == 'ls':
project_id = choose_project()
response = api.get_databases(project_id)
for database in response:
click.echo(database["name"])
else:
gridly()
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', help='To list all views')
@click.argument('view_id', required=False)
def view(action, view_id):
"""
List all views / Get info of a specified view
"""
if action == 'ls':
grid_id = choose_grid()
response = api.get_views(grid_id)
for view in response:
click.echo(view["name"])
elif view_id is not None:
view = api.get_view(view_id)
click.echo(json.dumps(view, indent=4))
else:
gridly()
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', default=True)
def column(action):
"""
List all columns of a Grid
"""
if action == 'ls':
grid_id = choose_grid()
response = api.get_grid(grid_id)
columns = response.get("columns")
ls_column = []
for column in columns:
ls_column.append([column["id"], column["name"], column["type"]])
click.echo("Grid name: " + response.get("name"))
click.echo(tabulate(ls_column, headers=["Column ID", "Column Name", "Column Type"], tablefmt="grid"))
else:
gridly()
@gridly.command()
@click.option('-ls', 'action', flag_value='ls', default=True, help='To list all records of a view')
@click.option('-d', 'action', flag_value='d', help='To delete records')
def record(action):
    """
    List all records of a view / Delete records
    """
    if action == 'ls':
        view_id = choose_view()
        response_columns = api.get_view(view_id)
        columns = response_columns.get("columns")
        response_records = api.get_records(view_id)
        # First pass: register every columnId seen in the records, so each
        # column starts with an (initially empty) list of values.
        ls_cell = {} # maps columnId -> list of cell values, in first-seen order
        for cell in response_records:
            unique_cell = cell["cells"]
            for value in unique_cell:
                ls_cell.setdefault(value["columnId"], [])
        # Second pass: append each record's value to its column; cells with
        # no "value" key contribute an empty string so rows stay aligned.
        for cell in response_records:
            unique_cell = cell["cells"]
            for value in unique_cell:
                if value["columnId"] in ls_cell and "value" in value:
                    ls_cell[value["columnId"]].append(value["value"])
                elif value["columnId"] in ls_cell and "value" not in value:
                    ls_cell[value["columnId"]].append("")
                else:
                    continue
        # Replace internal column ids with human-readable column names for
        # display. NOTE: pop+assign moves each renamed key to the end of the
        # dict, so the final column order follows the Grid's column order.
        for column in columns:
            if column["id"] in ls_cell:
                ls_cell[column["name"]] = ls_cell.pop(column["id"])
            else:
                continue
        click.echo(tabulate(ls_cell, headers="keys", tablefmt="grid"))
    elif action == 'd':
        view_id = choose_view()
        response_records = api.get_records(view_id)
        ls_record_id = []
        for record in response_records:
            ls_record_id.append(record["id"])
        # Let the user tick the records to remove, then delete them in one
        # batch API call.
        ls_chosen_record = questionary.checkbox(
            'Select records which you want delete',
            choices=ls_record_id).ask()
        data = {
            "ids": ls_chosen_record
        }
        api.delete_records(view_id, data)
    else:
        # Unrecognized action: fall back to the group entry point.
        gridly()
@gridly.command()
@click.option('-json', 'type_json', flag_value='json', default=False, help="To export to JSON file type")
@click.option('-csv', 'type_csv', flag_value='csv', default=False, help="To export to CSV file type")
@click.option('-lang', 'target', flag_value='lang', default=False, help="To export translation language columns to separate files")
@click.argument('view_id')
@click.argument('dest', type=click.Path(exists=True), default='./', required=False)
def export(type_json, type_csv , target, view_id, dest):
    """
    Export all records of a view to JSON and/or CSV files
    """
    records = records_data_to_json(api.get_records(view_id))
    # Keyed by language code (or "all"): the record sets to write out.
    lang_records = {}
    if target == 'lang':
        # Per-language mode: split one record set per translation column.
        view = api.get_view(view_id)
        codes = [col["languageCode"] for col in view["columns"] if 'languageCode' in col]
        for code in codes:
            lang_records[code] = api.split_column(records, code)
    else:
        lang_records["all"] = records
    if type_json == 'json':
        for lang in lang_records:
            file_path = f'{dest}grid_{view_id}_{lang}.json'
            dump_to_json_file(file_path, lang_records[lang])
            click.echo(f'!!!SUCCESS exported to: {file_path}')
    if type_csv == 'csv':
        for lang in lang_records:
            file_path = f'{dest}grid_{view_id}_{lang}.csv'
            dump_to_csv_file(file_path, lang_records[lang])
            click.echo(f'!!!SUCCESS exported to: {file_path}')
if __name__ == '__main__':
gridly() | 2,125 | 0 | 115 |
ea67a887ab47a66a4990b89a906e18d13760e828 | 6,783 | py | Python | tests/renderables/test_paginated_table.py | sauljabin/kaskade | fe270821dd05459df11adaabbd5a0ea39809456e | [
"MIT"
] | 16 | 2021-10-02T02:58:17.000Z | 2022-02-13T13:09:27.000Z | tests/renderables/test_paginated_table.py | sauljabin/kaskade | fe270821dd05459df11adaabbd5a0ea39809456e | [
"MIT"
] | 3 | 2021-11-17T17:08:12.000Z | 2022-02-07T23:54:04.000Z | tests/renderables/test_paginated_table.py | sauljabin/kaskade | fe270821dd05459df11adaabbd5a0ea39809456e | [
"MIT"
] | 1 | 2021-12-22T17:15:54.000Z | 2021-12-22T17:15:54.000Z | from math import ceil
from typing import Any, List
from unittest import TestCase
from unittest.mock import MagicMock, patch
from rich.table import Table
from kaskade.renderables.paginated_table import PaginatedTable
from tests import faker
| 32.927184 | 88 | 0.673006 | from math import ceil
from typing import Any, List
from unittest import TestCase
from unittest.mock import MagicMock, patch
from rich.table import Table
from kaskade.renderables.paginated_table import PaginatedTable
from tests import faker
class PaginatedTableDummy(PaginatedTable):
    """Minimal concrete PaginatedTable: rendering hooks are no-ops so the
    tests can focus purely on the base class's pagination arithmetic."""
    def render_columns(self, table: Table) -> None:
        # Intentionally empty: column rendering is irrelevant to these tests.
        pass
    def render_rows(self, table: Table, renderables: List[Any]) -> None:
        # Intentionally empty: row rendering is irrelevant to these tests.
        pass
    def renderables(self, start_index: int, end_index: int) -> List[Any]:
        # Implicitly returns None; tests needing data patch this with a mock.
        pass
class TestPaginatedTable(TestCase):
    """Unit tests for PaginatedTable's pagination arithmetic and rendering."""

    def test_page_size_is_total_items_when_negative(self):
        total_items = faker.pyint()
        paginated_table = PaginatedTableDummy(total_items, page_size=-1)
        self.assertEqual(total_items, paginated_table.page_size)

    def test_page_size_if_bigger_then_0(self):
        total_items = faker.pyint()
        page_size = 5
        paginated_table = PaginatedTableDummy(total_items, page_size=page_size)
        self.assertEqual(page_size, paginated_table.page_size)

    def test_set_page_if_valid(self):
        total_items = faker.pyint(min_value=10)
        page_size = 2
        page = 2
        paginated_table = PaginatedTableDummy(
            total_items, page_size=page_size, page=page
        )
        self.assertEqual(page, paginated_table.page)

    def test_set_page_if_less_than_0(self):
        total_items = faker.pyint(min_value=10)
        page_size = 2
        page = 1
        paginated_table = PaginatedTableDummy(total_items, page_size=page_size, page=-1)
        self.assertEqual(page, paginated_table.page)

    def test_set_page_if_greater_than_total_pages(self):
        total_items = faker.pyint(min_value=10, max_value=20)
        page_size = 2
        page = 1000000
        paginated_table = PaginatedTableDummy(
            total_items, page_size=page_size, page=page
        )
        self.assertEqual(paginated_table.total_pages(), paginated_table.page)

    def test_total_pages_if_page_size_is_negative(self):
        total_items = faker.pyint()
        paginated_table = PaginatedTableDummy(total_items)
        paginated_table.page_size = -1
        self.assertEqual(0, paginated_table.total_pages())

    def test_total_pages(self):
        total_items = faker.pyint(min_value=1, max_value=9)
        page_size = faker.pyint(min_value=10, max_value=20)
        # Bug fix: page_size was generated but never passed to the
        # constructor, so the assertion compared against a table using the
        # default page size and only held by coincidence.
        paginated_table = PaginatedTableDummy(total_items, page_size=page_size)
        self.assertEqual(ceil(total_items / page_size), paginated_table.total_pages())

    def test_first_page(self):
        total_items = faker.pyint(min_value=10)
        page_size = 2
        page = 2
        paginated_table = PaginatedTableDummy(
            total_items, page_size=page_size, page=page
        )
        paginated_table.first_page()
        self.assertEqual(1, paginated_table.page)

    def test_last_page(self):
        total_items = faker.pyint(min_value=10)
        page_size = 2
        page = 2
        paginated_table = PaginatedTableDummy(
            total_items, page_size=page_size, page=page
        )
        paginated_table.last_page()
        self.assertEqual(paginated_table.total_pages(), paginated_table.page)

    def test_next_page(self):
        total_items = faker.pyint(min_value=10)
        page_size = 2
        page = 2
        paginated_table = PaginatedTableDummy(
            total_items, page_size=page_size, page=page
        )
        paginated_table.next_page()
        self.assertEqual(page + 1, paginated_table.page)

    def test_previous_page(self):
        total_items = faker.pyint(min_value=10)
        page_size = 2
        page = 2
        paginated_table = PaginatedTableDummy(
            total_items, page_size=page_size, page=page
        )
        paginated_table.previous_page()
        self.assertEqual(page - 1, paginated_table.page)

    def test_start_and_end_index(self):
        total_items = faker.pyint(min_value=10)
        page_size = 2
        page = 2
        paginated_table = PaginatedTableDummy(
            total_items, page_size=page_size, page=page
        )
        self.assertEqual((page - 1) * page_size, paginated_table.start_index())
        self.assertEqual(page * page_size, paginated_table.end_index())

    def test_str(self):
        total_items = faker.pyint(min_value=10)
        paginated_table = PaginatedTableDummy(total_items)
        self.assertEqual(str([]), str(paginated_table))

    def test_str_renderables(self):
        total_items = faker.pyint(min_value=10)
        page_size = 2
        page = 2
        paginated_table = PaginatedTableDummy(
            total_items, page_size=page_size, page=page
        )
        renderables = faker.pylist()
        paginated_table.renderables = MagicMock(return_value=renderables)
        self.assertEqual(str(renderables), str(paginated_table))

    @patch("kaskade.renderables.paginated_table.Text.from_markup")
    @patch("kaskade.renderables.paginated_table.Table")
    def test_rich(self, mock_class_table, mock_text_markup):
        total_items = faker.pyint(min_value=10)
        page_size = 2
        page = 2
        paginated_table = PaginatedTableDummy(
            total_items, page_size=page_size, page=page
        )
        paginated_table.render_columns = MagicMock()
        paginated_table.render_rows = MagicMock()
        renderables = faker.pylist()
        paginated_table.renderables = MagicMock(return_value=renderables)
        mock_table = MagicMock()
        mock_class_table.return_value = mock_table
        mock_column = MagicMock()
        mock_table.columns = [mock_column]
        # Row count equals page_size, so no "Rows greater than" error path.
        mock_table.rows = ["", ""]
        mock_text = MagicMock()
        mock_text_markup.return_value = mock_text
        paginated_table.__rich__()
        mock_class_table.assert_called_once_with(
            title_style="",
            expand=True,
            box=None,
            show_edge=False,
        )
        paginated_table.render_columns.assert_called_once_with(mock_table)
        paginated_table.render_rows.assert_called_once_with(mock_table, renderables)
        mock_table.add_row.assert_not_called()

    @patch("kaskade.renderables.paginated_table.Table")
    def test_rich_rows_bigger_than_page_size(self, mock_class_table):
        total_items = faker.pyint(min_value=10)
        page_size = 2
        page = 2
        paginated_table = PaginatedTableDummy(
            total_items, page_size=page_size, page=page
        )
        mock_table = MagicMock()
        mock_class_table.return_value = mock_table
        mock_column = MagicMock()
        mock_table.columns = [mock_column]
        # faker.pylist() has more entries than page_size=2, triggering the
        # error message instead of a rendered table.
        mock_table.rows = faker.pylist()
        actual = paginated_table.__rich__()
        self.assertEqual("Rows greater than [yellow bold]2[/]", actual)
| 5,768 | 645 | 126 |
4d8670ad4fa729ccd750e2e738b88537b7ec5842 | 3,194 | py | Python | reviewboard/site/models.py | vigneshsrinivasan/reviewboard | 4775130c1c1022f81edc11928e02b1b6c069f6ed | [
"MIT"
] | 1 | 2020-02-11T07:09:14.000Z | 2020-02-11T07:09:14.000Z | reviewboard/site/models.py | vigneshsrinivasan/reviewboard | 4775130c1c1022f81edc11928e02b1b6c069f6ed | [
"MIT"
] | null | null | null | reviewboard/site/models.py | vigneshsrinivasan/reviewboard | 4775130c1c1022f81edc11928e02b1b6c069f6ed | [
"MIT"
] | null | null | null | #
# models.py -- Models for the "reviewboard.site" app.
#
# Copyright (c) 2010 David Trowbridge
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
class LocalSite(models.Model):
    """A self-contained division within a Review Board installation.

    Each LocalSite isolates a set of repositories, groups and users from
    the rest of the server.  A user may be a plain member (listed in
    ``users``) and optionally an admin (listed in ``admins``, granting the
    ability to manage the site's repositories, groups and users).

    Nearly every other model in this module can be attached to a single
    LocalSite, after which only that site's members can see or change the
    object.  Access control is enforced at every level and backed by
    assertions and unit tests.
    """
    name = models.SlugField(_('name'), max_length=32, blank=False, unique=True)
    users = models.ManyToManyField(User, blank=True,
                                   related_name='local_site')
    admins = models.ManyToManyField(User, blank=True,
                                    related_name='local_site_admins')

    def is_accessible_by(self, user):
        """Return whether ``user`` may access this LocalSite.

        The user must be logged in and listed in the ``users`` field.
        """
        if not user.is_authenticated():
            return False
        return self.users.filter(pk=user.pk).exists()

    def is_mutable_by(self, user, perm='site.change_localsite'):
        """Return whether ``user`` may modify settings in this LocalSite.

        The user qualifies either through the given Django permission or by
        being listed in the ``admins`` field.  By default this checks
        mutability of the LocalSite itself; pass a different ``perm`` to
        check a related object instead.
        """
        if user.has_perm(perm):
            return True
        return self.admins.filter(pk=user.pk).exists()
| 43.162162 | 80 | 0.71603 | #
# models.py -- Models for the "reviewboard.site" app.
#
# Copyright (c) 2010 David Trowbridge
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
class LocalSite(models.Model):
    """
    A division within a Review Board installation.
    This allows the creation of independent, isolated divisions within a given
    server. Users can be designated as members of a LocalSite, and optionally
    as admins (which allows them to manipulate the repositories, groups and
    users in the site).
    Pretty much every other model in this module can all be assigned to a single
    LocalSite, at which point only members will be able to see or manipulate
    these objects. Access control is performed at every level, and consistency
    is enforced through a liberal sprinkling of assertions and unit tests.
    """
    name = models.SlugField(_('name'), max_length=32, blank=False, unique=True)
    # Plain members of the site (checked by is_accessible_by).
    users = models.ManyToManyField(User, blank=True,
                                   related_name='local_site')
    # Site administrators (checked by is_mutable_by).
    admins = models.ManyToManyField(User, blank=True,
                                    related_name='local_site_admins')
    def is_accessible_by(self, user):
        """Returns whether or not the user has access to this LocalSite.
        This checks that the user is logged in, and that they're listed in the
        'users' field.
        """
        return (user.is_authenticated() and
                self.users.filter(pk=user.pk).exists())
    def is_mutable_by(self, user, perm='site.change_localsite'):
        """Returns whether or not a user can modify settings in a LocalSite.
        This checks that the user is either staff with the proper permissions,
        or that they're listed in the 'admins' field.
        By default, this is checking whether the LocalSite itself can be
        modified, but a different permission can be passed to check for
        another object.
        """
        return user.has_perm(perm) or self.admins.filter(pk=user.pk).exists()
    def __unicode__(self):
        # Python 2-style string representation (pre-Django 2.0 codebase).
        return self.name
| 26 | 0 | 27 |
dc9ee33d04b651fe9c469f6036109271660c1165 | 2,907 | py | Python | tests/tests_from_genbanks/test_from_genbanks.py | simone-pignotti/DnaChisel | b7f0f925c9daefcc5fec903a13cfa74c3b726a7a | [
"MIT"
] | null | null | null | tests/tests_from_genbanks/test_from_genbanks.py | simone-pignotti/DnaChisel | b7f0f925c9daefcc5fec903a13cfa74c3b726a7a | [
"MIT"
] | null | null | null | tests/tests_from_genbanks/test_from_genbanks.py | simone-pignotti/DnaChisel | b7f0f925c9daefcc5fec903a13cfa74c3b726a7a | [
"MIT"
] | 1 | 2021-01-01T16:38:38.000Z | 2021-01-01T16:38:38.000Z | import os
import numpy
from dnachisel import (
CircularDnaOptimizationProblem,
DnaOptimizationProblem,
random_dna_sequence,
sequence_to_biopython_record,
Specification,
annotate_record,
)
def test_circular_example():
    """This example has a BsmBI cross origin site (location -3 -- 3)"""
    record_path = os.path.join(
        "tests", "tests_from_genbanks", "genbanks", "circular_example_1.gb"
    )
    problem = CircularDnaOptimizationProblem.from_record(record_path)
    # The site straddles the origin, hence the negative start in "-3-3(+)".
    first_location = problem.constraints_evaluations().evaluations[0].locations[0]
    assert str(first_location) == "-3-3(+)"
    problem.resolve_constraints()
    assert problem.all_constraints_pass()
def test_all_shorthands():
    """This test compiles all shorthands as a check that nothing is broken."""
    # Fixed seed keeps the random sequence (and thus the test) reproducible.
    numpy.random.seed(123)
    record = sequence_to_biopython_record(random_dna_sequence(1000))
    # One entry per "@shorthand" specification parser being exercised.
    annotations = [
        ((100, 900), "@no(CATG)"),
        ((100, 900), "@gc(40-60%)"),
        ((100, 900), "@insert(AarI_site)"),
        ((650, 752), "@cds"),
        ((100, 200), "@keep"),
        ((250, 273), "@primer"),
        ((250, 280), "@change"),
        ((943, 950), "@sequence(AKGNTKT)"),
        ((955, 958), "@sequence(ATT|ATC|GGG)"),
    ]
    for location, label in annotations:
        annotate_record(record, location, label=label)
    problem = DnaOptimizationProblem.from_record(record)
    assert len(problem.constraints) == 13  # AllowPrimer counts for 4 specs.
    assert not problem.all_constraints_pass()
    problem.resolve_constraints()
    assert problem.all_constraints_pass()
| 37.753247 | 78 | 0.713794 | import os
import numpy
from dnachisel import (
CircularDnaOptimizationProblem,
DnaOptimizationProblem,
random_dna_sequence,
sequence_to_biopython_record,
Specification,
annotate_record,
)
def test_circular_example():
    """This example has a BsmBI cross origin site (location -3 -- 3)"""
    path = os.path.join(
        "tests", "tests_from_genbanks", "genbanks", "circular_example_1.gb"
    )
    problem = CircularDnaOptimizationProblem.from_record(path)
    evals = problem.constraints_evaluations()
    # The site straddles the origin, hence the negative start in "-3-3(+)".
    assert str(evals.evaluations[0].locations[0]) == "-3-3(+)"
    problem.resolve_constraints()
    assert problem.all_constraints_pass()
def test_cuba_example_1():
    # End-to-end run on a real GenBank record: constraints start out
    # violated, resolving fixes them, and optimization then raises the
    # (initially strongly negative) objective score close to zero.
    path = os.path.join(
        "tests", "tests_from_genbanks", "genbanks", "cuba_example_1.gbk"
    )
    problem = DnaOptimizationProblem.from_record(path)
    assert not problem.all_constraints_pass()
    problem.resolve_constraints()
    assert problem.all_constraints_pass()
    assert problem.objective_scores_sum() < -100
    problem.optimize()
    assert problem.objective_scores_sum() > -0.1
def test_all_shorthands():
    """This test compiles all shorthands as a check that nothing is broken."""
    # Fixed seed keeps the random sequence (and thus the test) reproducible.
    numpy.random.seed(123)
    sequence = random_dna_sequence(1000)
    record = sequence_to_biopython_record(sequence)
    # Each label below exercises one "@shorthand" specification parser.
    annotate_record(record, (100, 900), label="@no(CATG)")
    annotate_record(record, (100, 900), label="@gc(40-60%)")
    annotate_record(record, (100, 900), label="@insert(AarI_site)")
    annotate_record(record, (650, 752), label="@cds")
    annotate_record(record, (100, 200), label="@keep")
    annotate_record(record, (250, 273), label="@primer")
    annotate_record(record, (250, 280), label="@change")
    annotate_record(record, (943, 950), label="@sequence(AKGNTKT)")
    annotate_record(record, (955, 958), label="@sequence(ATT|ATC|GGG)")
    problem = DnaOptimizationProblem.from_record(record)
    assert len(problem.constraints) == 13  # AllowPrimer counts for 4 specs.
    assert not problem.all_constraints_pass()
    problem.resolve_constraints()
    assert problem.all_constraints_pass()
def test_record_with_multispec_feature():
    # A single feature whose label chains three specs with "&" must expand
    # into three separate constraints, in label order.
    sequence = random_dna_sequence(100)
    record = sequence_to_biopython_record(sequence)
    label = "@gc(40-60%/20bp) & @no(BsaI_site) & @keep"
    annotate_record(record, label=label)
    problem = DnaOptimizationProblem.from_record(record)
    assert len(problem.constraints) == 3
    c1, c2, c3 = problem.constraints
    # Spot-check the parsed parameters of the first two specs.
    assert c1.mini == 0.4
    assert c2.pattern.name == "BsaI"
def test_feature_to_spec():
    # Specification.list_from_biopython_feature must expand an "&"-chained
    # feature label into one specification per clause.
    sequence = random_dna_sequence(100)
    record = sequence_to_biopython_record(sequence)
    label = "@gc(40-60%/20bp) & @no(BsaI_site) & @keep"
    annotate_record(record, label=label)
    feature = record.features[0]
    specs = Specification.list_from_biopython_feature(feature)
    assert len(specs) == 3
| 1,132 | 0 | 69 |
d70471019aba8a5cd3546c8d3867730dcf2268a4 | 3,855 | py | Python | project/cloudmesh-transfer/cloudmesh/transfer/command/transfer.py | cybertraining-dsc/fa19-516-160 | e71bc0c328456c5d13e124075c1555f13f228b55 | [
"Apache-2.0"
] | null | null | null | project/cloudmesh-transfer/cloudmesh/transfer/command/transfer.py | cybertraining-dsc/fa19-516-160 | e71bc0c328456c5d13e124075c1555f13f228b55 | [
"Apache-2.0"
] | 1 | 2019-09-25T00:56:07.000Z | 2019-09-25T00:56:07.000Z | project/cloudmesh-transfer/cloudmesh/transfer/command/transfer.py | cybertraining-dsc/fa19-516-160 | e71bc0c328456c5d13e124075c1555f13f228b55 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from cloudmesh.shell.command import command
from cloudmesh.shell.command import PluginCommand
from cloudmesh.transfer.api.manager import Manager
from cloudmesh.common.console import Console
from cloudmesh.common.util import path_expand
from pprint import pprint
from cloudmesh.common.debug import VERBOSE
from cloudmesh.shell.command import command, map_parameters
| 41.902174 | 94 | 0.597665 | from __future__ import print_function
from cloudmesh.shell.command import command
from cloudmesh.shell.command import PluginCommand
from cloudmesh.transfer.api.manager import Manager
from cloudmesh.common.console import Console
from cloudmesh.common.util import path_expand
from pprint import pprint
from cloudmesh.common.debug import VERBOSE
from cloudmesh.shell.command import command, map_parameters
class TransferCommand(PluginCommand):
    # Cloudmesh shell plugin providing the "transfer" command.
    # noinspection PyUnusedLocal
    @command
    def do_transfer(self, args, arguments):
        """
        ::
        Usage:
            transfer copy --source=azureblob:sourceObj --target=gcpbucket:targetObj [-r]
            transfer list --target=azureblob:targetObj
            transfer delete --target=gcpbucket:targetObj
            transfer status --id=transfer_id
            transfer statistic
        This command is part of Cloudmesh's multi-cloud storage service.
        Command allows users to transfer files/directories from storage of
        one Cloud Service Provider (CSP) to storage of other CSP.
        Current implementation is to transfer data between Azure blob
        storage and gcp object.
        GCP object/ Azure Blob storage credentials and container details will
        be fetched from storage section of "cloudmesh.yaml"
        transfer --file=FILE
        transfer list
        Arguments:
            azureblob:sourceObj    Combination of cloud name and the source object name
            sourceObj              Source object. Can be file or a directory.
            gcpbucket:targetObj    Combination of cloud name and the target object name
            targetObj              Target object. Can be file or a directory.
            transfer_id            A unique id/name assigned by cloudmesh to each
                                   transfer instance.
        Options:
            -f                          specify the file
            --id=transfer_id            Unique id/name of the transfer instance.
            -h                          Help function.
            --source=aws:sourceObj      Specify source cloud and source object.
            --target=azure:targetObj    Specify target cloud and target object.
            -r                          Recursive transfer for folders.
        Description:
            transfer copy --source=<azureblob:sourceObj> --target=<gcpbucket:targetObj> [-r]
                Copy file/folder from source to target. Source/target CSPs
                and name of the source/target objects to be provided.
                Optional argument "-r" indicates recursive copy.
            transfer list --target=azureblob:targetObj
                Enlists available files on target CSP at target object
            transfer delete --target=azureblob:targetObj
                Deletes target object from the target CSP.
            transfer status --id=<transfer_id>
                Returns status of given transfer instance
            transfer statistic
                Returns statistics of all transfer processes
        Examples:
            transfer copy --source=azureblob:sampleFileBlob.txt
            .              --target=gcpbucket:sampleFileObject.txt
        """
        # NOTE: the docstring above is the CLI grammar parsed by cloudmesh's
        # @command decorator (docopt-style); editing it changes the CLI.
        print("EXECUTING: ")
        # Promote --source/--target into attribute-style access on arguments.
        map_parameters(arguments,
                       "source",
                       "target")
        #arguments.FILE = arguments['--file'] or None
        VERBOSE(arguments)
        m = Manager()
        # NOTE(review): arguments.FILE is never populated here (the
        # assignment above is commented out), so this branch is presumably
        # always skipped — confirm intended behavior.
        if arguments.FILE:
            print("option a")
            m.list(path_expand(arguments.FILE))
        elif arguments.list:
            print("option b")
            m.list("just calling list without parameter")
        Console.error("This is just a sample")
        return ""
| 0 | 3,429 | 23 |
5e60ee8cca01460a2772411fc8e4f93b7ee30447 | 813 | py | Python | tests/test_DEMA.py | solocarrie/talipp | a35bbc33444c56683d4e26439f4878e92b937d7f | [
"MIT"
] | 54 | 2020-11-19T02:27:04.000Z | 2022-02-22T06:31:05.000Z | tests/test_DEMA.py | justin-pierce/talipp | f5296381e3f4270b7743694e2ab5a0da301bdaf3 | [
"MIT"
] | 24 | 2020-11-01T17:56:28.000Z | 2021-09-15T18:40:04.000Z | tests/test_DEMA.py | justin-pierce/talipp | f5296381e3f4270b7743694e2ab5a0da301bdaf3 | [
"MIT"
] | 14 | 2020-12-10T22:43:37.000Z | 2022-01-15T22:23:42.000Z | import unittest
from talipp.indicators import DEMA
from TalippTest import TalippTest
if __name__ == '__main__':
unittest.main()
| 24.636364 | 68 | 0.682657 | import unittest
from talipp.indicators import DEMA
from TalippTest import TalippTest
class TestDEMA(TalippTest):
    """Tests for the DEMA (Double Exponential Moving Average) indicator."""

    def setUp(self) -> None:
        # Fresh copy of the shared close-price fixture for every test.
        self.input_values = list(TalippTest.CLOSE_TMPL)

    def test_init(self):
        ind = DEMA(20, self.input_values)
        # Reference values for the last three outputs over CLOSE_TMPL.
        # (A stray debug print(ind) was removed here.)
        self.assertAlmostEqual(ind[-3], 9.683254, places = 5)
        self.assertAlmostEqual(ind[-2], 9.813792, places = 5)
        self.assertAlmostEqual(ind[-1], 9.882701, places = 5)

    def test_update(self):
        self.assertIndicatorUpdate(DEMA(20, self.input_values))

    def test_delete(self):
        self.assertIndicatorDelete(DEMA(20, self.input_values))

    def test_purge_oldest(self):
        self.assertIndicatorPurgeOldest(DEMA(20, self.input_values))
self.assertIndicatorPurgeOldest(DEMA(20, self.input_values))
if __name__ == '__main__':
unittest.main()
| 513 | 6 | 157 |
4239f46bc223dbb65346eea99aa992664babd48a | 227 | py | Python | tests/test_multi.py | Shimwell/example_python_package_shim | ed04d8c4a90f74dd4ddd4fc2c205d8d3858af400 | [
"MIT"
] | null | null | null | tests/test_multi.py | Shimwell/example_python_package_shim | ed04d8c4a90f74dd4ddd4fc2c205d8d3858af400 | [
"MIT"
] | null | null | null | tests/test_multi.py | Shimwell/example_python_package_shim | ed04d8c4a90f74dd4ddd4fc2c205d8d3858af400 | [
"MIT"
] | null | null | null |
from example_python_package_shim import multi
| 13.352941 | 45 | 0.704846 |
from example_python_package_shim import multi
def test_multi_with_small_numbers():
    """multi() should multiply two small positive integers."""
    assert multi(3, 3) == 9
def test_multi_with_big_numbers():
    """multi() should handle larger operands as well."""
    assert multi(100, 2) == 200
| 132 | 0 | 46 |
105e81ee50d6608ba24a33a0c8dc4cf3580532c0 | 463 | py | Python | Resources/urls.py | charanreddyvaddhi/Team_build | 99c0c6415cece121e9afb91a2af12c87dfbf2de1 | [
"Apache-2.0"
] | null | null | null | Resources/urls.py | charanreddyvaddhi/Team_build | 99c0c6415cece121e9afb91a2af12c87dfbf2de1 | [
"Apache-2.0"
] | null | null | null | Resources/urls.py | charanreddyvaddhi/Team_build | 99c0c6415cece121e9afb91a2af12c87dfbf2de1 | [
"Apache-2.0"
] | null | null | null | from django.urls import path, include, re_path
from .import views
urlpatterns=[
path('',views.page1,name="index"),
path('Resources/',views.MembersListView.as_view(),name="resources"),
path('Resource/<int:pk>', views.MembersDetailView.as_view(), name='Resource-detail'),
path('Technologies/',views.TechnologiesListView.as_view(),name="technologies"),
#re_path(r'^member/(?P<pk>\d+)$', views.MembersDetailView.as_view(), name='member-detail')
] | 46.3 | 94 | 0.714903 | from django.urls import path, include, re_path
from .import views
urlpatterns=[
path('',views.page1,name="index"),
path('Resources/',views.MembersListView.as_view(),name="resources"),
path('Resource/<int:pk>', views.MembersDetailView.as_view(), name='Resource-detail'),
path('Technologies/',views.TechnologiesListView.as_view(),name="technologies"),
#re_path(r'^member/(?P<pk>\d+)$', views.MembersDetailView.as_view(), name='member-detail')
] | 0 | 0 | 0 |
b8b54efd1144b53cbbbbddf2104f9fa61140cf0c | 622 | py | Python | CCICApp/zhihu/zhihu_search.py | kiddhmh/DjangoSpiders | e14b88305acf769f344ef910c238bf55afbec273 | [
"MIT"
] | 2 | 2018-04-19T02:51:05.000Z | 2019-08-12T03:23:31.000Z | CCICApp/zhihu/zhihu_search.py | kiddhmh/DjangoSpiders | e14b88305acf769f344ef910c238bf55afbec273 | [
"MIT"
] | 1 | 2018-04-23T06:45:45.000Z | 2018-04-23T06:45:45.000Z | CCICApp/zhihu/zhihu_search.py | kiddhmh/DjangoSpiders | e14b88305acf769f344ef910c238bf55afbec273 | [
"MIT"
] | 1 | 2018-04-23T02:12:33.000Z | 2018-04-23T02:12:33.000Z | from CCICApp.zhihu.zhihu_login import ZhiHuLogin
| 36.588235 | 83 | 0.657556 | from CCICApp.zhihu.zhihu_login import ZhiHuLogin
class ZhiHuSearch(object):
    """Performs searches on Zhihu through an authenticated ZhiHuLogin client."""
    homeURL = 'https://www.zhihu.com'
    def __init__(self, searchDic):
        # searchDic must contain at least 'keyword'; the whole dict is also
        # forwarded to the login client.
        self.__search_text = searchDic['keyword']
        self.__loginclient = ZhiHuLogin(searchDic)  # initialize; logs in if not already logged in
    # "addq" = "add query": whether to append the search keyword to the URL.
    def do_search(self, url: str, addq: bool, auth: bool = False) -> object:
        """Open url (optionally suffixed with the keyword) and return the parsed page."""
        self.__searchURL = url
        if addq:
            # Build the full search URL: home URL + path + keyword.
            self.__searchURL = self.homeURL + self.__searchURL + self.__search_text
        # Return value is whatever the login client's open() yields
        # (presumably a parsed BeautifulSoup document — confirm in ZhiHuLogin).
        soup = self.__loginclient.open(self.__searchURL, auth=auth)
        return soup
| 455 | 139 | 23 |
df205396c364e18773beadfa2ce45fc1e9cfd20c | 179 | py | Python | tests/test00_basic/models.py | SelfHacked/drf-tree-routers | 3600c5db2a6a8287aeb8d269ed7c830437a06555 | [
"MIT"
] | null | null | null | tests/test00_basic/models.py | SelfHacked/drf-tree-routers | 3600c5db2a6a8287aeb8d269ed7c830437a06555 | [
"MIT"
] | null | null | null | tests/test00_basic/models.py | SelfHacked/drf-tree-routers | 3600c5db2a6a8287aeb8d269ed7c830437a06555 | [
"MIT"
] | 1 | 2021-06-03T12:03:49.000Z | 2021-06-03T12:03:49.000Z | from django.db import models
| 14.916667 | 36 | 0.653631 | from django.db import models
class A(models.Model):
    # Simple test model with a single integer field.
    x = models.IntegerField()
class B(models.Model):
    # Test model related to A; deleting an A cascades to its related Bs.
    a = models.ForeignKey(
        A, on_delete=models.CASCADE,
    )
| 0 | 102 | 46 |
6c30462aa9a6770ee1f961fff59237d9e5ceb030 | 242 | py | Python | sso/tasks.py | uktrade/sso | f4fb527cfe12955c079251031261f2407956bad3 | [
"MIT"
] | 1 | 2017-06-02T09:09:02.000Z | 2017-06-02T09:09:02.000Z | sso/tasks.py | uktrade/sso | f4fb527cfe12955c079251031261f2407956bad3 | [
"MIT"
] | 372 | 2016-10-25T17:10:18.000Z | 2022-03-30T14:53:55.000Z | sso/tasks.py | uktrade/sso | f4fb527cfe12955c079251031261f2407956bad3 | [
"MIT"
] | 3 | 2016-11-10T17:13:39.000Z | 2019-12-06T16:54:46.000Z | from django.core.management import call_command
from conf.celery import app
@app.task(autoretry_for=(TimeoutError,))
@app.task()
| 17.285714 | 47 | 0.760331 | from django.core.management import call_command
from conf.celery import app
@app.task(autoretry_for=(TimeoutError,))
def notify_users():
call_command('notify_users')
@app.task()
def archive_users():
call_command('archive_users')
| 64 | 0 | 44 |
2a9dce9f8bf8bd688a9a392773becfb56f6fa19e | 14,554 | py | Python | dem_generator.py | vikineema/dem-utils | 9c11778b5516e571e7f1b622170c73efb07e2c12 | [
"MIT"
] | 5 | 2019-11-12T00:20:12.000Z | 2022-02-17T19:08:37.000Z | dem_generator.py | vikineema/dem-utils | 9c11778b5516e571e7f1b622170c73efb07e2c12 | [
"MIT"
] | 1 | 2021-08-24T05:24:55.000Z | 2021-08-24T13:29:58.000Z | dem_generator.py | ArMoraer/dem-utils | d1da1636319a000720c9ca594db6759f12f95f81 | [
"MIT"
] | 2 | 2019-01-25T15:35:56.000Z | 2021-08-18T18:01:44.000Z | # -*- coding: utf-8 -*-
"""
/***************************************************************************
DemGenerator
Random DEM generator
--------------------
begin : 2017-08-29
git sha : $Format:%H$
copyright : (C) 2017 by Alexandre Delahaye
email : menoetios@gmail.com
***************************************************************************/
"""
import argparse
import numpy as np
import sys
from math import *
from osgeo import gdal
from osgeo.gdalconst import *
parser = argparse.ArgumentParser(description='Generates a random DEM.')
parser.add_argument("dempath", metavar='path', help='output DEM path')
parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
parser.add_argument("--height", type=int, default=1000, help="DEM height (default: 1000)")
parser.add_argument("--width", type=int, default=1000, help="DEM width (default: 1000)")
parser.add_argument("--waterratio", type=float, default=0.5, help="water ratio (default: 0.5)")
parser.add_argument("--island", action="store_true", help="set island mode")
parser.add_argument("--scale", type=float, default=20, help="features scale (default: 20)")
parser.add_argument("--detailslevel", type=float, default=3, help="level of features details (default: 3)")
parser.add_argument("--spread", type=float, default=3, help="features spread (default: 3)")
parser.add_argument("--roughness", type=float, default=5, help="features roughness (default: 5)")
parser.add_argument("--directionality", type=float, default=5, help="features directionality (default: 5)")
parser.add_argument("--preset", type=str, choices=['archipelago', 'mountainous_island'], \
help="predefined set of parameters (overrides all parameters except height and width)")
args = parser.parse_args()
dem = DemGenerator()
dem.setParams(
verbose=args.verbose,
height=args.height,
width=args.width,
waterRatio=args.waterratio,
island=args.island,
scale=args.scale,
detailsLevel=args.detailslevel,
spread=args.spread,
roughness=args.roughness,
directionality=args.directionality,
preset=args.preset)
dem.generate()
dem.writeToFile(args.dempath)
| 34.406619 | 107 | 0.667445 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
DemGenerator
Random DEM generator
--------------------
begin : 2017-08-29
git sha : $Format:%H$
copyright : (C) 2017 by Alexandre Delahaye
email : menoetios@gmail.com
***************************************************************************/
"""
import argparse
import numpy as np
import sys
from math import *
from osgeo import gdal
from osgeo.gdalconst import *
class GaussianKernel():
    """An elliptical 2D Gaussian "bump" used as a terrain building block.

    The kernel is rendered on demand by getAsArray() as a square array of
    side ``self.size`` pixels (always odd so the peak stays centred;
    roughly ``3.5 * fwhm * sqrt(ratio)``).
    """

    def __init__(self, fwhm, amplitude, orientation, ratio, level):
        """Constructor.

        :param fwhm: full-width-half-maximum (effective radius).
        :param amplitude: peak height of the kernel.
        :param orientation: orientation (in rad).
        :param ratio: aspect ratio (eg 2 means that the kernel is 2 times
            larger in the <orientation> direction). Should be >= 1.
        :param level: depth of this kernel in the recursive "kernel tree".
        """
        self.fwhm = fwhm
        self.ornt = orientation
        self.ratio = ratio
        self.ampl = amplitude
        self.level = level
        self.size = int(fwhm * 3.5 * sqrt(ratio))  # kernel size (in px)
        if not self.size % 2:
            self.size += 1  # always use an odd size

    def getAsArray(self, offset):
        """
        Make a square gaussian kernel.

        :param offset: (x, y) sub-pixel shift applied to the sampling grid.
        :return: numpy array of shape (size, size).
        """
        x = np.arange(0, self.size, 1, float)
        y = np.copy(x[:, np.newaxis])
        x -= offset[0]
        y -= offset[1]
        x0 = y0 = self.size / 2
        # Centre the coordinates and rotate them by the kernel orientation.
        x1 = (x - x0) * cos(self.ornt) - (y - y0) * sin(self.ornt)
        y1 = (x - x0) * sin(self.ornt) + (y - y0) * cos(self.ornt)
        # Elliptical Gaussian: ratio stretches one axis and squeezes the other.
        return self.ampl * np.exp(-4 * np.log(2) *
                                  (x1 ** 2 / self.ratio + y1 ** 2 * self.ratio)
                                  / self.fwhm ** 2)

    def getRandomLocation(self, offset, thresh=0):
        """
        Returns a random location inside a Gaussian kernel.

        :param offset: sub-pixel shift forwarded to getAsArray().
        :param thresh: must be between 0 and 1. If 0, the returned location is
            randomly selected following the kernel distribution (i.e. the closest to
            the kernel center, the more likely). Else, it is picked amongst all pixels
            whose value is higher than thresh*amplitude, following a uniform law.
        :return: (x, y) tuple of integer pixel indices.
        """
        if thresh == 0:
            a = self.ampl * np.random.random()
        else:
            a = self.ampl * thresh
        val = 0
        kernArray = self.getAsArray(offset)
        while val < a:
            # np.random.randint's upper bound is exclusive, so use self.size:
            # the previous `self.size - 1` silently excluded the kernel's
            # last row and column from ever being drawn (off-by-one).
            x = np.random.randint(0, self.size)
            y = np.random.randint(0, self.size)
            val = kernArray[x, y]
        return (x, y)
class DemGenerator():
    """Random DEM (digital elevation model) generator.

    Terrain is built by summing elliptical Gaussian kernels: a few large
    first-level kernels are placed near the image centre, then each kernel
    recursively spawns smaller children kernels on top of itself (see
    GaussianKernel).  Typical use: setParams() -> generate() -> writeToFile().
    """
    def __init__(self):
        """
        Constructor.  Does nothing; call setParams() before generate().
        """
    def setParams(self, verbose=False, width=1000, height=1000, waterRatio=0.5, island=False, scale=20,
                  detailsLevel=3, spread=3, roughness=5, directionality=5, preset=None):
        """
        Sets generator parameters. Must be called right after instantiation.
        :param verbose: print progress while generating (default=False)
        :param width: DEM width (default=1000)
        :param height: DEM height (default=1000)
        :param waterRatio: ratio of negative DEM values (default=0.5)
        :param island: island mode. Ensures that no piece of land is cut at the DEM border (default=False)
        :param scale: scale of main terrain features. Range: 1-100 (default=20)
        :param detailsLevel: depth of the "kernel tree". Higher value means more roughness.
            Warning: computation time increases exponentially with this parameter (default=3)
        :param spread: 1-6 (default=3)
        :param roughness: 1-10 (default=5)
        :param directionality: the higher this parameter, the more "oriented" the map features.
            Range: 1-10 (default=5)
        :param preset: 'archipelago' or 'mountainous_island'; overrides all
            parameters except width/height (default=None)
        """
        self.demWidth = width
        self.demHeight = height
        self.dem = np.zeros( (self.demWidth, self.demHeight), dtype=np.float32 )
        # Preset parameters
        # -----------------
        if preset == 'archipelago':
            waterRatio = 0.9 # 0.5
            island = False
            scale = 5 # 20
            detailsLevel = 2 # 3
            spread = 6 # 3
            roughness = 3.5 # 5
            directionality = 5
        elif preset == 'mountainous_island':
            waterRatio = 0.5
            island = True # False
            scale = 20
            detailsLevel = 3
            spread = 4 # 3
            roughness = 5
            directionality = 10 # 5
        # Parameters pre-initialisation
        # -----------------------------
        # Scale (clamped to 1-100)
        if scale < 1: scale = 1
        if scale > 100: scale = 100
        initMeanFwhm = int(max(width, height) / 100 * scale)
        # Details
        if detailsLevel < 0: detailsLevel = 0
        maxLevelChildren = detailsLevel
        # Spread (clamped to 1-6)
        if spread < 1: spread = 1
        if spread > 6: spread = 6
        nInitKernels = int(spread * 2)
        locationThresh = pow(10, (1-float(spread))/2)
        if island: initLocRatio = float(spread) / 30 # Range: 0.033-0.2
        else: initLocRatio = min(0.5, float(spread) / 15) # Range: 0.1-0.4
        # Roughness (clamped to 1-10)
        if roughness < 1: roughness = 1
        if roughness > 10: roughness = 10
        initMeanAmpl = roughness * 4
        meanReducFactor = float(roughness) / 12 # Range: 0.083-0.83
        maxReducFactor = float(roughness) / 6 # Range: 0.167-1.67
        nChildren = int(float(spread) * sqrt(float(roughness))) # Range: 1-15
        # Directionality (clamped to 1-10)
        if directionality < 1: directionality = 1
        if directionality > 10: directionality = 10
        initMaxRatio = 1 + (float(directionality-1) / 3) # Range: 1-4.333
        deltaOrnt = (11 - directionality) * pi / 10
        # Parameters related to first-level kernels
        # -----------------------------------------
        self.nInitKernels = nInitKernels # Nbr of first-level kernels
        self.initLocRatio = initLocRatio # Max relative distance of the centers of the first-level
                                         # kernels to the center of the image (must be <= 0.5)
        self.initMeanFwhm = initMeanFwhm # Mean full width at half maximum of first-level kernels.
                                         # FWHM follows a triangular distribution centered on this value.
        self.initMeanAmpl = initMeanAmpl # Mean amplitude of first-level kernels.
                                         # Amplitude follows a triangular distribution centered on this value.
        self.initMaxRatio = initMaxRatio # Max gaussian ratio of first-level kernels.
                                         # Gaussian ratio follows a uniform distribution between 1 and this value.
        # Parameters related to children kernels
        # --------------------------------------
        self.nChildren = nChildren
        self.maxLevelChildren = maxLevelChildren
        self.meanReducFactor = meanReducFactor # The reduction factor determines the FWHM and amplitude of
        self.maxReducFactor = maxReducFactor # children kernels, relative to the parent kernel. It follows
                                             # a triangular distribution between 0 and self.maxReducFactor,
                                             # whose mode is self.meanReducFactor.
        self.deltaOrnt = deltaOrnt # Maximum change of orientation
        # NOTE(review): the `locationThresh` computed above is never used;
        # 0.01 is hard-coded here — possibly a deliberate tuning override,
        # but confirm before removing the dead local.
        self.locationThresh = 0.01 # Threshold used for random children location. The closer to 1,
                                   # the closer to the center of the parent kernel.
        # Other parameters
        # ----------------
        self.islandMode = island # If true, the sea level will be raised until there is at least
                                 # a 1-pixel margin of sea around the DEM
        self.waterRatio = waterRatio # Ratio of negative height values. If the island mode is
                                     # enabled, this parameter might not be taken into account.
        self.verbose = verbose
    def generate(self):
        """Build the DEM: place the first-level kernels, recurse into their
        children, then adjust the sea level and print statistics."""
        for i in range(self.nInitKernels):
            fwhm = np.random.triangular( 0.5*self.initMeanFwhm, self.initMeanFwhm, 1.5*self.initMeanFwhm )
            # ampl = np.random.triangular( 0.5*self.initMeanAmpl, self.initMeanAmpl, 1.5*self.initMeanAmpl )
            ampl = np.random.triangular( 0, self.initMeanAmpl, 1.5*self.initMeanAmpl ) # 0 allows for plains
            ornt = np.random.uniform( 0, 2*pi )
            ratio = np.random.uniform( 1, self.initMaxRatio )
            gauss = GaussianKernel( fwhm, ampl, ornt, ratio, 0 )
            # Upper-left corner such that the kernel centre falls within
            # initLocRatio of the image centre.
            location = ( \
                self.demWidth * (.5 + np.random.uniform(-self.initLocRatio, self.initLocRatio)) - gauss.size/2, \
                self.demHeight * (.5 + np.random.uniform(-self.initLocRatio, self.initLocRatio)) - gauss.size/2 )
            # print(location[0]+(gauss.size/2), location[1]+(gauss.size/2))
            self.addKernelToDem( gauss.getAsArray((0,0)), location )
            # Recursive call
            self.generateChildren( gauss, location, (0,0), nChildren=self.nChildren )
            if self.verbose:
                sys.stdout.write(('%.0f' % (100*float(i+1)/float(self.nInitKernels))) + '%... ')
                sys.stdout.flush()
        if self.verbose: print('') # Line break
        self.setSeaLevel()
        #self.correctElevation()
        self.printStats()
        print('Done')
    def generateChildren(self, krn, krnLoc, krnOffset, nChildren):
        """
        Generates children kernels from a parent kernel
        :param krn: parent kernel
        :param krnLoc: location of parent kernel (UL corner)
        :param krnOffset: sub-pixel offset of the parent kernel
        :param nChildren: number of children
        """
        # Stop recursing below the configured tree depth.
        if krn.level > self.maxLevelChildren:
            return
        for i in range(nChildren):
            # Get random parameters and create kernel
            reduct = np.random.triangular( 0, self.meanReducFactor, self.maxReducFactor )
            fwhm = reduct * krn.fwhm
            ampl = reduct * krn.ampl * 0.9
            ornt = np.random.triangular( krn.ornt-self.deltaOrnt, krn.ornt, krn.ornt+self.deltaOrnt )
            ratio = np.random.triangular( max(1, 0.8*krn.ratio), \
                max(1, 0.9*krn.ratio)+.01, max(1, 1.0*krn.ratio)+.02 )
            gauss = GaussianKernel(fwhm, ampl, ornt, ratio, krn.level+1)
            # Skip if kernel is too small
            if gauss.size < 5:
                continue
            # Get random position inside parent kernel
            cnt_x, cnt_y = krn.getRandomLocation(krnOffset, self.locationThresh)
            x, y = krnLoc
            childKrnLoc = (cnt_x - float(gauss.size)/2.0 + x, \
                cnt_y - float(gauss.size)/2.0 + y)
            # Fractional parts become the child's sub-pixel sampling offset.
            childOffset = (np.modf(childKrnLoc[0])[0], np.modf(childKrnLoc[1])[0])
            # print("parent location=", krnLoc)
            # print(cnt_x, cnt_y)
            # print(childKrnLoc)
            # print("child location=", childKrnLoc)
            # Add child kernel to DEM and create grand-children
            # print("Adding kernel with offset ", childOffset)
            self.addKernelToDem( gauss.getAsArray(childOffset), childKrnLoc )
            self.generateChildren( gauss, childKrnLoc, childOffset, nChildren=self.nChildren )
    def addKernelToDem(self, kernArray, ul):
        """
        Add a gaussian kernel to a larger DEM.
        Kernels overflowing the DEM borders are clipped.
        :param kernArray: 2D numpy array returned by GaussianKernel.getAsArray()
        :param ul: kernel upper left coordinates in the DEM
        """
        krn_x, krn_y = np.shape(kernArray)
        krn_start_x, krn_start_y = 0, 0
        krn_end_x, krn_end_y = krn_x - 1, krn_y - 1
        dem_start_x, dem_start_y = int(ul[0]), int(ul[1])
        dem_end_x, dem_end_y = dem_start_x + krn_end_x, dem_start_y + krn_end_y
        # Clip the kernel window against each DEM border.
        if dem_start_x < 0:
            krn_start_x = -dem_start_x
            dem_start_x = 0
        if dem_start_y < 0:
            krn_start_y = -dem_start_y
            dem_start_y = 0
        if dem_end_x >= self.demWidth:
            krn_end_x = krn_x - (dem_end_x - self.demWidth + 1)
            dem_end_x = self.demWidth
        if dem_end_y >= self.demHeight:
            krn_end_y = krn_y - (dem_end_y - self.demHeight + 1)
            dem_end_y = self.demHeight
        # print(dem_start_x, dem_start_y)
        # print(dem_end_x, dem_end_y)
        # print(krn_start_x, krn_start_y)
        # print(krn_end_x, krn_end_y)
        try:
            self.dem[dem_start_x:dem_end_x, dem_start_y:dem_end_y] = \
                self.dem[dem_start_x:dem_end_x, dem_start_y:dem_end_y] + \
                kernArray[krn_start_x:krn_end_x, krn_start_y:krn_end_y]
        except ValueError:
            # NOTE(review): shape-mismatch errors are silently swallowed
            # (the `0` below is a no-op) — kernels that still misalign after
            # clipping are simply dropped.
            0
            # print("Error when adding kernel: ", "shape=", np.shape(kernArray), " UL=", ul)
    def setSeaLevel(self):
        """
        Adjusts the sea level (0-level) according to self.waterRatio and self.islandMode
        """
        # Initial water raising
        zeroLevel = np.percentile(self.dem, 100*self.waterRatio)
        if self.verbose: print('[Water ratio] Sea level raised by ', zeroLevel)
        self.dem -= zeroLevel
        # Island mode: set at least a 1-pixel margin of water
        if self.islandMode:
            maxInMargin = max(np.max(self.dem[0,:]), \
                np.max(self.dem[-1,:]), \
                np.max(self.dem[:,0]), \
                np.max(self.dem[:,-1]))
            if maxInMargin > 0:
                if self.verbose: print('[Island mode] Sea level raised by ', maxInMargin)
                self.dem -= maxInMargin
    def correctElevation(self):
        """
        Adjusts mean elevation
        TODO...  (currently a placeholder: divides all heights by 10;
        not called by generate())
        """
        self.dem /= 10
    def printStats(self):
        """
        Prints DEM statistics
        """
        print('Water ratio:', float(np.count_nonzero(self.dem < 0)) / float(self.dem.size))
        print('Lowest point:', np.min(self.dem))
        print('Highest point:', np.max(self.dem))
        print('Mean positive elevation:', np.mean(self.dem[self.dem >= 0]))
        print('Median positive elevation:', np.median(self.dem[self.dem >= 0]))
    def writeToFile(self, path):
        """Write the DEM to a single-band float32 GeoTIFF at ``path``.
        Exits the process if the file cannot be created."""
        driver = gdal.GetDriverByName('GTiff')
        outDs = driver.Create(path, self.demWidth, self.demHeight, 1, GDT_Float32)
        if outDs is None:
            print('Could not create', path)
            sys.exit(1)
        outDs.SetGeoTransform((0, 1, 0, self.demHeight, 0, -1))
        # write the data
        outBand = outDs.GetRasterBand(1)
        outBand.WriteArray(np.transpose(self.dem), 0, 0)
        # flush data to disk, set the NoData value and calculate stats
        outBand.FlushCache()
        outBand.SetNoDataValue(-99)
# Command-line entry point: parse options, generate the DEM, write a GeoTIFF.
parser = argparse.ArgumentParser(description='Generates a random DEM.')
parser.add_argument("dempath", metavar='path', help='output DEM path')
parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
parser.add_argument("--height", type=int, default=1000, help="DEM height (default: 1000)")
parser.add_argument("--width", type=int, default=1000, help="DEM width (default: 1000)")
parser.add_argument("--waterratio", type=float, default=0.5, help="water ratio (default: 0.5)")
parser.add_argument("--island", action="store_true", help="set island mode")
parser.add_argument("--scale", type=float, default=20, help="features scale (default: 20)")
parser.add_argument("--detailslevel", type=float, default=3, help="level of features details (default: 3)")
parser.add_argument("--spread", type=float, default=3, help="features spread (default: 3)")
parser.add_argument("--roughness", type=float, default=5, help="features roughness (default: 5)")
parser.add_argument("--directionality", type=float, default=5, help="features directionality (default: 5)")
parser.add_argument("--preset", type=str, choices=['archipelago', 'mountainous_island'], \
                    help="predefined set of parameters (overrides all parameters except height and width)")
args = parser.parse_args()
# Forward every CLI option to the generator, then run it.
dem = DemGenerator()
dem.setParams(
    verbose=args.verbose,
    height=args.height,
    width=args.width,
    waterRatio=args.waterratio,
    island=args.island,
    scale=args.scale,
    detailsLevel=args.detailslevel,
    spread=args.spread,
    roughness=args.roughness,
    directionality=args.directionality,
    preset=args.preset)
dem.generate()
dem.writeToFile(args.dempath)
cd06d3fad35192473e36ff571f9628cc687951e0 | 7,313 | py | Python | 14B-088/HI/analysis/HI_pvslices_thin_figures.py | e-koch/VLA_Lband | 8fca7b2de0b88ce5c5011b34bf3936c69338d0b0 | [
"MIT"
] | 1 | 2021-03-08T23:19:12.000Z | 2021-03-08T23:19:12.000Z | 14B-088/HI/analysis/HI_pvslices_thin_figures.py | e-koch/VLA_Lband | 8fca7b2de0b88ce5c5011b34bf3936c69338d0b0 | [
"MIT"
] | null | null | null | 14B-088/HI/analysis/HI_pvslices_thin_figures.py | e-koch/VLA_Lband | 8fca7b2de0b88ce5c5011b34bf3936c69338d0b0 | [
"MIT"
] | null | null | null |
'''
Make a figure of the thin pv-slices stacked on top of each other.
'''
from spectral_cube import SpectralCube, Projection
from astropy.io import fits
from astropy import units as u
import numpy as np
from glob import glob
import os
from os.path import join as osjoin
import matplotlib.pyplot as plt
from aplpy import FITSFigure
from paths import (fourteenB_HI_data_wGBT_path, allfigs_path,
fourteenB_wGBT_HI_file_dict)
from constants import hi_freq
from plotting_styles import twocolumn_figure, default_figure
# Make sure the figure directory exists
fig_path = allfigs_path("pvslices")
if not os.path.exists(fig_path):
os.mkdir(fig_path)
pvslice_dir = fourteenB_HI_data_wGBT_path("downsamp_1kms/")
# I need the beam in the cube to convert to K
cube = SpectralCube.read(fourteenB_HI_data_wGBT_path("downsamp_1kms/M33_14B-088_HI.clean.image.GBT_feathered.1kms.fits"))
jybeam_to_K = cube.beam.jtok(hi_freq)
del cube
# Get all pv-slices
filenames = glob(osjoin(pvslice_dir, "M33_14B-088_HI.clean.image.GBT_feathered.1kms_PA_*_pvslice_40.0arcsec_width.fits"))
# The slices go from a PA of 0 to 175 in increments of 5 deg
ordered_filenames = []
pas = np.arange(0, 180, 5)
for pa in pas:
for fname in filenames:
if "PA_{}_".format(pa) in fname:
ordered_filenames.append(fname)
break
# Want to put on a common scale. Grab the max from all slices.
max_val = 0
for fname in ordered_filenames:
hdu = fits.open(fname)[0]
max_slice_val = np.nanmax(hdu.data)
if max_slice_val > max_val:
max_val = max_slice_val
# Split into figures of 6
for i in range(6):
fig = plt.figure(figsize=(8.1, 11.))
for j, n in enumerate(np.arange(6 * i, 6 * (i + 1))):
hdu = fits.open(ordered_filenames[n])[0]
fig_n = FITSFigure(hdu, subplot=(6, 1, j + 1), figure=fig)
fig_n.show_grayscale(invert=True, stretch='arcsinh')
fig_n.show_contour(hdu,
levels=[2 / jybeam_to_K.value,
3 / jybeam_to_K.value,
4 / jybeam_to_K.value],
smooth=3)
zero_vel_posn = hdu.header['CRVAL2'] / \
np.abs(hdu.header['CDELT2'])
fig_n._ax1.axhline(zero_vel_posn * 1000., color='k', linestyle='-.',
linewidth=1, alpha=0.75)
# Add line at M33's center
# Must be in the center, since the pv path is defined wrt to the center
fig_n._ax1.axvline(hdu.shape[1] / 2, color='k', linestyle='-.',
linewidth=1, alpha=0.75)
fig_n._ax1.set_yticklabels(np.array([-300000, -250000, -200000,
-150000, -100000]) / 1000)
# fig_n.set_axis_labels(ylabel='Velocity (km/s)')
fig_n.hide_axis_labels()
fig_n.hide_ytick_labels()
# Put the PA in the upper corner
if i < 4:
fig_n.add_label(0.81, 0.8, "{} deg".format(int(pas[n])),
relative=True, size=14,
bbox={"boxstyle": "square", "facecolor": "w"})
else:
fig_n.add_label(0.2, 0.8, "{} deg".format(int(pas[n])),
relative=True, size=14,
bbox={"boxstyle": "square", "facecolor": "w"})
if j != 5:
fig_n.hide_xaxis_label()
fig_n.hide_xtick_labels()
# if j == 0:
# fig_n.add_colorbar()
# fig_n.colorbar.set_location('top')
# fig_n.colorbar.set_label_properties(size=11)
fig.savefig(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_{}.png".format(i)))
fig.savefig(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_{}.pdf".format(i)))
plt.close()
# Now make a figure of all of the pv-slice paths on the zeroth moment map
mom0 = Projection.from_hdu(fits.open(fourteenB_wGBT_HI_file_dict['Moment0'])[0])
mom0.quicklook()
mom0.FITSFigure.show_regions(osjoin(pvslice_dir, "M33_14B-088_HI.clean.image.GBT_feathered.1kms_pvslice_40.0arcsec_width.reg"))
mom0.FITSFigure.save(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_paths.png"))
mom0.FITSFigure.save(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_paths.pdf"))
# Make a smaller figure for the paper. Include pv-slices along the major,
# minor and warped major (135 deg) axes
twocolumn_figure()
fig = plt.figure(figsize=(8.4, 4.2))
max_size = fits.open(ordered_filenames[0])[0].shape[1]
maj_header = fits.open(ordered_filenames[0])[0].header
for i, pa in zip(range(3), [0, 90, 135]):
idx = np.where(pas == pa)[0]
hdu = fits.open(ordered_filenames[idx])[0]
# Convert to K
hdu = fits.PrimaryHDU(hdu.data * jybeam_to_K.value, hdu.header)
# Reverse the direction of the 135 slice
if i == 2:
hdu = fits.PrimaryHDU(hdu.data[::-1], hdu.header)
if pa != 0:
# Match the major axis slice length to make each the same shape
padded_slice = np.zeros((hdu.shape[0], max_size)) * np.NaN
# Velocity axes will match
pad_size = (max_size - hdu.shape[1]) / 2
if hdu.shape[1] % 2 == 0:
padded_slice[:, pad_size:max_size - pad_size] = hdu.data
else:
padded_slice[:, pad_size:max_size - pad_size - 1] = hdu.data
hdu = fits.PrimaryHDU(padded_slice, maj_header)
fig_n = FITSFigure(hdu, subplot=(3, 1, i + 1), figure=fig)
fig_n.show_grayscale(invert=True, stretch='arcsinh', vmin=0)
fig_n.show_contour(hdu,
levels=[2,
3,
4],
smooth=3)
# zero_vel_posn = hdu.header['CRVAL2'] / \
# np.abs(hdu.header['CDELT2'])
# fig_n._ax1.axhline(zero_vel_posn * 1000., color='k', linestyle='-.',
# linewidth=1, alpha=0.75)
# Add line at M33's center
# Must be in the center, since the pv path is defined wrt to the center
fig_n._ax1.axvline(hdu.shape[1] / 2, color='k', linestyle='-.',
linewidth=1, alpha=0.75)
fig_n._ax1.set_yticklabels(np.array([-300000, -250000, -200000,
-150000, -100000]) / 1000)
# fig_n.set_axis_labels(ylabel='Velocity (km/s)')
# fig_n.hide_axis_labels()
# fig_n.hide_ytick_labels()
if i == 1:
fig_n.set_axis_labels(ylabel='Velocity (km/s)')
else:
fig_n.axis_labels.hide_y()
if i < 2:
fig_n.axis_labels.hide_x()
fig_n.tick_labels.hide_x()
else:
fig_n.set_axis_labels(xlabel='Offset (deg)')
# Put the PA in the upper corner
fig_n.add_label(0.9, 0.75, "{} deg".format(int(pa)),
relative=True, size=12,
bbox={"boxstyle": "square", "facecolor": "w"})
fig_n.add_colorbar()
fig_n.colorbar.set_ticks([0, 5, 10, 20, 40])
fig_n.colorbar.set_font(size=11)
if i == 1:
fig_n.colorbar.set_axis_label_text('Intensity (K)')
fig_n.colorbar.set_axis_label_font(size=12)
plt.subplots_adjust(hspace=0.01)
plt.tight_layout()
fig.savefig(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_PA_0_90_135.png"))
fig.savefig(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_PA_0_90_135.pdf"))
plt.close()
default_figure()
| 32.793722 | 127 | 0.619445 |
'''
Make a figure of the thin pv-slices stacked on top of each other.
'''
from spectral_cube import SpectralCube, Projection
from astropy.io import fits
from astropy import units as u
import numpy as np
from glob import glob
import os
from os.path import join as osjoin
import matplotlib.pyplot as plt
from aplpy import FITSFigure
from paths import (fourteenB_HI_data_wGBT_path, allfigs_path,
fourteenB_wGBT_HI_file_dict)
from constants import hi_freq
from plotting_styles import twocolumn_figure, default_figure
# Make sure the figure directory exists
fig_path = allfigs_path("pvslices")
if not os.path.exists(fig_path):
os.mkdir(fig_path)
pvslice_dir = fourteenB_HI_data_wGBT_path("downsamp_1kms/")
# I need the beam in the cube to convert to K
cube = SpectralCube.read(fourteenB_HI_data_wGBT_path("downsamp_1kms/M33_14B-088_HI.clean.image.GBT_feathered.1kms.fits"))
jybeam_to_K = cube.beam.jtok(hi_freq)
del cube
# Get all pv-slices
filenames = glob(osjoin(pvslice_dir, "M33_14B-088_HI.clean.image.GBT_feathered.1kms_PA_*_pvslice_40.0arcsec_width.fits"))
# The slices go from a PA of 0 to 175 in increments of 5 deg
ordered_filenames = []
pas = np.arange(0, 180, 5)
for pa in pas:
for fname in filenames:
if "PA_{}_".format(pa) in fname:
ordered_filenames.append(fname)
break
# Want to put on a common scale. Grab the max from all slices.
max_val = 0
for fname in ordered_filenames:
hdu = fits.open(fname)[0]
max_slice_val = np.nanmax(hdu.data)
if max_slice_val > max_val:
max_val = max_slice_val
# Split into figures of 6
for i in range(6):
fig = plt.figure(figsize=(8.1, 11.))
for j, n in enumerate(np.arange(6 * i, 6 * (i + 1))):
hdu = fits.open(ordered_filenames[n])[0]
fig_n = FITSFigure(hdu, subplot=(6, 1, j + 1), figure=fig)
fig_n.show_grayscale(invert=True, stretch='arcsinh')
fig_n.show_contour(hdu,
levels=[2 / jybeam_to_K.value,
3 / jybeam_to_K.value,
4 / jybeam_to_K.value],
smooth=3)
zero_vel_posn = hdu.header['CRVAL2'] / \
np.abs(hdu.header['CDELT2'])
fig_n._ax1.axhline(zero_vel_posn * 1000., color='k', linestyle='-.',
linewidth=1, alpha=0.75)
# Add line at M33's center
# Must be in the center, since the pv path is defined wrt to the center
fig_n._ax1.axvline(hdu.shape[1] / 2, color='k', linestyle='-.',
linewidth=1, alpha=0.75)
fig_n._ax1.set_yticklabels(np.array([-300000, -250000, -200000,
-150000, -100000]) / 1000)
# fig_n.set_axis_labels(ylabel='Velocity (km/s)')
fig_n.hide_axis_labels()
fig_n.hide_ytick_labels()
# Put the PA in the upper corner
if i < 4:
fig_n.add_label(0.81, 0.8, "{} deg".format(int(pas[n])),
relative=True, size=14,
bbox={"boxstyle": "square", "facecolor": "w"})
else:
fig_n.add_label(0.2, 0.8, "{} deg".format(int(pas[n])),
relative=True, size=14,
bbox={"boxstyle": "square", "facecolor": "w"})
if j != 5:
fig_n.hide_xaxis_label()
fig_n.hide_xtick_labels()
# if j == 0:
# fig_n.add_colorbar()
# fig_n.colorbar.set_location('top')
# fig_n.colorbar.set_label_properties(size=11)
fig.savefig(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_{}.png".format(i)))
fig.savefig(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_{}.pdf".format(i)))
plt.close()
# Now make a figure of all of the pv-slice paths on the zeroth moment map
mom0 = Projection.from_hdu(fits.open(fourteenB_wGBT_HI_file_dict['Moment0'])[0])
mom0.quicklook()
mom0.FITSFigure.show_regions(osjoin(pvslice_dir, "M33_14B-088_HI.clean.image.GBT_feathered.1kms_pvslice_40.0arcsec_width.reg"))
mom0.FITSFigure.save(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_paths.png"))
mom0.FITSFigure.save(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_paths.pdf"))
# Make a smaller figure for the paper. Include pv-slices along the major,
# minor and warped major (135 deg) axes
twocolumn_figure()
fig = plt.figure(figsize=(8.4, 4.2))
max_size = fits.open(ordered_filenames[0])[0].shape[1]
maj_header = fits.open(ordered_filenames[0])[0].header
for i, pa in zip(range(3), [0, 90, 135]):
idx = np.where(pas == pa)[0]
hdu = fits.open(ordered_filenames[idx])[0]
# Convert to K
hdu = fits.PrimaryHDU(hdu.data * jybeam_to_K.value, hdu.header)
# Reverse the direction of the 135 slice
if i == 2:
hdu = fits.PrimaryHDU(hdu.data[::-1], hdu.header)
if pa != 0:
# Match the major axis slice length to make each the same shape
padded_slice = np.zeros((hdu.shape[0], max_size)) * np.NaN
# Velocity axes will match
pad_size = (max_size - hdu.shape[1]) / 2
if hdu.shape[1] % 2 == 0:
padded_slice[:, pad_size:max_size - pad_size] = hdu.data
else:
padded_slice[:, pad_size:max_size - pad_size - 1] = hdu.data
hdu = fits.PrimaryHDU(padded_slice, maj_header)
fig_n = FITSFigure(hdu, subplot=(3, 1, i + 1), figure=fig)
fig_n.show_grayscale(invert=True, stretch='arcsinh', vmin=0)
fig_n.show_contour(hdu,
levels=[2,
3,
4],
smooth=3)
# zero_vel_posn = hdu.header['CRVAL2'] / \
# np.abs(hdu.header['CDELT2'])
# fig_n._ax1.axhline(zero_vel_posn * 1000., color='k', linestyle='-.',
# linewidth=1, alpha=0.75)
# Add line at M33's center
# Must be in the center, since the pv path is defined wrt to the center
fig_n._ax1.axvline(hdu.shape[1] / 2, color='k', linestyle='-.',
linewidth=1, alpha=0.75)
fig_n._ax1.set_yticklabels(np.array([-300000, -250000, -200000,
-150000, -100000]) / 1000)
# fig_n.set_axis_labels(ylabel='Velocity (km/s)')
# fig_n.hide_axis_labels()
# fig_n.hide_ytick_labels()
if i == 1:
fig_n.set_axis_labels(ylabel='Velocity (km/s)')
else:
fig_n.axis_labels.hide_y()
if i < 2:
fig_n.axis_labels.hide_x()
fig_n.tick_labels.hide_x()
else:
fig_n.set_axis_labels(xlabel='Offset (deg)')
# Put the PA in the upper corner
fig_n.add_label(0.9, 0.75, "{} deg".format(int(pa)),
relative=True, size=12,
bbox={"boxstyle": "square", "facecolor": "w"})
fig_n.add_colorbar()
fig_n.colorbar.set_ticks([0, 5, 10, 20, 40])
fig_n.colorbar.set_font(size=11)
if i == 1:
fig_n.colorbar.set_axis_label_text('Intensity (K)')
fig_n.colorbar.set_axis_label_font(size=12)
plt.subplots_adjust(hspace=0.01)
plt.tight_layout()
fig.savefig(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_PA_0_90_135.png"))
fig.savefig(osjoin(fig_path, "M33_14B-088_pvslices_40arcsec_PA_0_90_135.pdf"))
plt.close()
default_figure()
| 0 | 0 | 0 |
15c7fd943049bc466ce08d3dd9b41f716d5a08fe | 7,376 | py | Python | pySPACE/tools/live/ipmarkers.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 32 | 2015-02-20T09:03:09.000Z | 2022-02-25T22:32:52.000Z | pySPACE/tools/live/ipmarkers.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 5 | 2015-05-18T15:08:40.000Z | 2020-03-05T19:18:01.000Z | pySPACE/tools/live/ipmarkers.py | pyspace/pyspace | 763e62c0e7fa7cfcb19ccee1a0333c4f7e68ae62 | [
"BSD-3-Clause"
] | 18 | 2015-09-28T07:16:38.000Z | 2021-01-20T13:52:19.000Z | import socket
import time
import struct
import threading
import select
import random
import Queue
import warnings
if __name__ == "__main__":
    # Smoke test (Python 2 — note the `print m` statements below):
    # start a local MarkerServer and connect 25 MarkerSocket clients to it.
    markerserver = MarkerServer(port=55555, sync_interval=15)
    markerserver.start()
    sockets = []
    for i in range(25):
        c = MarkerSocket(ip="127.0.0.1", port=55555, name=str("client%d" % i))
        c.start()
        sockets.append(c)
    # Send one random marker per client, pausing up to 1 s between sends.
    for s in sockets:
        mark = str("S%3d" % int(random.random()*255))
        print("sending marker %s with client %s" % (mark, s.name))
        s.send(mark)
        time.sleep(random.random()*1)
    # Drain the server queue; read() returns (None, None) once it is empty.
    while True:
        m = markerserver.read()
        if None in m:
            break
        print m
    print markerserver
    # Stop the clients, then wait one sync interval before stopping the
    # server so the in-flight sync beacons can finish.
    for c in sockets:
        c.stop()
        c.join()
    sockets = []
    for i in range(markerserver.sync_interval*1, 0, -1):
        print("waiting.. %d" % i)
        time.sleep(1)
    print markerserver
    markerserver.stop()
    markerserver.join()
markerserver.join() | 32.069565 | 92 | 0.514235 | import socket
import time
import struct
import threading
import select
import random
import Queue
import warnings
class MarkerSocket(threading.Thread):
    """Client-side thread that pushes event markers to a MarkerServer.

    The thread keeps one TCP connection alive (retrying every second while
    disconnected) and answers the server's clock-sync beacons; send() may
    be called from any thread to transmit a marker packet.
    Wire formats (struct): marker = "bb6sQ" (type, size, 6-byte label,
    ms timestamp); sync beacon = "bb6sQQQQ" (adds four timestamps).
    NOTE: Python 2 code (uses the builtin ``long``).
    """
    def __init__(self, ip="10.250.3.83", port=55555, name="test", **kwargs):
        """Remember the endpoint and client name; connecting happens in run()."""
        super(MarkerSocket, self).__init__(**kwargs)
        self.name = name
        self.ip = ip
        self.port = port
        self.connected = False
        self.running = True
    def send(self, marker):
        """Send ``marker`` (truncated to 6 chars) stamped with local time in ms.

        Warns and drops the marker when the socket is not connected or the
        send fails (the run() loop will then reconnect).
        """
        if not self.connected:
            warnings.warn("%s not sent - socket is not connected!" % marker)
            return
        fmt = "bb6sQ"
        # type=2 marks a marker packet (type=1 is a sync beacon, see server).
        data = struct.pack(fmt, 2, struct.calcsize(fmt), marker[:6], long(time.time()*1000))
        try:
            self.s.send(data)
        except socket.error:
            warnings.warn("%s not sent - socket error: %s" % (marker, socket.errno))
            self.connected = False
    def run(self):
        """Connection loop: (re)connect, then answer sync beacons until stop()."""
        fmt = "bb6sQQQQ"
        while self.running:
            # Reconnect loop: retry once per second until the server accepts.
            while (not self.connected) and self.running:
                try:
                    self.s = socket.socket()
                    self.s.connect((self.ip,self.port))
                    self.s.send(self.name)  # first payload: our client name
                except socket.error:
                    time.sleep(1)
                    continue
                self.connected = True
                break
            # Service loop: respond to sync beacons coming from the server.
            while self.connected and self.running:
                (r,w,e) = select.select([self.s], [], [], .01)
                if self.s in r:
                    _t2 = long(time.time()*1000)  # receive timestamp, taken ASAP
                    beacon = ""
                    try:
                        beacon = str(self.s.recv(struct.calcsize(fmt)))
                    except socket.error:
                        warnings.warn("%s: error during recv!" % self.name)
                        self.connected = False
                    if len(beacon) == 0:
                        warnings.warn("%s: connection closed by remote!" % self.name)
                        self.connected = False
                        continue
                    # Fill in t2 (our receive time) and t3 (our send time),
                    # append '*' to the progress field, and echo the beacon.
                    (typ, size, progress, t1, t2, t3, t4) = struct.unpack(fmt, beacon)
                    t2 = _t2
                    t3 = long(time.time()*1000)
                    progress += "*"
                    beacon = struct.pack(fmt, typ, size, progress, t1, t2, t3, t4)
                    self.s.send(beacon)
    def stop(self):
        """Ask the thread to terminate; its loops exit on the next pass."""
        self.running = False
class MarkerServer(threading.Thread):
    """Accepts marker-client connections and aggregates their markers.

    Listens on ``port``; every accepted connection is handed to its own
    MarkerAcquisitionThread, which pushes (marker, timestamp) tuples into a
    shared Queue.  Consumers poll read().
    NOTE: Python 2 code (``import Queue``).
    """
    def __init__(self, port=55555, sync_interval=10, **kwargs):
        """Bind and listen immediately; accepting starts in run()."""
        super(MarkerServer, self).__init__(**kwargs)
        self.s = socket.socket()
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
        self.s.bind(("", port))  # all interfaces
        self.s.listen(50)
        self.children = []  # one MarkerAcquisitionThread per connected client
        self.sync_interval = sync_interval  # seconds between clock syncs
        self.queue = Queue.Queue()  # shared (marker, timestamp) sink
        self.running = True
    def stop(self):
        """Ask the accept loop to terminate and clean up its children."""
        self.running = False
    def __repr__(self):
        s = super(MarkerServer, self).__repr__()
        return str("%s\n\tconnected to %d clients" % (s, len(self.children)))
    def run(self):
        """Accept loop; on shutdown, stop and join all child threads."""
        while self.running:
            (r,w,e) = select.select([self.s], [], [], .25)
            if self.s in r:
                (client, address) = self.s.accept()
                print("connection requested %s" % (str(address)))
                c = MarkerAcquisitionThread(client, address,
                                            sync_interval=self.sync_interval,
                                            queue=self.queue)
                c.start()
                self.children.append(c)
            self.join_stopped_threads()  # reap children that ended on their own
        self.s.close()
        for c in self.children:
            if c.isAlive():
                c.stop()
                c.join()
        self.children = []
    def join_stopped_threads(self):
        """Join children whose run() already returned (they stay in the list)."""
        for c in self.children:
            if not c.isAlive():
                c.join()
    def read(self):
        """Non-blocking pop of one (marker, timestamp); (None, None) if empty."""
        if not self.queue.empty():
            return self.queue.get(block=False)
        return None, None
class MarkerAcquisitionThread(threading.Thread):
    """Per-client worker: receives marker packets, periodically measures the
    client/server clock offset with a four-timestamp exchange (NTP-style
    formula in sync_end) and enqueues offset-corrected markers."""
    def __init__(self, client, address, sync_interval=10, queue=None, **kwargs):
        super(MarkerAcquisitionThread, self).__init__(**kwargs)
        self.client = client
        self.address = address
        self.sync_interval = sync_interval  # seconds between sync exchanges
        self.delay_ms = 0  # estimated clock offset in milliseconds
        # First payload sent by the client is its display name.
        self.name = self.client.recv(128)
        self.running = True
        self.queue = queue
        print("marker source %s@%s connected" % (self.name, self.address[0]))
    def stop(self):
        """Ask run() to close the connection and exit."""
        self.running = False
    def __repr__(self):
        s = super(MarkerAcquisitionThread, self).__repr__()
        return str("%s:%s" % (s, self.address))
    def run(self):
        """Receive loop: dispatch packets by size (marker vs. sync reply)
        and start a new sync exchange every sync_interval seconds."""
        sync_fmt = "bb6sQQQQ"  # sync beacon: type, size, tag, t1..t4
        mark_fmt = "bb6sQ"     # marker packet: type, size, label, timestamp
        last_sync = 0.0
        while self.running:
            (r,w,e) = select.select([self.client], [], [], .01)
            if self.client in r:
                try:
                    msg = self.client.recv(struct.calcsize(sync_fmt))
                except socket.error as e:
                    print("client %s@%s: %s" % (self.name, self.address[0], e.strerror))
                    self.stop()
                    continue
                if len(msg) == 0:
                    print("client %s@%s: socket closed" % (self.name, self.address[0]))
                    self.stop()
                    continue
                elif len(msg) == struct.calcsize(mark_fmt):
                    self.show_marker(msg, mark_fmt)
                elif len(msg) == struct.calcsize(sync_fmt):
                    self.sync_end(msg, sync_fmt)
                else:
                    pass  # partial/unknown packet size: ignored
            if int(time.time()-last_sync) > self.sync_interval:
                self.sync_start(sync_fmt)
                last_sync = time.time()
        self.client.close()
    def sync_start(self, fmt):
        """Send a sync beacon stamped with t1 = server send time (ms)."""
        t1 = long(time.time()*1000)
        beacon = struct.pack(fmt, 1, struct.calcsize(fmt), "*", t1, 0, 0, 0)
        self.client.send(beacon)
    def sync_end(self, beacon, fmt):
        """Finish a sync exchange: t4 = server receive time (ms); the
        offset estimate is ((t2-t4)+(t3-t1))/2."""
        (typ, size, progress, t1, t2, t3, _t4) = struct.unpack(fmt, beacon)
        t4 = long(time.time()*1000)
        self.delay_ms = ((int(t2)-int(t4))+(int(t3)-int(t1)))/2
        # print("SYNC done! delay: %f [ms]" % self.delay_ms)
    def show_marker(self, marker, fmt):
        """Strip NUL padding from the marker label, correct its timestamp
        by the estimated offset and enqueue the pair."""
        (typ, size, mark, t1) = struct.unpack(fmt, marker)
        self.queue.put((str(mark).strip("\0"), int(t1-self.delay_ms)))
if __name__ == "__main__":
    # Smoke test: start a server, connect 25 local clients, fire one random
    # marker per client, then drain and print the received queue.
    markerserver = MarkerServer(port=55555, sync_interval=15)
    markerserver.start()
    sockets = []
    for i in range(25):
        c = MarkerSocket(ip="127.0.0.1", port=55555, name=str("client%d" % i))
        c.start()
        sockets.append(c)
    for s in sockets:
        mark = str("S%3d" % int(random.random()*255))
        print("sending marker %s with client %s" % (mark, s.name))
        s.send(mark)
        time.sleep(random.random()*1)
    # Drain the queue: read() yields (None, None) when empty.
    while True:
        m = markerserver.read()
        if None in m:
            break
        print m
    print markerserver
    for c in sockets:
        c.stop()
        c.join()
    sockets = []
    # Let one sync interval elapse so the server reaps closed connections.
    for i in range(markerserver.sync_interval*1, 0, -1):
        print("waiting.. %d" % i)
        time.sleep(1)
    print markerserver
    markerserver.stop()
    markerserver.join()
8b90fb42f6be668ea7c46a396d71f58c26566275 | 1,256 | py | Python | spotty/providers/gcp/config/instance_config.py | Inculus/spotty | 56863012668a6c13ad13c2a04f900047e229fbe6 | [
"MIT"
] | 1 | 2020-07-17T07:02:09.000Z | 2020-07-17T07:02:09.000Z | spotty/providers/gcp/config/instance_config.py | Inculus/spotty | 56863012668a6c13ad13c2a04f900047e229fbe6 | [
"MIT"
] | null | null | null | spotty/providers/gcp/config/instance_config.py | Inculus/spotty | 56863012668a6c13ad13c2a04f900047e229fbe6 | [
"MIT"
] | null | null | null | from spotty.config.abstract_instance_config import AbstractInstanceConfig
from spotty.providers.gcp.config.validation import validate_instance_parameters
VOLUME_TYPE_DISK = 'disk'
DEFAULT_IMAGE_NAME = 'spotty'
| 25.12 | 93 | 0.677548 | from spotty.config.abstract_instance_config import AbstractInstanceConfig
from spotty.providers.gcp.config.validation import validate_instance_parameters
VOLUME_TYPE_DISK = 'disk'
DEFAULT_IMAGE_NAME = 'spotty'
class InstanceConfig(AbstractInstanceConfig):
    """Spotty instance configuration for the GCP provider.

    Wraps the validated ``instance`` section of the project config and
    exposes its fields as read-only typed properties.
    """
    def __init__(self, config: dict):
        super().__init__(config)
        # Validate GCP-specific parameters right after the base parsing.
        self._params = validate_instance_parameters(self._params)
    @property
    def project_id(self) -> str:
        """GCP project ID hosting the instance."""
        return self._params['projectId']
    @property
    def zone(self) -> str:
        """GCP zone where the instance is created."""
        return self._params['zone']
    @property
    def machine_type(self) -> str:
        """GCE machine type."""
        return self._params['machineType']
    @property
    def gpu(self) -> dict:
        """GPU configuration parameters."""
        return self._params['gpu']
    @property
    def on_demand(self) -> bool:
        """True when an on-demand (non-preemptible) instance is requested."""
        return self._params['onDemandInstance']
    @property
    def boot_disk_size(self) -> int:
        """Boot disk size."""
        return self._params['bootDiskSize']
    @property
    def image_name(self) -> str:
        """Configured image name, falling back to the Spotty default."""
        return self._params['imageName'] or DEFAULT_IMAGE_NAME
    @property
    def has_image_name(self) -> bool:
        """Whether an image name was set explicitly."""
        return bool(self._params['imageName'])
    @property
    def image_url(self) -> str:
        """Full image URL, if provided."""
        return self._params['imageUrl']
f6f544aac80cade218109ad112302c682a95ae81 | 10,698 | py | Python | cloud_dns/entry_points.py | cogniteev/cloud-dns | c8e6aae8adef0d627d4bffab565289711d2f4a8c | [
"Apache-2.0"
] | null | null | null | cloud_dns/entry_points.py | cogniteev/cloud-dns | c8e6aae8adef0d627d4bffab565289711d2f4a8c | [
"Apache-2.0"
] | null | null | null | cloud_dns/entry_points.py | cogniteev/cloud-dns | c8e6aae8adef0d627d4bffab565289711d2f4a8c | [
"Apache-2.0"
] | null | null | null |
import argparse
import itertools
import logging
import os
import os.path as osp
import StringIO
import signal
import sys
import threading
import time
from dnslib import RR,QTYPE,RCODE
from dnslib.server import DNSServer,DNSHandler,BaseResolver,DNSLogger
from .config import (
DEFAULT_CONFIG_PATH,
GSDriver,
GStorageKeybaseProfile,
Profile,
Profiles,
)
def config_push(profile, bucket, **kwargs):
    """Push the encrypted configuration of a profile to Google Storage.

    :param profile: profile to push (a directory in ~/.config/cloud-dns/)
    :param bucket: the destination Google Storage bucket
    :param config_dir: absolute path to Cloud DNS root config dir
        (default ~/.config/cloud-dns)
    """
    GStorageKeybaseProfile(profile, GSDriver, bucket, **kwargs).push()
class ZoneResolver(BaseResolver):
    """
    Simple fixed zone file resolver.

    The zone is built by ``load()`` from ``zone_file_generator()`` and,
    when ``ttl`` is positive, refreshed by a daemon thread every ``ttl``
    seconds.
    """
    def __init__(self, zone_file_generator, glob=False, ttl=3600):
        """
        Initialise resolver from zone file.
        Stores RRs as a list of (label,type,rr) tuples
        If 'glob' is True use glob match against zone file
        """
        self.glob = glob
        self.eq = 'matchGlob' if glob else '__eq__'
        self.zone_file_generator = zone_file_generator
        self.load()
        if ttl > 0:
            thread = threading.Thread(target=self.reload, args=(ttl,))
            thread.daemon = True
            thread.start()
    def load(self):
        """(Re)build the in-memory zone from the zone-file generator.

        Fix: __init__ and the refresh thread call this method, but it was
        missing from this class, so construction raised AttributeError.
        """
        logging.info("Loading DNS information from cloud providers")
        self.zone = [(rr.rname, QTYPE[rr.rtype], rr) for rr in RR.fromZone(self.zone_file_generator())]
    def reload(self, ttl):
        """Daemon loop: refresh the zone every *ttl* seconds."""
        while True:
            time.sleep(ttl)
            logging.info("Updating DNS information from cloud providers")
            self.load()
    def resolve(self, request, handler):
        """
        Respond to DNS request - parameters are request packet & handler.
        Method is expected to return DNS response
        """
        reply = request.reply()
        qname = request.q.qname
        qtype = QTYPE[request.q.qtype]
        local_zone = self.zone
        for name, rtype, rr in local_zone:
            # Check if label & type match
            if getattr(qname, self.eq)(name) and (qtype == rtype or
                                                  qtype == 'ANY' or
                                                  rtype == 'CNAME'):
                # If we have a glob match fix reply label
                if self.glob:
                    import copy  # fix: `copy` is not imported at module level
                    a = copy.copy(rr)
                    a.rname = qname
                    reply.add_answer(a)
                else:
                    reply.add_answer(rr)
                # Check for A/AAAA records associated with reply and
                # add in additional section
                if rtype in ['CNAME', 'NS', 'MX', 'PTR']:
                    for a_name, a_rtype, a_rr in local_zone:
                        if a_name == rr.rdata.label and a_rtype in ['A', 'AAAA']:
                            reply.add_ar(a_rr)
        if not reply.rr:
            reply.header.rcode = RCODE.SERVFAIL
        return reply
def update_etc_hosts_file(hostip_tuples, output_file=None):
    """Update the CloudDNS-managed block in /etc/hosts.

    Content outside the CloudDNS markers is preserved; entries between the
    markers are replaced by *hostip_tuples*.  The file is created when it
    does not exist yet.

    :param hostip_tuples: iterable of (hosts, ip) tuples, where *hosts* is
        a list of names sharing the same address
    :param output_file: destination file, default is /etc/hosts
    """
    BEGIN_MARKUP = '# CloudDNS prelude - DO NOT REMOVE\n'
    END_MARKUP = '# CloudDNS epilogue - DO NOT REMOVE\n'
    output_file = output_file or '/etc/hosts'
    if not osp.isfile(output_file):
        # touch the file so it can be opened in r+ mode below
        with open(output_file, 'a'):
            os.utime(output_file, None)
    with open(output_file, 'r+') as etc_hosts:
        lines = etc_hosts.readlines()
        etc_hosts.seek(0)
        etc_hosts.truncate(0)
        previous_content_replaced = False
        between_markups = False
        for line in lines:
            if not between_markups:
                if line == BEGIN_MARKUP:
                    between_markups = True
                etc_hosts.write(line)
            else:
                if line == END_MARKUP:
                    previous_content_replaced = True
                    for hosts, ip in hostip_tuples:
                        etc_hosts.write("{} {}\n".format(ip.ljust(15, ' '), ' '.join(hosts)))
                    between_markups = False
                    # bug fix: re-emit only the closing marker here; stale
                    # entries between the markers are dropped, otherwise the
                    # managed block grew with duplicates on every update
                    etc_hosts.write(line)
        if not previous_content_replaced:
            etc_hosts.write(BEGIN_MARKUP)
            for hosts, ip in hostip_tuples:
                etc_hosts.write("{} {}\n".format(ip.ljust(15, ' '), ' '.join(hosts)))
            etc_hosts.write(END_MARKUP)
def etc_hosts_update(output_file=None, **kwargs):
    """Refresh /etc/hosts with every node from the configured projects.

    :param output_file: destination file, default is /etc/hosts
    """
    entries = etc_hosts_generator(**kwargs)
    update_etc_hosts_file(entries, output_file)
def etc_hosts_generator(**kwargs):
    """Return an iterator of (hosts, ip) tuples for every node registered
    in the configured projects.
    """
    # Collect one generator per project (eagerly, like the profile listing),
    # then chain them into a single stream.
    per_project = [
        project.get_hostip_tuples()
        for profile in Profiles(**kwargs).list()
        for project in profile.projects.values()
    ]
    return itertools.chain.from_iterable(per_project)
def etc_hosts_list(**kwargs):
    """Print to standard output nodes available in all configured projects
    """
    # One "<ip> <host> [alias...]" line per node (Python 2 print statement).
    for hosts, ip in etc_hosts_generator(**kwargs):
        print "{} {}".format(ip.ljust(15, ' '), ' '.join(hosts))
def cloud_dns(args=None):
    """cloud-dns command line entry point.

    Builds the argument parser for the config/etc-hosts/server sub-commands,
    configures logging verbosity and dispatches to the handler stored in
    ``args.func``.

    :param args: argument list, defaults to sys.argv[1:]
    """
    args = args or sys.argv[1:]
    from .version import version
    parser = argparse.ArgumentParser(
        description="DNS utilities on top of Apache libcloud"
    )
    parser.add_argument(
        '-V', '--version',
        action='version',
        # bug fix: argparse only interpolates %(prog)s here; the previous
        # '%(proj)s' raised a formatting error when --version was used
        version='%(prog)s ' + version
    )
    parser.add_argument(
        '-v', '--verbose',
        action='count',
        help='Verbose mode, -vv for more details, -vvv for 3rd-parties logs as well'
    )
    parser.add_argument(
        '-c', '--config-dir',
        help='Specify config root path [default: %(default)s]',
        dest='config_path',
        default=DEFAULT_CONFIG_PATH
    )
    subparsers = parser.add_subparsers(help='top commands')
    config_parser = subparsers.add_parser(
        'config',
        help='Manipulate DNS cloud configuration'
    )
    config_subparsers = config_parser.add_subparsers(help='config commands')
    config_push_parser = config_subparsers.add_parser(
        'push',
        help='Push configuration to Google Storage'
    )
    config_push_parser.add_argument('profile')
    config_push_parser.add_argument('bucket')
    config_push_parser.set_defaults(func=config_push)
    config_pull_parser = config_subparsers.add_parser(
        'pull',
        help='Retrieve latest configuration from Google Storage'
    )
    config_pull_parser.add_argument('profile')
    config_pull_parser.add_argument('bucket')
    config_pull_parser.add_argument(
        "identity",
        help='Keybase signature to use to decrypt configuration, for instance: github://tristan0x'
    )
    # kept next to its parser for consistency with the other sub-commands
    config_pull_parser.set_defaults(func=config_pull)
    etc_hosts_parser = subparsers.add_parser(
        'etc-hosts',
        help='Manipulate DNS cloud configuration'
    )
    etc_hosts_subparsers = etc_hosts_parser.add_subparsers(help='etc-hosts commands')
    etc_hosts_update_parser = etc_hosts_subparsers.add_parser(
        "update",
        help='Required super-user privileges'
    )
    etc_hosts_update_parser.add_argument(
        # '--output' added as the documented spelling; the historical typo
        # '--ouput' is kept for backward compatibility
        '-o', '--ouput', '--output',
        dest='output_file',
        default='/etc/hosts',
        help='Output file [default: %(default)s]'
    )
    etc_hosts_update_parser.set_defaults(func=etc_hosts_update)
    etc_hosts_list_parser = etc_hosts_subparsers.add_parser(
        "list",
        help="List nodes in /etc/hosts format"
    )
    etc_hosts_list_parser.set_defaults(func=etc_hosts_list)
    dns_server_parser = subparsers.add_parser(
        'server',
        help='Start DNS server'
    )
    dns_server_subparsers = dns_server_parser.add_subparsers(help='server commands')
    dns_server_zone_parser = dns_server_subparsers.add_parser(
        "zone",
        help='Show DNS zone file'
    )
    dns_server_zone_parser.set_defaults(func=server_zone_list)
    dns_server_start_parser = dns_server_subparsers.add_parser(
        "start",
        help='Start DNS server'
    )
    dns_server_start_parser.add_argument(
        '--zone',
        default=None,
        help='Optional DNS zone file ("-" for stdin)'
    )
    dns_server_start_parser.add_argument(
        '--ttl',
        default=3600,
        type=int,
        help='Profile reload interval (in seconds) [default: %(default)s]'
    )
    dns_server_start_parser.set_defaults(func=server_start)
    args = parser.parse_args(args)
    # Map -v/-vv to app log level; third-party loggers stay quiet unless -vvv.
    log_level = logging.WARN
    third_parties_log_level = logging.WARN
    if args.verbose:
        if args.verbose > 1:
            log_level = logging.DEBUG
        else:
            log_level = logging.INFO
        if args.verbose >= 3:
            third_parties_log_level = logging.INFO
    logging.basicConfig(level=log_level)
    for logger in [
        'boto',
        'gnupg',
        'oauth2client',
        'oauth2_client',
        'requests',
    ]:
        logging.getLogger(logger).setLevel(third_parties_log_level)
    # Every handler accepts **kwargs, so passing the whole namespace
    # (including 'func' itself) is safe.
    args.func(**vars(args))
| 34.178914 | 101 | 0.617125 |
import argparse
import itertools
import logging
import os
import os.path as osp
import StringIO
import signal
import sys
import threading
import time
from dnslib import RR,QTYPE,RCODE
from dnslib.server import DNSServer,DNSHandler,BaseResolver,DNSLogger
from .config import (
DEFAULT_CONFIG_PATH,
GSDriver,
GStorageKeybaseProfile,
Profile,
Profiles,
)
def config_pull(profile, bucket, identity, **kwargs):
    """Fetch and decrypt the latest profile configuration from Google Storage.

    :param profile: profile to update (a directory in ~/.config/cloud-dns/)
    :param bucket: the source Google Storage bucket
    :param identity: keybase signature, e.g. ``github://tristan0x``
    """
    keybase_id = identity.split("://", 1)
    GStorageKeybaseProfile(profile, GSDriver, bucket, keybase_id, **kwargs).pull()
def config_push(profile, bucket, **kwargs):
    """Push the encrypted configuration of a profile to Google Storage.

    :param profile: profile to push (a directory in ~/.config/cloud-dns/)
    :param bucket: the destination Google Storage bucket
    :param config_dir: absolute path to Cloud DNS root config dir
        (default ~/.config/cloud-dns)
    """
    GStorageKeybaseProfile(profile, GSDriver, bucket, **kwargs).push()
class ZoneResolver(BaseResolver):
    """
    Simple fixed zone file resolver.

    The zone is built by ``load()`` and, when ``ttl`` is positive, refreshed
    by a daemon thread every ``ttl`` seconds.
    """
    def __init__(self, zone_file_generator, glob=False, ttl=3600):
        """
        Initialise resolver from zone file.
        Stores RRs as a list of (label,type,rr) tuples
        If 'glob' is True use glob match against zone file
        """
        self.glob = glob
        self.eq = 'matchGlob' if glob else '__eq__'
        self.zone_file_generator = zone_file_generator
        self.load()
        if ttl > 0:
            thread = threading.Thread(target=self.reload, args=(ttl,))
            thread.daemon = True
            thread.start()
    def load(self):
        """(Re)build the in-memory zone from the zone-file generator."""
        logging.info("Loading DNS information from cloud providers")
        self.zone = [(rr.rname, QTYPE[rr.rtype], rr) for rr in RR.fromZone(self.zone_file_generator())]
    def reload(self, ttl):
        """Daemon loop: refresh the zone every *ttl* seconds."""
        while True:
            time.sleep(ttl)
            logging.info("Updating DNS information from cloud providers")
            self.load()
    def resolve(self, request, handler):
        """
        Respond to DNS request - parameters are request packet & handler.
        Method is expected to return DNS response
        """
        reply = request.reply()
        qname = request.q.qname
        qtype = QTYPE[request.q.qtype]
        local_zone = self.zone
        for name, rtype, rr in local_zone:
            # Check if label & type match
            if getattr(qname, self.eq)(name) and (qtype == rtype or
                                                  qtype == 'ANY' or
                                                  rtype == 'CNAME'):
                # If we have a glob match fix reply label
                if self.glob:
                    import copy  # fix: `copy` is not imported at module level
                    a = copy.copy(rr)
                    a.rname = qname
                    reply.add_answer(a)
                else:
                    reply.add_answer(rr)
                # Check for A/AAAA records associated with reply and
                # add in additional section
                if rtype in ['CNAME', 'NS', 'MX', 'PTR']:
                    for a_name, a_rtype, a_rr in local_zone:
                        if a_name == rr.rdata.label and a_rtype in ['A', 'AAAA']:
                            reply.add_ar(a_rr)
        if not reply.rr:
            reply.header.rcode = RCODE.SERVFAIL
        return reply
def server_start(zone=None, ttl=3600, **kwargs):
    """Start a UDP DNS server on port 53.

    :param zone: zone file path, "-" for stdin, or None to build the zone
        in memory from the configured cloud providers
    :param ttl: zone refresh interval in seconds, passed to ZoneResolver
    """
    if zone is None:  # idiom fix: identity comparison with None
        def zone_builder():
            # Build the zone in memory from every configured profile.
            zone = StringIO.StringIO()
            server_zone_list(zone=zone)
            zone.seek(0)
            return zone
    elif zone == '-':
        def zone_builder():
            return sys.stdin
    else:
        def zone_builder():
            return open(zone)
    resolver = ZoneResolver(zone_builder, False, ttl)
    logger = DNSLogger("request,reply,truncated,error", False)
    def reload_dns_config(signum, frame):
        # SIGUSR1 forces an immediate zone refresh.
        if signum == signal.SIGUSR1:
            resolver.load()
    signal.signal(signal.SIGUSR1, reload_dns_config)
    udp_server = DNSServer(resolver,
                           port=53,
                           address="",
                           logger=logger)
    udp_server.start()
def server_zone_list(zone=None, **kwargs):
    """Write the aggregated DNS zone of every profile to *zone*.

    :param zone: writable file object, defaults to standard output
    """
    out = zone or sys.stdout
    for profile in Profiles(**kwargs).list():
        profile.write_dns_file(out)
def update_etc_hosts_file(hostip_tuples, output_file=None):
    """Update the CloudDNS-managed block in /etc/hosts.

    Content outside the CloudDNS markers is preserved; entries between the
    markers are replaced by *hostip_tuples*.  The file is created when it
    does not exist yet.

    :param hostip_tuples: iterable of (hosts, ip) tuples, where *hosts* is
        a list of names sharing the same address
    :param output_file: destination file, default is /etc/hosts
    """
    BEGIN_MARKUP = '# CloudDNS prelude - DO NOT REMOVE\n'
    END_MARKUP = '# CloudDNS epilogue - DO NOT REMOVE\n'
    output_file = output_file or '/etc/hosts'
    if not osp.isfile(output_file):
        # touch the file so it can be opened in r+ mode below
        with open(output_file, 'a'):
            os.utime(output_file, None)
    with open(output_file, 'r+') as etc_hosts:
        lines = etc_hosts.readlines()
        etc_hosts.seek(0)
        etc_hosts.truncate(0)
        previous_content_replaced = False
        between_markups = False
        for line in lines:
            if not between_markups:
                if line == BEGIN_MARKUP:
                    between_markups = True
                etc_hosts.write(line)
            else:
                if line == END_MARKUP:
                    previous_content_replaced = True
                    for hosts, ip in hostip_tuples:
                        etc_hosts.write("{} {}\n".format(ip.ljust(15, ' '), ' '.join(hosts)))
                    between_markups = False
                    # bug fix: re-emit only the closing marker here; stale
                    # entries between the markers are dropped, otherwise the
                    # managed block grew with duplicates on every update
                    etc_hosts.write(line)
        if not previous_content_replaced:
            etc_hosts.write(BEGIN_MARKUP)
            for hosts, ip in hostip_tuples:
                etc_hosts.write("{} {}\n".format(ip.ljust(15, ' '), ' '.join(hosts)))
            etc_hosts.write(END_MARKUP)
def etc_hosts_update(output_file=None, **kwargs):
    """Refresh /etc/hosts with every node from the configured projects.

    :param output_file: destination file, default is /etc/hosts
    """
    entries = etc_hosts_generator(**kwargs)
    update_etc_hosts_file(entries, output_file)
def etc_hosts_generator(**kwargs):
    """Return an iterator of (hosts, ip) tuples for every node registered
    in the configured projects.
    """
    # Collect one generator per project (eagerly, like the profile listing),
    # then chain them into a single stream.
    per_project = [
        project.get_hostip_tuples()
        for profile in Profiles(**kwargs).list()
        for project in profile.projects.values()
    ]
    return itertools.chain.from_iterable(per_project)
def etc_hosts_list(**kwargs):
    """Print to standard output nodes available in all configured projects
    """
    # One "<ip> <host> [alias...]" line per node (Python 2 print statement).
    for hosts, ip in etc_hosts_generator(**kwargs):
        print "{} {}".format(ip.ljust(15, ' '), ' '.join(hosts))
def cloud_dns(args=None):
    """cloud-dns command line entry point.

    Builds the argument parser for the config/etc-hosts/server sub-commands,
    configures logging verbosity and dispatches to the handler stored in
    ``args.func``.

    :param args: argument list, defaults to sys.argv[1:]
    """
    args = args or sys.argv[1:]
    from .version import version
    parser = argparse.ArgumentParser(
        description="DNS utilities on top of Apache libcloud"
    )
    parser.add_argument(
        '-V', '--version',
        action='version',
        # bug fix: argparse only interpolates %(prog)s here; the previous
        # '%(proj)s' raised a formatting error when --version was used
        version='%(prog)s ' + version
    )
    parser.add_argument(
        '-v', '--verbose',
        action='count',
        help='Verbose mode, -vv for more details, -vvv for 3rd-parties logs as well'
    )
    parser.add_argument(
        '-c', '--config-dir',
        help='Specify config root path [default: %(default)s]',
        dest='config_path',
        default=DEFAULT_CONFIG_PATH
    )
    subparsers = parser.add_subparsers(help='top commands')
    config_parser = subparsers.add_parser(
        'config',
        help='Manipulate DNS cloud configuration'
    )
    config_subparsers = config_parser.add_subparsers(help='config commands')
    config_push_parser = config_subparsers.add_parser(
        'push',
        help='Push configuration to Google Storage'
    )
    config_push_parser.add_argument('profile')
    config_push_parser.add_argument('bucket')
    config_push_parser.set_defaults(func=config_push)
    config_pull_parser = config_subparsers.add_parser(
        'pull',
        help='Retrieve latest configuration from Google Storage'
    )
    config_pull_parser.add_argument('profile')
    config_pull_parser.add_argument('bucket')
    config_pull_parser.add_argument(
        "identity",
        help='Keybase signature to use to decrypt configuration, for instance: github://tristan0x'
    )
    # kept next to its parser for consistency with the other sub-commands
    config_pull_parser.set_defaults(func=config_pull)
    etc_hosts_parser = subparsers.add_parser(
        'etc-hosts',
        help='Manipulate DNS cloud configuration'
    )
    etc_hosts_subparsers = etc_hosts_parser.add_subparsers(help='etc-hosts commands')
    etc_hosts_update_parser = etc_hosts_subparsers.add_parser(
        "update",
        help='Required super-user privileges'
    )
    etc_hosts_update_parser.add_argument(
        # '--output' added as the documented spelling; the historical typo
        # '--ouput' is kept for backward compatibility
        '-o', '--ouput', '--output',
        dest='output_file',
        default='/etc/hosts',
        help='Output file [default: %(default)s]'
    )
    etc_hosts_update_parser.set_defaults(func=etc_hosts_update)
    etc_hosts_list_parser = etc_hosts_subparsers.add_parser(
        "list",
        help="List nodes in /etc/hosts format"
    )
    etc_hosts_list_parser.set_defaults(func=etc_hosts_list)
    dns_server_parser = subparsers.add_parser(
        'server',
        help='Start DNS server'
    )
    dns_server_subparsers = dns_server_parser.add_subparsers(help='server commands')
    dns_server_zone_parser = dns_server_subparsers.add_parser(
        "zone",
        help='Show DNS zone file'
    )
    dns_server_zone_parser.set_defaults(func=server_zone_list)
    dns_server_start_parser = dns_server_subparsers.add_parser(
        "start",
        help='Start DNS server'
    )
    dns_server_start_parser.add_argument(
        '--zone',
        default=None,
        help='Optional DNS zone file ("-" for stdin)'
    )
    dns_server_start_parser.add_argument(
        '--ttl',
        default=3600,
        type=int,
        help='Profile reload interval (in seconds) [default: %(default)s]'
    )
    dns_server_start_parser.set_defaults(func=server_start)
    args = parser.parse_args(args)
    # Map -v/-vv to app log level; third-party loggers stay quiet unless -vvv.
    log_level = logging.WARN
    third_parties_log_level = logging.WARN
    if args.verbose:
        if args.verbose > 1:
            log_level = logging.DEBUG
        else:
            log_level = logging.INFO
        if args.verbose >= 3:
            third_parties_log_level = logging.INFO
    logging.basicConfig(level=log_level)
    for logger in [
        'boto',
        'gnupg',
        'oauth2client',
        'oauth2_client',
        'requests',
    ]:
        logging.getLogger(logger).setLevel(third_parties_log_level)
    # Every handler accepts **kwargs, so passing the whole namespace
    # (including 'func' itself) is safe.
    args.func(**vars(args))
31cc762447ae26742a81d5ea2342bd2de3e06526 | 19,983 | py | Python | src/strangan.py | azmfaridee/strangan-chase-2021 | 4d225ea073a6e890a235e43f5860cd3b8e6eae12 | [
"Apache-2.0"
] | null | null | null | src/strangan.py | azmfaridee/strangan-chase-2021 | 4d225ea073a6e890a235e43f5860cd3b8e6eae12 | [
"Apache-2.0"
] | null | null | null | src/strangan.py | azmfaridee/strangan-chase-2021 | 4d225ea073a6e890a235e43f5860cd3b8e6eae12 | [
"Apache-2.0"
] | null | null | null | # %%
import collections
import os
import sys
import time
from pprint import pformat
import ipdb
import numpy as np
import torch
import torch.autograd
import torch.nn as nn
from loguru import logger
from sklearn.metrics import precision_recall_fscore_support
from torch.optim import SGD, Adam
from torch.utils.data import DataLoader
from tqdm import tqdm
from dataloader import InfiniteDataLoader
from dataset import ActivityDataset
from helpers import make_arg_parser
from net_utils import set_deterministic_and_get_rng
from nets import Classifier, Discriminator, SpatialTransformerBlock
logger.remove()
logger.add(sys.stdout, colorize=True, format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> <level>{message}</level>")
class StranGAN(object):
    """
    STranGAN: Adversarially-learnt Spatial Transformer for scalable human activity recognition

    NOTE(review): this copy references self.classifier / self.generator /
    self.optim_c / self.clf_loss / self.test and train_gan without a visible
    __init__ or test method -- confirm the constructor exists upstream.
    """
    def train_clf(self, source_loader_train, source_loader_val, target_loader_val, args):
        """
        Trains the source classifier.

        Bug fix: the stray @torch.no_grad() decorator was removed -- this
        method calls loss_fc.backward()/optim_c.step(), which fail under
        no_grad because no graph is recorded.

        Returns a dict of evaluation metrics for source train/val and
        target val splits.
        """
        source_metrics_train = {}
        source_metrics_val = {}
        target_metrics_val = {}
        if args.clf_ckpt != '' and os.path.exists(args.clf_ckpt):
            # Reuse a pre-trained checkpoint instead of training.
            logger.info(f'Loading Classifier from {args.clf_ckpt} ...')
            self.classifier.load_state_dict(torch.load(args.clf_ckpt))
            logger.success('Model loaded!')
            source_metrics_train = self.test(self.classifier, source_loader_train, 'source (train)')
            source_metrics_val = self.test(self.classifier, source_loader_val, 'source (val)   ')
            target_metrics_val = self.test(self.classifier, target_loader_val, 'target (val)   ')
        else:
            for epoch in range(1, args.n_epochs + 1):
                ts = time.time()
                self.classifier.train()
                for batch_idx, (X_source, y_source) in enumerate(source_loader_train):
                    X_source = X_source.to(self.device).float()
                    y_source = y_source.to(self.device)
                    self.optim_c.zero_grad()
                    y_source_pred = self.classifier(X_source)
                    loss_fc = self.clf_loss(y_source_pred, y_source)
                    loss_fc.backward()
                    self.optim_c.step()
                    if batch_idx % args.log_interval == 0:
                        logger.info(
                            f'CLF train epoch: {epoch:2d} {100. * batch_idx / len(source_loader_train):3.0f}%'
                            + f' {batch_idx * len(X_source):5d}/{len(source_loader_train.dataset)} lC={loss_fc.item():.6f}'
                        )
                te = time.time()
                logger.info(f'Took {(te - ts):.2f} seconds this epoch')
            logger.info('------------------------------------------------')
            source_metrics_train = self.test(self.classifier, source_loader_train, 'source (train)')
            source_metrics_val = self.test(self.classifier, source_loader_val, 'source (val)   ')
            target_metrics_val = self.test(self.classifier, target_loader_val, 'target (val)   ')
            logger.info('------------------------------------------------')
            save_path = os.path.join(args.save_dir, 'clf.pt')
            logger.info(f'Saving the Classifier in {save_path}')
            torch.save(self.classifier.state_dict(), save_path)
        return {
            'source-train': source_metrics_train,
            'source-val'  : source_metrics_val,
            'target-val'  : target_metrics_val
        }
    @torch.no_grad()
    def interpret(self, source_loader, target_loader, args):
        """
        Save the transformed target samples and corresponding thetas for further analysis
        :param source_loader:
        :param target_loader:
        :param args:
        :return:
        """
        if args.clf_ckpt != '' and os.path.exists(args.clf_ckpt):
            logger.info(f'Loading Classifier from {args.clf_ckpt} ...')
            self.classifier.load_state_dict(torch.load(args.clf_ckpt))
            logger.success('Model loaded!')
        if args.gen_ckpt != '' and os.path.exists(args.gen_ckpt):
            logger.info(f'Loading Generator from {args.gen_ckpt} ...')
            self.generator.load_state_dict(torch.load(args.gen_ckpt))
            logger.success('Model loaded!')
        self.classifier.eval()
        self.generator.eval()
        thetas, target_data, xformed, source_data = [], [], [], []
        # Pass every target batch through the generator and keep the
        # transformed samples together with the predicted thetas.
        for data, target in target_loader:
            data = data.to(self.device).float()
            data_xformed, theta = self.generator(data)
            thetas.append(theta)
            target_data.append(data)
            xformed.append(data_xformed)
        for data, target in source_loader:
            data = data.to(self.device).float()
            source_data.append(data)
        thetas = torch.cat(thetas).cpu().numpy()
        target_data = torch.cat(target_data).cpu().numpy()
        source_data = torch.cat(source_data).cpu().numpy()
        xformed = torch.cat(xformed).cpu().numpy()
        theta_path = os.path.join(args.save_dir, 'thetas')
        logger.info('Saving theta, target, transformed target and source data to {}'.format(theta_path))
        np.savez_compressed(theta_path,
                            thetas=thetas, target_data=target_data, source_data=source_data, xformed=xformed)
        logger.success('Data saved!')
# %%
# Script entry: parse CLI args, seed everything, load the source/target
# subject data, build datasets/loaders and run adaptation + interpretation.
parser = make_arg_parser()
args = parser.parse_args()
rng, seed_worker = set_deterministic_and_get_rng(args)
if not os.path.exists(args.save_dir):
    os.makedirs(args.save_dir)
logger.add(os.path.join(args.save_dir, "training.log"))
logger.info(f'Current experiment parameters:\n{pformat(vars(args))}')
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load per-subject/position arrays; comma-separated subject lists are
# concatenated into a single array.
with np.load(args.data_path, mmap_mode='r', allow_pickle=True) as npz:
    if args.subject_source.find(',') > 0:
        data_source = np.concatenate([
            npz['data_{}_{}'.format(ss, args.position_source)]
            for ss in tqdm(args.subject_source.split(','), 'creating source dataset')
        ])
    else:
        data_source = npz['data_{}_{}'.format(args.subject_source,
                                              args.position_source)]
    if args.subject_target.find(',') > 0:
        data_target = np.concatenate([
            npz['data_{}_{}'.format(st, args.position_target)]
            for st in tqdm(args.subject_target.split(','), 'creating target dataset')
        ])
    else:
        data_target = npz['data_{}_{}'.format(args.subject_target,
                                              args.position_target)]
# The label encoder fitted on the source training split is shared by the
# other three splits so class indices line up.
source_train_dataset = ActivityDataset(data_source, args.window_size, args.n_channels, args.scaling,
                                       shuffle=False, train_set=True, train_frac=args.train_frac)
lencoder = source_train_dataset.lencoder
source_val_dataset = ActivityDataset(data_source, args.window_size, args.n_channels, args.scaling, lencoder=lencoder,
                                     shuffle=False, train_set=False, train_frac=args.train_frac)
target_train_dataset = ActivityDataset(data_target, args.window_size, args.n_channels, args.scaling,
                                       lencoder=lencoder, shuffle=False, train_set=True,
                                       train_frac=args.train_frac)
target_val_dataset = ActivityDataset(data_target, args.window_size, args.n_channels, args.scaling,
                                     lencoder=lencoder, shuffle=False, train_set=False,
                                     train_frac=args.train_frac)
# data loader for DA training
# -----------------------------------------------------------------------------------------------------------------------
source_loader_da = InfiniteDataLoader(source_train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True,
                                      num_workers=args.num_workers, generator=rng, worker_init_fn=seed_worker)
target_loader_da = InfiniteDataLoader(target_train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True,
                                      num_workers=args.num_workers, generator=rng, worker_init_fn=seed_worker)
# data loader for classification
# -----------------------------------------------------------------------------------------------------------------------
# training
source_loader_clf_train = DataLoader(source_train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=False,
                                     num_workers=args.num_workers, generator=rng, worker_init_fn=seed_worker)
# validation
source_loader_clf_val = DataLoader(source_val_dataset, batch_size=args.batch_size, shuffle=True, drop_last=False,
                                   num_workers=args.num_workers, generator=rng, worker_init_fn=seed_worker)
target_loader_clf_val = DataLoader(target_val_dataset, batch_size=args.batch_size, shuffle=True, drop_last=False,
                                   num_workers=args.num_workers, generator=rng, worker_init_fn=seed_worker)
# NOTE(review): train_gan is not visible in this copy of the class --
# confirm it is defined upstream.
strangan = StranGAN(device, args)
strangan.train_gan(source_loader_da, target_loader_da, source_loader_clf_train, source_loader_clf_val,
                   target_loader_clf_val, args)
strangan.interpret(source_loader_clf_val, target_loader_clf_val, args)
| 46.150115 | 131 | 0.580644 | # %%
import collections
import os
import sys
import time
from pprint import pformat
import ipdb
import numpy as np
import torch
import torch.autograd
import torch.nn as nn
from loguru import logger
from sklearn.metrics import precision_recall_fscore_support
from torch.optim import SGD, Adam
from torch.utils.data import DataLoader
from tqdm import tqdm
from dataloader import InfiniteDataLoader
from dataset import ActivityDataset
from helpers import make_arg_parser
from net_utils import set_deterministic_and_get_rng
from nets import Classifier, Discriminator, SpatialTransformerBlock
logger.remove()
logger.add(sys.stdout, colorize=True, format="<green>{time:YYYY-MM-DD HH:mm:ss.SSS}</green> <level>{message}</level>")
class StranGAN(object):
"""
STranGAN: Adversarially-learnt Spatial Transformer for scalable human activity recognition
"""
    def __init__(self, device, args):
        """Build the classifier, discriminator, spatial-transformer generator
        and their losses/optimizers on *device* from the CLI *args*."""
        super(StranGAN, self).__init__()
        self.n_classes = args.n_classes
        self.n_channels = args.n_channels
        self.window_size = args.window_size
        self.device = device
        self.log_interval = args.log_interval
        self.classifier = Classifier(args.n_channels, args.n_classes).to(device)
        self.discriminator = Discriminator(args.n_channels).to(device)
        # self.generator = Generator(2, args.n_channels, args.window_size).to(device)
        # self.generator = nn.Sequential(
        #     *[SpatialTransformerBlock(args.n_channels, args.window_size) for i in range(2)]
        # ).to(device)
        self.generator = SpatialTransformerBlock(args.n_channels, args.window_size).to(device)
        logger.info(self.classifier)
        logger.info(self.discriminator)
        logger.info(self.generator)
        self.adversarial_loss = torch.nn.BCEWithLogitsLoss(reduction='mean').to(device)
        self.clf_loss = nn.NLLLoss().to(device)
        self.recon_loss = nn.SmoothL1Loss().to(device)
        # self.recon_loss = nn.MSELoss().to(device)
        self.optim_c = Adam(self.classifier.parameters(), lr=args.lr_FC,
                            betas=(args.lr_FC_b1, args.lr_FC_b2)
                            # amsgrad=True, weight_decay=1e-6
                            )
        """
        https://sthalles.github.io/advanced_gans/
        The discriminator trains with a learning rate 4 times greater than G - 0.004 and 0.001 respectively.
        A larger learning rate means that the discriminator will absorb a larger part of the gradient signal.
        Hence, a higher learning rate eases the problem of slow learning of the regularized discriminator.
        Also, this approach makes it possible to use the same rate of updates for the generator and
        the discriminator. In fact, we use a 1:1 update interval between generator and discriminator.
        """
        self.optim_d = SGD(self.discriminator.parameters(),
                           lr=args.lr_FD, weight_decay=1e-6) # 0.000002, momentum=0.9
        # self.optim_d = Adam(self.discriminator.parameters(),
        #                     lr=args.lr_FD, weight_decay=1e-6) # 0.000002, momentum=0.9
        self.optim_g = Adam(self.generator.parameters(),
                            lr=args.lr_G,
                            betas=(args.lr_G_b1, args.lr_G_b2),
                            amsgrad=False, weight_decay=1e-6) # 0.0002
        # stochastic weight average
        # self.generator_swa = AveragedModel(self.generator)
        # self.scheduler = CosineAnnealingLR(self.optim_g, T_max=100)
        # self.swa_start = 5000
        # self.swa_scheduler = SWALR(self.optim_g, swa_lr=0.05)
        # self.scheduler_g = StepLR(self.optim_g, step_size=1000, gamma=0.5)
    @torch.no_grad()
    def test(self, model, test_loader, stage='train', generator=None):
        """Evaluate ``model`` on ``test_loader`` and log/return metrics.

        :param model: classifier emitting log-probabilities (paired with the
            NLLLoss stored in ``self.clf_loss``)
        :param test_loader: DataLoader yielding (data, target) batches
        :param stage: free-text tag used only in the log line
        :param generator: optional transformer network; when given, each batch
            is passed through it first (it returns a (transformed, theta)
            pair and only the transformed data is classified)
        :return: dict with keys loss/acc/precision/recall/f1/support
        """
        model.eval()
        if generator: generator.eval()
        loss = 0
        correct = 0
        y_true, y_pred = [], []
        for data, target in test_loader:
            data = data.to(self.device).float()
            target = target.to(self.device)
            y_true.append(target)
            if generator:
                data_, _ = generator(data)
                output = model(data_)
            else:
                output = model(data)
            # sum up batch loss
            loss += self.clf_loss(output, target).item()
            # get the index of the max log-probability
            pred = output.argmax(dim=1, keepdim=True)
            y_pred.append(pred.view_as(target))
            correct += pred.eq(target.view_as(pred)).sum().item()
        # NOTE(review): clf_loss is mean-reduced per batch, so dividing the
        # summed batch losses by the dataset size (not the batch count)
        # under-scales the reported loss — confirm this is intended.
        loss /= len(test_loader.dataset)
        acc = correct / len(test_loader.dataset)
        y_true = torch.cat(y_true, 0).cpu().numpy()
        y_pred = torch.cat(y_pred, 0).cpu().numpy()
        precision, recall, f1, support = precision_recall_fscore_support(
            y_true, y_pred, average='micro')
        logger.info(
            f'CLF eval {stage} loss={loss:.6f} acc={acc * 100:3.2f}% {correct:5d}/{len(test_loader.dataset):5d} f1={f1:.4f}')
        return {
            'loss'     : loss,
            'acc'      : acc,
            'precision': precision,
            'recall'   : recall,
            'f1'       : f1,
            'support'  : support
        }
    def train_clf(self, source_loader_train, source_loader_val, target_loader_val, args):
        """
        Trains the source classifier

        If ``args.clf_ckpt`` names an existing checkpoint it is loaded and only
        evaluated; otherwise the classifier is trained on the source domain for
        ``args.n_epochs`` epochs and saved to ``<args.save_dir>/clf.pt``.

        :return: dict mapping 'source-train'/'source-val'/'target-val' to the
            metric dicts produced by :meth:`test`
        """
        source_metrics_train = {}
        source_metrics_val = {}
        target_metrics_val = {}
        if args.clf_ckpt != '' and os.path.exists(args.clf_ckpt):
            # resume path: load the checkpoint and just evaluate it
            logger.info(f'Loading Classifier from {args.clf_ckpt} ...')
            self.classifier.load_state_dict(torch.load(args.clf_ckpt))
            logger.success('Model loaded!')
            source_metrics_train = self.test(self.classifier, source_loader_train, 'source (train)')
            source_metrics_val = self.test(self.classifier, source_loader_val, 'source (val)  ')
            target_metrics_val = self.test(self.classifier, target_loader_val, 'target (val)  ')
        else:
            for epoch in range(1, args.n_epochs + 1):
                ts = time.time()
                self.classifier.train()
                for batch_idx, (X_source, y_source) in enumerate(source_loader_train):
                    X_source = X_source.to(self.device).float()
                    y_source = y_source.to(self.device)
                    self.optim_c.zero_grad()
                    y_source_pred = self.classifier(X_source)
                    loss_fc = self.clf_loss(y_source_pred, y_source)
                    loss_fc.backward()
                    self.optim_c.step()
                    if batch_idx % args.log_interval == 0:
                        logger.info(
                            f'CLF train epoch: {epoch:2d} {100. * batch_idx / len(source_loader_train):3.0f}%'
                            + f' {batch_idx * len(X_source):5d}/{len(source_loader_train.dataset)} lC={loss_fc.item():.6f}'
                        )
                te = time.time()
                logger.info(f'Took {(te - ts):.2f} seconds this epoch')
                logger.info('------------------------------------------------')
                # evaluate after every epoch; the last epoch's metrics are returned
                source_metrics_train = self.test(self.classifier, source_loader_train, 'source (train)')
                source_metrics_val = self.test(self.classifier, source_loader_val, 'source (val)  ')
                target_metrics_val = self.test(self.classifier, target_loader_val, 'target (val)  ')
                logger.info('------------------------------------------------')
            save_path = os.path.join(args.save_dir, 'clf.pt')
            logger.info(f'Saving the Classifier in {save_path}')
            torch.save(self.classifier.state_dict(), save_path)
        return {
            'source-train': source_metrics_train,
            'source-val'  : source_metrics_val,
            'target-val'  : target_metrics_val
        }
    def train_gan(self, source_loader_da, target_loader_da, source_loader_clf_train,
                  source_loader_clf_val,
                  target_loader_clf_val, args):
        """Adversarially train the generator/discriminator pair.

        The source classifier is first trained or loaded via :meth:`train_clf`
        and then kept in eval mode.  The generator learns to transform target
        windows so the classifier handles them (adversarial loss) while also
        reconstructing source windows (reconstruction loss, weighted by
        ``args.gamma``); the discriminator learns to separate real source
        windows from generated ones.  The generator/discriminator pair with
        the best target-val F1 is checkpointed to ``args.save_dir``.
        """
        # ----------------------------
        # First train the source classifier
        # ----------------------------
        self.train_clf(source_loader_clf_train, source_loader_clf_val, target_loader_clf_val, args)
        best_f1 = 0.0
        # check for resume
        if args.resume_gan:
            if args.gen_ckpt != '' and os.path.exists(args.gen_ckpt):
                logger.info(f'Loading Generator from {args.gen_ckpt} ...')
                self.generator.load_state_dict(torch.load(args.gen_ckpt))
                logger.success('Model loaded!')
            if args.dsc_ckpt != '' and os.path.exists(args.dsc_ckpt):
                logger.info(f'Loading Discriminator from {args.dsc_ckpt} ...')
                self.discriminator.load_state_dict(torch.load(args.dsc_ckpt))
                logger.success('Model loaded!')
            # seed best_f1 with the resumed generator's score so we only
            # overwrite the checkpoint when we actually improve on it
            _ = self.test(self.classifier, target_loader_clf_val,
                          'target (xformed)', self.generator)
            best_f1 = _['f1']
            logger.info(f'Best result {best_f1}')
        # ----------------------------
        # Now train the target network
        # ----------------------------
        # soft labels for the adversarial loss (label smoothing)
        valid = torch.ones(args.batch_size, 1, requires_grad=False).to(self.device) * args.soft_label_valid_disc
        fake = torch.ones(args.batch_size, 1, requires_grad=False).to(self.device) * args.soft_label_fake
        valid_alt = torch.ones(args.batch_size, 1, requires_grad=False).to(self.device) * args.soft_label_valid_gen
        source_iterator = iter(source_loader_da)
        target_iterator = iter(target_loader_da)
        step = 1
        while target_loader_da.epoch < args.gan_epochs:
            self.classifier.eval()
            self.discriminator.train()
            self.generator.train()
            X_source, y_source = next(source_iterator)
            X_target, y_target = next(target_iterator)
            X_source = X_source.to(self.device).float()
            y_source = y_source.to(self.device)
            X_target = X_target.to(self.device).float()
            y_target = y_target.to(self.device)
            # -----------------
            #  Train Generator
            # -----------------
            self.optim_g.zero_grad()
            X_gen, _ = self.generator(X_target)
            X_gen_source, _ = self.generator(X_source)
            loss_g_adv = self.adversarial_loss(self.discriminator(X_gen), valid_alt)
            # the generator should leave source windows (mostly) unchanged
            loss_g_rec = self.recon_loss(X_gen_source, X_source)
            gamma = args.gamma
            # gamma = (np.e**((step-1)/1000)-1)/(np.e**((step-1)/1000)+1)
            # gamma = 0.95+ 0.05 * np.sin(step/100)
            # gamma = 0
            loss_g = loss_g_adv + loss_g_rec * gamma
            """
            workaround for the following error:
            'RuntimeError: scatter_add_cuda_kernel does not have a deterministic implementation,
            but you set 'torch.use_deterministic_algorithms(True)'.
            You can turn off determinism just for this operation if that's acceptable for your application.'
            """
            torch.use_deterministic_algorithms(False)
            loss_g.backward()
            torch.use_deterministic_algorithms(True)
            self.optim_g.step()
            # self.scheduler_g.step()
            # SWA
            # if step % 1000 == 0:
            # if step > self.swa_start:
            #     self.generator_swa.update_parameters(self.generator)
            #     self.swa_scheduler.step()
            # else:
            #     self.scheduler.step()
            # --------------------------------
            #  Train the domain discriminator
            # --------------------------------
            self.optim_d.zero_grad()
            pred_valid = self.discriminator(X_source)
            # detach so discriminator gradients do not flow into the generator
            pred_fake = self.discriminator(X_gen.detach())
            disc_acc = ((pred_valid.round().eq(valid.round()) * 1).sum().item() +
                        (pred_fake.round().eq(fake.round()) * 1).sum().item()) / (
                               args.batch_size * 2)
            loss_real = self.adversarial_loss(pred_valid, valid)
            loss_fake = self.adversarial_loss(pred_fake, fake)
            loss_d = (loss_real + loss_fake) / 2
            loss_d.backward()
            self.optim_d.step()
            if step % args.log_interval == 0:
                logger.info(
                    f"GAN tgt_epoch:{target_loader_da.epoch:3d} src_epoch:{source_loader_da.epoch:3d} step:{step:5d}"
                    + f" lD={loss_d.item():.4f}"
                    + f" lG={loss_g.item():.4f} lGr={loss_g_rec.item():.4f} lGa={loss_g_adv.item():.4f}"
                    + f" accD={disc_acc:.4f} gamma={gamma:.2f}")
            if step % args.eval_interval == 0:
                logger.info(
                    '------------------------------------------------------------------------------------------------------------')
                self.test(self.classifier, source_loader_clf_val, 'source (val+transformed)', self.generator)
                _ = self.test(self.classifier, target_loader_clf_val, 'target (val+transformed)', self.generator)
                logger.info(
                    '------------------------------------------------------------------------------------------------------------')
                # checkpoint on improvement of the target-val F1
                if _['f1'] > best_f1:
                    logger.info('Updating best model!')
                    best_f1 = _['f1']
                    gen_save_path = os.path.join(args.save_dir, 'gen.pt')
                    dsc_save_path = os.path.join(args.save_dir, 'dsc.pt')
                    logger.info(f'Saving the generator and discriminator in folder {args.save_dir}')
                    torch.save(self.generator.state_dict(), gen_save_path)
                    torch.save(self.discriminator.state_dict(), dsc_save_path)
                    logger.success('Model saved!')
            step += 1
    @torch.no_grad()
    def interpret(self, source_loader, target_loader, args):
        """
        Save the transformed target samples and corresponding thetas for further analysis
        :param source_loader: loader of source-domain (data, target) batches
        :param target_loader: loader of target-domain (data, target) batches
        :param args: parsed CLI args; uses clf_ckpt/gen_ckpt and save_dir
        :return: None; writes ``<args.save_dir>/thetas.npz`` with arrays
            thetas, target_data, source_data and xformed
        """
        if args.clf_ckpt != '' and os.path.exists(args.clf_ckpt):
            logger.info(f'Loading Classifier from {args.clf_ckpt} ...')
            self.classifier.load_state_dict(torch.load(args.clf_ckpt))
            logger.success('Model loaded!')
        if args.gen_ckpt != '' and os.path.exists(args.gen_ckpt):
            logger.info(f'Loading Generator from {args.gen_ckpt} ...')
            self.generator.load_state_dict(torch.load(args.gen_ckpt))
            logger.success('Model loaded!')
        self.classifier.eval()
        self.generator.eval()
        thetas, target_data, xformed, source_data = [], [], [], []
        # run every target batch through the generator, keeping both the
        # transformed windows and the transformation parameters (thetas)
        for data, target in target_loader:
            data = data.to(self.device).float()
            data_xformed, theta = self.generator(data)
            thetas.append(theta)
            target_data.append(data)
            xformed.append(data_xformed)
        # source windows are stored untransformed, for reference
        for data, target in source_loader:
            data = data.to(self.device).float()
            source_data.append(data)
        thetas = torch.cat(thetas).cpu().numpy()
        target_data = torch.cat(target_data).cpu().numpy()
        source_data = torch.cat(source_data).cpu().numpy()
        xformed = torch.cat(xformed).cpu().numpy()
        theta_path = os.path.join(args.save_dir, 'thetas')
        logger.info('Saving theta, target, transformed target and source data to {}'.format(theta_path))
        np.savez_compressed(theta_path,
                            thetas=thetas, target_data=target_data, source_data=source_data, xformed=xformed)
        logger.success('Data saved!')
# %%
# ----- script entry: parse args, set up output dir, logging, determinism -----
parser = make_arg_parser()
args = parser.parse_args()
rng, seed_worker = set_deterministic_and_get_rng(args)
if not os.path.exists(args.save_dir):
    os.makedirs(args.save_dir)
logger.add(os.path.join(args.save_dir, "training.log"))
logger.info(f'Current experiment parameters:\n{pformat(vars(args))}')
# restrict visible GPUs before the torch device is created
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# load raw windows for the requested subjects/positions; comma-separated
# subject lists are concatenated into a single array per domain
with np.load(args.data_path, mmap_mode='r', allow_pickle=True) as npz:
    if args.subject_source.find(',') > 0:
        data_source = np.concatenate([
            npz['data_{}_{}'.format(ss, args.position_source)]
            for ss in tqdm(args.subject_source.split(','), 'creating source dataset')
        ])
    else:
        data_source = npz['data_{}_{}'.format(args.subject_source,
                                              args.position_source)]
    if args.subject_target.find(',') > 0:
        data_target = np.concatenate([
            npz['data_{}_{}'.format(st, args.position_target)]
            for st in tqdm(args.subject_target.split(','), 'creating target dataset')
        ])
    else:
        data_target = npz['data_{}_{}'.format(args.subject_target,
                                              args.position_target)]
# datasets: the label encoder fitted on the source train split is reused
# for every other split so class indices stay consistent across domains
source_train_dataset = ActivityDataset(data_source, args.window_size, args.n_channels, args.scaling,
                                       shuffle=False, train_set=True, train_frac=args.train_frac)
lencoder = source_train_dataset.lencoder
source_val_dataset = ActivityDataset(data_source, args.window_size, args.n_channels, args.scaling, lencoder=lencoder,
                                     shuffle=False, train_set=False, train_frac=args.train_frac)
target_train_dataset = ActivityDataset(data_target, args.window_size, args.n_channels, args.scaling,
                                       lencoder=lencoder, shuffle=False, train_set=True,
                                       train_frac=args.train_frac)
target_val_dataset = ActivityDataset(data_target, args.window_size, args.n_channels, args.scaling,
                                     lencoder=lencoder, shuffle=False, train_set=False,
                                     train_frac=args.train_frac)
# data loader for DA training
# -----------------------------------------------------------------------------------------------------------------------
source_loader_da = InfiniteDataLoader(source_train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True,
                                      num_workers=args.num_workers, generator=rng, worker_init_fn=seed_worker)
target_loader_da = InfiniteDataLoader(target_train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True,
                                      num_workers=args.num_workers, generator=rng, worker_init_fn=seed_worker)
# data loader for classification
# -----------------------------------------------------------------------------------------------------------------------
# training
source_loader_clf_train = DataLoader(source_train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=False,
                                     num_workers=args.num_workers, generator=rng, worker_init_fn=seed_worker)
# validation
source_loader_clf_val = DataLoader(source_val_dataset, batch_size=args.batch_size, shuffle=True, drop_last=False,
                                   num_workers=args.num_workers, generator=rng, worker_init_fn=seed_worker)
target_loader_clf_val = DataLoader(target_val_dataset, batch_size=args.batch_size, shuffle=True, drop_last=False,
                                   num_workers=args.num_workers, generator=rng, worker_init_fn=seed_worker)
# train the full pipeline, then dump transformed samples/thetas for analysis
strangan = StranGAN(device, args)
strangan.train_gan(source_loader_da, target_loader_da, source_loader_clf_train, source_loader_clf_val,
                   target_loader_clf_val, args)
strangan.interpret(source_loader_clf_val, target_loader_clf_val, args)
| 10,448 | 0 | 80 |
a1cb78d4a4d171713108b29d8de86ac908e518e7 | 3,304 | py | Python | workalendar/usa/texas.py | ftatarli/workalendar | 111d2268f6153cfa1906823409103f5d532f7b8b | [
"MIT"
] | 2 | 2020-07-15T09:56:41.000Z | 2021-02-04T18:11:28.000Z | workalendar/usa/texas.py | ftatarli/workalendar | 111d2268f6153cfa1906823409103f5d532f7b8b | [
"MIT"
] | null | null | null | workalendar/usa/texas.py | ftatarli/workalendar | 111d2268f6153cfa1906823409103f5d532f7b8b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Texas module
============
This module presents two classes to handle the way state holidays are managed
in Texas.
The :class:`TexasBase` class gathers all available holidays for Texas,
according to this document:
http://www.statutes.legis.state.tx.us/Docs/GV/htm/GV.662.htm
The :class:`Texas` class includes all national and state holidays, as described
in the said document. This should be the "default" Texas calendar class, to be
used in most cases.
But if state holidays are supposed to be observed by most of the workforces,
any employee can choose to skip one of these days and replace it by another.
If at some point you need to create a specific calendar class based on Texas
calendar, you can either use the :class:`TexasBase` class or directly the
:class:`Texas` class and overwrite/override the :method:`get_fixed_holidays()`
and/or :method:`get_variable_days()` to fit your needs.
Example:
.. code::
class TexasCustom(TexasBase):
# This will include the confederate heroes day
texas_include_confederate_heroes = True
FIXED_HOLIDAYS = TexasBase.FIXED_HOLIDAYS + (
(7, 14, "Bastille Day!"),
)
def get_variable_days(self, year):
days = super(TexasCustom, self).get_variable_days(year)
days.append(
(self.get_nth_weekday_in_month(year, 1, 15), "Special Day")
)
return days
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import date
from ..registry_tools import iso_register
from .core import UnitedStates
class TexasBase(UnitedStates):
    """Texas Base (w/o State holidays)"""
    include_columbus_day = False
    # Texas-specific optional state holidays; all disabled in the base class
    # so subclasses can opt in individually.
    texas_include_confederate_heroes = False
    texas_include_independance_day = False  # (sic) flag name kept as-is: it is public API
    texas_san_jacinto_day = False
    texas_emancipation_day = False
    texas_lyndon_johnson_day = False
    # Non-Texas-specific state holidays
    include_thanksgiving_friday = False
    include_christmas_eve = False
    include_boxing_day = False
@iso_register('US-TX')
class Texas(TexasBase):
    """Texas"""
    # enable every Texas-specific state holiday declared on TexasBase
    texas_include_confederate_heroes = True
    texas_include_independance_day = True
    texas_san_jacinto_day = True
    texas_emancipation_day = True
    texas_lyndon_johnson_day = True
    # plus the generic (non-Texas-specific) state holidays
    include_thanksgiving_friday = True
    include_christmas_eve = True
    include_boxing_day = True
| 30.878505 | 79 | 0.667373 | # -*- coding: utf-8 -*-
"""
Texas module
============
This module presents two classes to handle the way state holidays are managed
in Texas.
The :class:`TexasBase` class gathers all available holidays for Texas,
according to this document:
http://www.statutes.legis.state.tx.us/Docs/GV/htm/GV.662.htm
The :class:`Texas` class includes all national and state holidays, as described
in the said document. This should be the "default" Texas calendar class, to be
used in most cases.
But if state holidays are supposed to be observed by most of the workforces,
any employee can chose to skip one of these days and replace it by another.
If at some point you need to create a specific calendar class based on Texas
calendar, you can either use the :class:`TexasBase` class or directly the
:class:`Texas` class and overwrite/override the :method:`get_fixed_holidays()`
and/or :method:`get_variable_days()` to fit your needs.
Example:
.. code::
class TexasCustom(TexasBase):
# This will include the confederate heroes day
texas_include_confederate_heroes = True
FIXED_HOLIDAYS = TexasBase.FIXED_HOLIDAYS + (
(7, 14, "Bastille Day!"),
)
def get_variable_days(self, year):
days = super(TexasCustom, self).get_variable_days(year)
days.append(
(self.get_nth_weekday_in_month(year, 1, 15), "Special Day")
)
return days
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import date
from ..registry_tools import iso_register
from .core import UnitedStates
class TexasBase(UnitedStates):
    """Texas Base (w/o State holidays)

    Each Texas state holiday is controlled by a boolean class flag, all
    disabled here; subclasses (e.g. :class:`Texas`) switch them on.
    """
    include_columbus_day = False
    # Texas-specific optional state holidays
    texas_include_confederate_heroes = False
    texas_include_independance_day = False  # (sic) flag name kept: it is public API
    texas_san_jacinto_day = False
    texas_emancipation_day = False
    texas_lyndon_johnson_day = False
    # Non-Texas-specific state holidays
    include_thanksgiving_friday = False
    include_christmas_eve = False
    include_boxing_day = False

    def get_fixed_holidays(self, year):
        """Return the fixed-date holidays for ``year``.

        Extends the national list with every Texas state holiday whose
        class flag is enabled.
        """
        days = super(TexasBase, self).get_fixed_holidays(year)
        if self.texas_include_confederate_heroes:
            days.append(
                (date(year, 1, 19), "Confederate Heroes Day")
            )
        if self.texas_include_independance_day:
            days.append(
                (date(year, 3, 2), "Texas Independence Day")
            )
        if self.texas_san_jacinto_day:
            days.append(
                (date(year, 4, 21), "San Jacinto Day")
            )
        if self.texas_emancipation_day:
            days.append(
                (date(year, 6, 19), "Emancipation Day in Texas"),
            )
        if self.texas_lyndon_johnson_day:
            days.append(
                # typo fix: label previously read "Lyndon B. Jonhson Day"
                (date(year, 8, 27), "Lyndon B. Johnson Day"),
            )
        return days
@iso_register('US-TX')
class Texas(TexasBase):
    """Texas"""
    # enable every Texas-specific state holiday declared on TexasBase
    texas_include_confederate_heroes = True
    texas_include_independance_day = True
    texas_san_jacinto_day = True
    texas_emancipation_day = True
    texas_lyndon_johnson_day = True
    # plus the generic (non-Texas-specific) state holidays
    include_thanksgiving_friday = True
    include_christmas_eve = True
    include_boxing_day = True
| 821 | 0 | 27 |
382592e6075e2fb4977ac204664c19b9e5ca7092 | 128 | py | Python | core/views.py | johncmacy/django-react-graphql | 723ea2fb7d482d3d955e336dbd099b24cf0c6d3c | [
"MIT"
] | null | null | null | core/views.py | johncmacy/django-react-graphql | 723ea2fb7d482d3d955e336dbd099b24cf0c6d3c | [
"MIT"
] | null | null | null | core/views.py | johncmacy/django-react-graphql | 723ea2fb7d482d3d955e336dbd099b24cf0c6d3c | [
"MIT"
] | null | null | null | from django.shortcuts import render
from .models import Thing | 25.6 | 45 | 0.773438 | from django.shortcuts import render
from .models import Thing
def index(request):
    """Render the core app's landing page."""
    template_name = 'core/index.html'
    return render(request, template_name)
60571c34a91ef1ad1ac9e6f31a68c05e28febfa4 | 1,159 | py | Python | torchaudio/backend/no_backend.py | adefossez/audio | 19fc580da97baf179395bb257647c5c25b993e42 | [
"BSD-2-Clause"
] | 1 | 2021-04-20T09:04:24.000Z | 2021-04-20T09:04:24.000Z | torchaudio/backend/no_backend.py | adefossez/audio | 19fc580da97baf179395bb257647c5c25b993e42 | [
"BSD-2-Clause"
] | null | null | null | torchaudio/backend/no_backend.py | adefossez/audio | 19fc580da97baf179395bb257647c5c25b993e42 | [
"BSD-2-Clause"
] | 1 | 2019-09-11T08:27:18.000Z | 2019-09-11T08:27:18.000Z | from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from torch import Tensor
from . import common
from .common import SignalInfo, EncodingInfo
@common._impl_load
@common._impl_load_wav
@common._impl_save
@common._impl_info
| 32.194444 | 113 | 0.692839 | from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union
from torch import Tensor
from . import common
from .common import SignalInfo, EncodingInfo
@common._impl_load
def load(filepath: Union[str, Path],
         out: Optional[Tensor] = None,
         normalization: Union[bool, float, Callable] = True,
         channels_first: bool = True,
         num_frames: int = 0,
         offset: int = 0,
         signalinfo: Optional[SignalInfo] = None,
         encodinginfo: Optional[EncodingInfo] = None,
         filetype: Optional[str] = None) -> Tuple[Tensor, int]:
    """Stub for ``load`` used when no audio I/O backend could be set up.

    Raises:
        RuntimeError: always.
    """
    raise RuntimeError('No audio I/O backend is available.')
@common._impl_load_wav
def load_wav(filepath: Union[str, Path], **kwargs: Any) -> Tuple[Tensor, int]:
    """Stub for ``load_wav``; always raises RuntimeError (no backend)."""
    raise RuntimeError('No audio I/O backend is available.')
@common._impl_save
def save(filepath: str, src: Tensor, sample_rate: int, precision: int = 16, channels_first: bool = True) -> None:
    """Stub for ``save``; always raises RuntimeError (no backend)."""
    raise RuntimeError('No audio I/O backend is available.')
@common._impl_info
def info(filepath: str) -> Tuple[SignalInfo, EncodingInfo]:
    """Stub for ``info``; always raises RuntimeError (no backend)."""
    raise RuntimeError('No audio I/O backend is available.')
| 808 | 0 | 88 |
89ff7a7bae7f3bea554281d5081e20c1ddea119a | 10,593 | py | Python | peddy/tests/test_peddy.py | chapmanb/peddy | 62bed8f00b132677d28336d8a76347f181f9d099 | [
"MIT"
] | null | null | null | peddy/tests/test_peddy.py | chapmanb/peddy | 62bed8f00b132677d28336d8a76347f181f9d099 | [
"MIT"
] | null | null | null | peddy/tests/test_peddy.py | chapmanb/peddy | 62bed8f00b132677d28336d8a76347f181f9d099 | [
"MIT"
] | null | null | null | from __future__ import print_function
import os
import os.path as op
import sys
from peddy import Ped, Family, Sample, PHENOTYPE, SEX
HERE = op.dirname(op.dirname(os.path.abspath(os.path.dirname(__file__))))
from contextlib import contextmanager
@contextmanager
| 31.340237 | 429 | 0.571321 | from __future__ import print_function
import os
import os.path as op
import sys
from peddy import Ped, Family, Sample, PHENOTYPE, SEX
HERE = op.dirname(op.dirname(os.path.abspath(os.path.dirname(__file__))))
def test_sample():
    """Numeric sex/phenotype codes are decoded; a fresh sample has no kids."""
    sample = Sample('fam1', 'sample1', '-9', '-9', '2', '2')
    assert sample.sex == SEX.FEMALE, (sample.sex)
    assert sample.affected == PHENOTYPE.AFFECTED
    assert sample.kids == []
def test_sample_str_and_from_row():
    """str(sample) round-trips through Sample.from_row."""
    s = Sample('fam1', 'sample1', '-9', '-9', '2', '2')
    assert str(s) == "fam1 sample1 -9 -9 2 2", str(s)
    s2 = Sample.from_row(str(s))
    assert s2.sample_id == s.sample_id
    assert s2.sex == s.sex
    assert s2.family_id == s.family_id
def test_sex_check():
    """sex_check yields a dataframe with predicted/ped sex and error columns."""
    if sys.version_info[0] == 3:
        # only exercised under python 2
        return
    p = Ped(op.join(HERE, 'peddy/tests/test.mendel.ped'))
    df = p.sex_check(op.join(HERE, 'peddy/tests/test.mendel.vcf.gz'))
    assert "predicted_sex" in df.columns
    # BUG FIX: this was `assert "ped_sex", df.columns`, which asserts a
    # non-empty string and therefore always passed without checking anything.
    assert "ped_sex" in df.columns, df.columns
    assert "error" in df.columns
def test_dict():
    """Sample.dict() serializes ids plus decoded sex/phenotype labels."""
    s = Sample('fam1', 'sample1', '-9', '-9', '2', '2')
    d = s.dict()
    assert d == {'maternal_id': '-9', 'paternal_id': '-9', 'sex': 'female',
                 'family_id': 'fam1', 'phenotype': 'affected', 'sample_id': 'sample1'}, d
    s = Sample('fam1', 'sample1', 'dad', 'mom', '1', '1')
    d = s.dict()
    assert d == {'maternal_id': 'mom', 'paternal_id': 'dad', 'sex': 'male', 'family_id':
                 'fam1', 'phenotype': 'unaffected', 'sample_id': 'sample1'}
    # '-1' codes: sex falls back to '-9', phenotype to 'affected'
    s = Sample('fam1', 'sample1', 'dad', 'mom', '-1', '-1')
    d = s.dict()
    assert d == {'maternal_id': 'mom', 'paternal_id': 'dad', 'sex': '-9',
                 'family_id': 'fam1', 'phenotype': 'affected', 'sample_id':
                 'sample1'}, d
def test_json():
    """to_json emits the pedigree as a JSON string (note: the local name
    `json` shadows the stdlib module name, but the module is not used here)."""
    p = Ped(op.join(HERE, 'peddy/tests/test.mendel.ped'))
    json = p.to_json()
    # only a containment check: dict key order in the JSON is not guaranteed,
    # so comparing against a full expected literal would be brittle
    assert "CEPH1463" in json, json
def t_ped_check():
    """Exercise Ped.ped_check (name deliberately not test_* so pytest skips it).

    Returns immediately when pandas/cyvcf2 are not installed.
    """
    try:
        import pandas as pd
        import cyvcf2
        cyvcf2
    except ImportError:
        return
    # BUG FIX: the vcf path used to be a bytes literal joined onto the str
    # HERE (op.join(HERE, b'...')), which raises TypeError on python 3.
    vcf_path = op.join(HERE, 'peddy/tests/test.mendel.vcf.gz')
    p = Ped(op.join(HERE, 'peddy/tests/test.mendel.ped'))
    v = p.ped_check(vcf_path)
    assert isinstance(v, pd.DataFrame), v
    # remove samples
    f = list(p.families.values())[0]
    n_samples = len(f.samples)
    s = f.samples[-1]
    f.samples = f.samples[:-1]
    assert n_samples - 1 == len(f.samples)
    v = p.ped_check(vcf_path)
    assert isinstance(v, pd.DataFrame), v
    assert "ibs0" in v.columns
    # changed the sample id of a sample
    s.sample_id = "XDFSDFX"
    f.samples.append(s)
    v = p.ped_check(vcf_path)
    assert isinstance(v, pd.DataFrame), v
def test_relation():
    """relation() labels a mother/father pair as 'mom-dad'."""
    kid = Sample('fam1', 'kid', 'dad', 'mom', '2', '2')
    dad = Sample('fam1', 'dad', '-9', '-9', '1', '2')
    mom = Sample('fam1', 'mom', '-9', '-9', '2', '2')
    kid.mom = mom
    kid.dad = dad
    from io import StringIO
    # an empty ped; the family is attached directly below
    p = Ped(StringIO())
    p.families['fam1'] = Family([kid, mom, dad])
    assert p.relation("mom", "dad") == "mom-dad"
def test_relatedness_coefficient_missing_gparent():
    """Coefficients stay correct when a grandparent is absent from the ped."""
    p = Ped(open(os.path.join(HERE, "peddy/tests/test.fam.ped")))
    # uncle
    v = p.relatedness_coefficient('101806-101806', '101811-101811')
    assert v == 0.25, v
    v = p.relatedness_coefficient('101806-101806', '101809-101809')
    assert v == 0.25, v
    # parent-child
    v = p.relatedness_coefficient('101806-101806', '101653-101653')
    assert v == 0.5, v
    # same expectations against the second fixture
    p = Ped(open(os.path.join(HERE, "peddy/tests/test.fam2.ped")))
    v = p.relatedness_coefficient('101806-101806', '101811-101811')
    assert v == 0.25, v
    v = p.relatedness_coefficient('101806-101806', '101809-101809')
    assert v == 0.25, v
    # parent-child
    v = p.relatedness_coefficient('101806-101806', '101653-101653')
    assert v == 0.5, v
def test_relatedness_coefficient_missing_parent():
    """Relatedness via the maternal line when the dad is left out of the family."""
    gma = Sample('X28935', 'gma', '-9', '-9', '2', '1')
    mom = Sample('X28935', 'mom', '-9', 'gma', '2', '1')
    dad = Sample('X28935', 'dad', '-9', '-9', '1', '1')
    # CLEANUP: kid1/kid2 used to be constructed twice; the first pair (with an
    # unknown father id) was immediately shadowed, so it is dropped here.
    kid1 = Sample('X28935', 'kid1', 'dad', 'mom', '1', '1')
    kid2 = Sample('X28935', 'kid2', 'dad', 'mom', '2', '1')
    kid1.mom = mom
    kid2.mom = mom
    mom.mom = gma
    kid1.dad = dad
    kid2.dad = dad
    from io import StringIO
    p = Ped(StringIO())
    # dad is intentionally omitted from the family
    p.families['X28935'] = Family([kid1, kid2, mom, gma])
    assert "siblings" in p.relation('kid1', 'kid2'), p.relation('kid1', 'kid2')
    v = p.relatedness_coefficient('kid1', 'kid2')
    assert v == 0.5, v
    v = p.relatedness_coefficient('gma', 'kid2')
    assert v == 0.25, v
    v = p.relatedness_coefficient('gma', 'kid1')
    assert v == 0.25, v
    v = p.relatedness_coefficient('gma', 'mom')
    assert v == 0.5, v
def test_relatedness_coefficient():
    """Coefficients across a four-generation maternal line (kid..great-gma)."""
    kid = Sample('fam1', 'kid', 'dad', 'mom', '2', '2')
    dad = Sample('fam1', 'dad', '-9', '-9', '1', '2')
    mom = Sample('fam1', 'mom', '-9', '-9', '2', '2')
    gma = Sample('fam1', 'gma', '-9', '-9', '2', '2')
    ggma = Sample('fam1', 'ggma', '-9', '-9', '2', '2')
    kid.mom = mom
    kid.dad = dad
    mom.mom = gma
    gma.mom = ggma
    unrelated = Sample('fam1', 'un', '-9', '-9', '2', '2')
    from io import StringIO
    p = Ped(StringIO())
    p.families['fam1'] = Family([kid, mom, dad, gma, ggma, unrelated])
    # spouses share no ancestry
    rel = p.relatedness_coefficient("mom", "dad")
    assert rel == 0.0, rel
    d = p.relatedness_coefficient("mom", "kid")
    assert d == 0.5, d
    d = p.relatedness_coefficient("dad", "gma")
    assert d == 0.0, d
    d = p.relatedness_coefficient("mom", "gma")
    assert d == 0.5, d
    d = p.relatedness_coefficient("kid", "gma")
    assert d == 0.25, d
    # coefficient halves with each additional generation
    d = p.relatedness_coefficient("kid", "ggma")
    assert d == 0.125, d
    assert p.relatedness_coefficient("mom", "mom") == 1.0
    #assert p.relatedness_coefficient("mom", "un") == 0.0
from contextlib import contextmanager
@contextmanager
def redirect_err(new_target=None):
    """Temporarily replace ``sys.stderr`` with ``new_target``.

    When ``new_target`` is None a fresh ``StringIO`` is created so callers
    can inspect what was written.  Yields the replacement stream and restores
    the original ``sys.stderr`` on exit.
    """
    if new_target is None:
        try:
            from StringIO import StringIO  # python 2
        except ImportError:
            from io import StringIO  # python 3
        new_target = StringIO()
    old_target, sys.stderr = sys.stderr, new_target  # replace sys.stderr
    try:
        yield new_target  # run some code with the replaced stderr
    finally:
        # BUG FIX: this previously assigned `sys.stdout = old_target`,
        # which clobbered stdout and left stderr permanently redirected.
        sys.stderr = old_target  # restore the previous stderr
def test_warnings():
    """Inconsistent parent assignments emit human-readable warnings on stderr."""
    # parent sex contradicts the parental role
    with redirect_err() as out:
        kid = Sample('fam1', 'kid', 'dad', 'mom', '2', '2')
        mom = Sample('fam1', 'mom', '-9', '-9', '1', '2')
        dad = Sample('fam1', 'dad', '-9', '-9', '2', '2')
        kid.mom = mom
        kid.dad = dad
    v = out.getvalue()
    assert "'dad' is dad but has female sex" in v, v
    assert "'mom' is mom but has male sex" in v, v
    # unknown sex is inferred from the parental role
    with redirect_err() as out:
        kid = Sample('fam1', 'kid', 'dad', 'mom', '2', '2')
        mom = Sample('fam1', 'mom', '-9', '-9', '-9', '2')
        kid.mom = mom
    v = out.getvalue()
    assert "'mom' is mom but has unknown sex. Setting to female" in v
    with redirect_err() as out:
        kid = Sample('fam1', 'kid', 'dad', 'mom', '2', '2')
        dad = Sample('fam1', 'dad', '-9', '-9', '-9', '2')
        kid.dad = dad
    v = out.getvalue()
    assert "'dad' is dad but has unknown sex. Setting to male" in v
    # a sample cannot be its own parent
    with redirect_err() as out:
        kid = Sample('fam1', 'kid', 'dad', 'mom', '2', '2')
        kid.dad = kid
    v = out.getvalue()
    assert "'kid' is dad of self" in v, v
def test_family():
    """Family() wires up parent/child links from the parental ids."""
    kid = Sample('fam1', 'kid', 'dad', 'mom', '2', '2')
    mom = Sample('fam1', 'mom', '-9', '-9', '2', '2')
    dad = Sample('fam1', 'dad', '-9', '-9', '1', '2')
    f = Family([kid, mom, dad])
    assert mom.kids == [kid]
    assert dad.kids == [kid]
    assert kid.dad == dad
    assert kid.mom == mom
    # all three were created with phenotype code '2' (affected)
    assert list(f.affecteds) == [kid, mom, dad], list(f.affecteds)
    assert list(f.unaffecteds) == []
    assert list(f) == [kid, mom, dad]
def test_trios():
    """family_4 in a.ped yields three trios, one per affected sample."""
    ped = Ped(op.join(HERE, 'peddy/tests/a.ped'))
    family = ped.families['family_4']
    trios = list(family.trios())
    assert len(trios) == 3
    probands = [trio[0] for trio in trios]
    assert probands == list(family.affecteds)
def test_ped():
    """a.ped parses into 4 families holding 14 samples in total."""
    ped = Ped(op.join(HERE, 'peddy/tests/a.ped'))
    assert len(ped.families) == 4
    all_samples = list(ped.samples())
    assert len(all_samples) == 14
def test_getattr():
    """Ped.samples() can filter on arbitrary extra-column attributes."""
    ped = Ped(op.join(HERE, 'peddy/tests/a.ped'))
    selected = list(ped.samples(ethnicity='caucasianNEuropean'))
    assert len(selected) == 5
    assert all(s.ethnicity == 'caucasianNEuropean' for s in selected)
def test_6():
    """a6.ped parses; every family id starts with the 'fam' prefix."""
    p = Ped(op.join(HERE, 'peddy/tests/a6.ped'))
    assert len(list(p.samples())) == 14
    for sam in p.samples():
        assert sam.family_id[:3] == "fam"
def test_attrs():
    """Extra columns are carried through str() and repr()."""
    kid = Sample('fam1', 'kid', 'dad', 'mom', '2', '2', ['asdf', 'hello'])
    assert str(kid) == "fam1 kid dad mom 2 2 asdf hello", str(kid)
    assert repr(kid) == "Sample('fam1', 'kid', 'dad', 'mom', 'female', 'affected', ['asdf', 'hello'])", repr(kid)
def test_distant():
    """Distant relations resolve correctly with an unknown grandma in the ped."""
    p = Ped(op.join(HERE, 'peddy/tests/test-unknown-gma.ped'))
    d = p.relatedness_coefficient('kid1', 'cousin1')
    assert d == 0.125, d
    d = p.relatedness_coefficient('kid1', 'aunt')
    assert d == 0.25, d
    d = p.relatedness_coefficient('cousin1', 'aunt')
    assert d == 0.5, d
    d = p.relatedness_coefficient('mom', 'aunt')
    assert d == 0.5, d
    r = p.relation('kid1', 'cousin1')
    assert r == 'cousins', r
    r = p.relation('kid1', 'grandma')
    assert r == 'grandchild', r
    r = p.relation('kid1', 'aunt')
    assert r == 'niece/nephew', r
    # because we don't know that the uncle is related
    r = p.relation('kid1', 'uncle')
    assert r == 'related at unknown level', r
    r = p.relation('cousin1', 'mom')
    assert r == 'niece/nephew', r
    r = p.relation('cousin1', 'dad')
    # because we don't know that the dad is related
    assert r == 'related at unknown level', r
| 9,878 | 0 | 436 |
9d91f5bd94c5f9bfd2dd9928bd7a66bf7826c8ae | 26,520 | py | Python | xena/proto/market_pb2.py | xenaex/client-python | 0870ff52134941e120cad91f0e7bf22af4585ca4 | [
"MIT"
] | 2 | 2019-08-13T08:20:02.000Z | 2019-08-20T15:13:13.000Z | xena/proto/market_pb2.py | xenaex/client-python | 0870ff52134941e120cad91f0e7bf22af4585ca4 | [
"MIT"
] | null | null | null | xena/proto/market_pb2.py | xenaex/client-python | 0870ff52134941e120cad91f0e7bf22af4585ca4 | [
"MIT"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: market.proto
import sys
# Py2/Py3 shim: on Py3 the serialized descriptor literals below must be
# encoded to latin-1 bytes; on Py2 they are already byte strings.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
# Process-wide registry that maps descriptors to generated message classes.
_sym_db = _symbol_database.Default()
# File-level descriptor for market.proto.  serialized_pb is the compiled
# FileDescriptorProto emitted by protoc; the serialized_start/serialized_end
# offsets in every Descriptor below index into this blob, so it must never
# be hand-edited.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='market.proto',
  package='api',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n\x0cmarket.proto\x12\x03\x61pi\"\xb5\x05\n\x07MDEntry\x12\x16\n\x06Symbol\x18\x37 \x01(\tR\x06symbol\x12\'\n\x0eMDUpdateAction\x18\x97\x02 \x01(\tR\x0emdUpdateAction\x12!\n\x0bMDEntryType\x18\x8d\x02 \x01(\tR\x0bmdEntryType\x12\x1d\n\tMDEntryPx\x18\x8e\x02 \x01(\tR\tmdEntryPx\x12!\n\x0bMDEntrySize\x18\x8f\x02 \x01(\tR\x0bmdEntrySize\x12\'\n\x0eNumberOfOrders\x18\xda\x02 \x01(\rR\x0enumberOfOrders\x12\"\n\x0cTransactTime\x18< \x01(\x03R\x0ctransactTime\x12\x19\n\x07TradeId\x18\xeb\x07 \x01(\tR\x07tradeId\x12%\n\rAggressorSide\x18\xdd\x0b \x01(\tR\raggressorSide\x12\x19\n\x07\x46irstPx\x18\x81\x08 \x01(\tR\x07\x66irstPx\x12\x16\n\x06LastPx\x18\x1f \x01(\tR\x06lastPx\x12\x17\n\x06HighPx\x18\xcc\x02 \x01(\tR\x06highPx\x12\x15\n\x05LowPx\x18\xcd\x02 \x01(\tR\x05lowPx\x12\x1d\n\tBuyVolume\x18\xca\x02 \x01(\tR\tbuyVolume\x12\x1f\n\nSellVolume\x18\xcb\x02 \x01(\tR\nsellVolume\x12\x11\n\x03\x42id\x18\xde\x0b \x01(\tR\x03\x62id\x12\x11\n\x03\x41sk\x18\xdf\x0b \x01(\tR\x03\x61sk\x12 \n\nLowRangePx\x18\x91\x96\x02 \x01(\tR\nlowRangePx\x12\"\n\x0bHighRangePx\x18\x92\x96\x02 \x01(\tR\x0bhighRangePx\x12 \n\nLowLimitPx\x18\x93\x96\x02 \x01(\tR\nlowLimitPx\x12\"\n\x0bHighLimitPx\x18\x94\x96\x02 \x01(\tR\x0bhighLimitPx\x12 \n\nClearingPx\x18\x95\x96\x02 \x01(\tR\nclearingPx\"\xe4\x03\n\x11MarketDataRefresh\x12\x18\n\x07MsgType\x18# \x01(\tR\x07msgType\x12\x1f\n\nMDStreamId\x18\xdc\x0b \x01(\tR\nmdStreamId\x12\'\n\x0eLastUpdateTime\x18\x8b\x06 \x01(\x03R\x0elastUpdateTime\x12\x1f\n\nMDBookType\x18\xfd\x07 \x01(\tR\nmdBookType\x12\x16\n\x06Symbol\x18\x37 \x01(\tR\x06symbol\x12 \n\nLowRangePx\x18\x91\x96\x02 \x01(\tR\nlowRangePx\x12\"\n\x0bHighRangePx\x18\x92\x96\x02 \x01(\tR\x0bhighRangePx\x12 \n\nLowLimitPx\x18\x93\x96\x02 \x01(\tR\nlowLimitPx\x12\"\n\x0bHighLimitPx\x18\x94\x96\x02 \x01(\tR\x0bhighLimitPx\x12 \n\nClearingPx\x18\x95\x96\x02 \x01(\tR\nclearingPx\x12\x19\n\x07\x42\x65stBid\x18\xde\x0b \x01(\tR\x07\x62\x65stBid\x12\x19\n\x07\x42\x65stAsk\x18\xdf\x0b \x01(\tR\x07\x62\x65stAsk\x12\'\n\x07MDEntry\x18\x8c\x02 \x03(\x0b\x32\x0c.api.MDEntryR\x07mdEntry\x12%\n\x06Ratios\x18\xe0\x0b \x03(\x0b\x32\x0c.api.MDEntryR\x06ratios\"\xdb\x01\n\x11MarketDataRequest\x12\x0f\n\x07MsgType\x18# \x01(\t\x12\x13\n\nMDStreamId\x18\xdc\x0b \x01(\t\x12 \n\x17SubscriptionRequestType\x18\x87\x02 \x01(\t\x12\x15\n\x0cThrottleType\x18\xcc\x0c \x01(\t\x12\x1d\n\x14ThrottleTimeInterval\x18\xce\x0c \x01(\x03\x12\x19\n\x10ThrottleTimeUnit\x18\xcf\x0c \x01(\t\x12\x17\n\x0e\x41ggregatedBook\x18\x8a\x02 \x01(\x03\x12\x14\n\x0bMarketDepth\x18\x88\x02 \x01(\x03\"T\n\x17MarketDataRequestReject\x12\x0f\n\x07MsgType\x18# \x01(\t\x12\x13\n\nMDStreamId\x18\xdc\x0b \x01(\t\x12\x13\n\nRejectText\x18\xb0\n \x01(\t\"/\n\x04\x42\x61rs\x12\'\n\x07MDEntry\x18\x8c\x02 \x03(\x0b\x32\x0c.api.MDEntryR\x07mdEntry\x62\x06proto3')
)
# Descriptor for api.MDEntry: a single market-data entry (book level, trade,
# or statistics row).  Field numbers follow FIX tag numbering where one
# exists (55=Symbol, 60=TransactTime, 31=LastPx, 268/269/270/271=MD* tags).
# Generated by protoc -- keep byte-for-byte in sync with serialized_pb.
_MDENTRY = _descriptor.Descriptor(
  name='MDEntry',
  full_name='api.MDEntry',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='Symbol', full_name='api.MDEntry.Symbol', index=0,
      number=55, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='symbol', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='MDUpdateAction', full_name='api.MDEntry.MDUpdateAction', index=1,
      number=279, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='mdUpdateAction', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='MDEntryType', full_name='api.MDEntry.MDEntryType', index=2,
      number=269, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='mdEntryType', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='MDEntryPx', full_name='api.MDEntry.MDEntryPx', index=3,
      number=270, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='mdEntryPx', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='MDEntrySize', full_name='api.MDEntry.MDEntrySize', index=4,
      number=271, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='mdEntrySize', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='NumberOfOrders', full_name='api.MDEntry.NumberOfOrders', index=5,
      number=346, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='numberOfOrders', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='TransactTime', full_name='api.MDEntry.TransactTime', index=6,
      number=60, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='transactTime', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='TradeId', full_name='api.MDEntry.TradeId', index=7,
      number=1003, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='tradeId', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='AggressorSide', full_name='api.MDEntry.AggressorSide', index=8,
      number=1501, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='aggressorSide', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='FirstPx', full_name='api.MDEntry.FirstPx', index=9,
      number=1025, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='firstPx', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='LastPx', full_name='api.MDEntry.LastPx', index=10,
      number=31, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='lastPx', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='HighPx', full_name='api.MDEntry.HighPx', index=11,
      number=332, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='highPx', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='LowPx', full_name='api.MDEntry.LowPx', index=12,
      number=333, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='lowPx', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='BuyVolume', full_name='api.MDEntry.BuyVolume', index=13,
      number=330, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='buyVolume', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='SellVolume', full_name='api.MDEntry.SellVolume', index=14,
      number=331, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='sellVolume', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Bid', full_name='api.MDEntry.Bid', index=15,
      number=1502, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='bid', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Ask', full_name='api.MDEntry.Ask', index=16,
      number=1503, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='ask', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='LowRangePx', full_name='api.MDEntry.LowRangePx', index=17,
      number=35601, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='lowRangePx', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='HighRangePx', full_name='api.MDEntry.HighRangePx', index=18,
      number=35602, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='highRangePx', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='LowLimitPx', full_name='api.MDEntry.LowLimitPx', index=19,
      number=35603, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='lowLimitPx', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='HighLimitPx', full_name='api.MDEntry.HighLimitPx', index=20,
      number=35604, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='highLimitPx', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ClearingPx', full_name='api.MDEntry.ClearingPx', index=21,
      number=35605, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='clearingPx', file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=22,
  serialized_end=715,
)
# Descriptor for api.MarketDataRefresh: a snapshot/incremental market-data
# message carrying repeated MDEntry rows plus book-level summary fields.
# Generated by protoc -- keep byte-for-byte in sync with serialized_pb.
_MARKETDATAREFRESH = _descriptor.Descriptor(
  name='MarketDataRefresh',
  full_name='api.MarketDataRefresh',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='MsgType', full_name='api.MarketDataRefresh.MsgType', index=0,
      number=35, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='msgType', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='MDStreamId', full_name='api.MarketDataRefresh.MDStreamId', index=1,
      number=1500, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='mdStreamId', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='LastUpdateTime', full_name='api.MarketDataRefresh.LastUpdateTime', index=2,
      number=779, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='lastUpdateTime', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='MDBookType', full_name='api.MarketDataRefresh.MDBookType', index=3,
      number=1021, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='mdBookType', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Symbol', full_name='api.MarketDataRefresh.Symbol', index=4,
      number=55, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='symbol', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='LowRangePx', full_name='api.MarketDataRefresh.LowRangePx', index=5,
      number=35601, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='lowRangePx', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='HighRangePx', full_name='api.MarketDataRefresh.HighRangePx', index=6,
      number=35602, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='highRangePx', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='LowLimitPx', full_name='api.MarketDataRefresh.LowLimitPx', index=7,
      number=35603, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='lowLimitPx', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='HighLimitPx', full_name='api.MarketDataRefresh.HighLimitPx', index=8,
      number=35604, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='highLimitPx', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ClearingPx', full_name='api.MarketDataRefresh.ClearingPx', index=9,
      number=35605, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='clearingPx', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='BestBid', full_name='api.MarketDataRefresh.BestBid', index=10,
      number=1502, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='bestBid', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='BestAsk', full_name='api.MarketDataRefresh.BestAsk', index=11,
      number=1503, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='bestAsk', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='MDEntry', full_name='api.MarketDataRefresh.MDEntry', index=12,
      number=268, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='mdEntry', file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='Ratios', full_name='api.MarketDataRefresh.Ratios', index=13,
      number=1504, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='ratios', file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=718,
  serialized_end=1202,
)
# Descriptor for api.MarketDataRequest: client subscription request with
# throttling parameters.  Note: these fields carry no json_name overrides
# in the .proto, unlike MDEntry/MarketDataRefresh above.
# Generated by protoc -- keep byte-for-byte in sync with serialized_pb.
_MARKETDATAREQUEST = _descriptor.Descriptor(
  name='MarketDataRequest',
  full_name='api.MarketDataRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='MsgType', full_name='api.MarketDataRequest.MsgType', index=0,
      number=35, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='MDStreamId', full_name='api.MarketDataRequest.MDStreamId', index=1,
      number=1500, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='SubscriptionRequestType', full_name='api.MarketDataRequest.SubscriptionRequestType', index=2,
      number=263, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ThrottleType', full_name='api.MarketDataRequest.ThrottleType', index=3,
      number=1612, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ThrottleTimeInterval', full_name='api.MarketDataRequest.ThrottleTimeInterval', index=4,
      number=1614, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='ThrottleTimeUnit', full_name='api.MarketDataRequest.ThrottleTimeUnit', index=5,
      number=1615, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='AggregatedBook', full_name='api.MarketDataRequest.AggregatedBook', index=6,
      number=266, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='MarketDepth', full_name='api.MarketDataRequest.MarketDepth', index=7,
      number=264, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1205,
  serialized_end=1424,
)
# Descriptor for api.MarketDataRequestReject: server rejection of a
# MarketDataRequest, identified by its MDStreamId, with a free-text reason.
# Generated by protoc -- keep byte-for-byte in sync with serialized_pb.
_MARKETDATAREQUESTREJECT = _descriptor.Descriptor(
  name='MarketDataRequestReject',
  full_name='api.MarketDataRequestReject',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='MsgType', full_name='api.MarketDataRequestReject.MsgType', index=0,
      number=35, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='MDStreamId', full_name='api.MarketDataRequestReject.MDStreamId', index=1,
      number=1500, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='RejectText', full_name='api.MarketDataRequestReject.RejectText', index=2,
      number=1328, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1426,
  serialized_end=1510,
)
# Descriptor for api.Bars: a bare container of repeated MDEntry rows
# (candle/bar data reuses the MDEntry shape).
# Generated by protoc -- keep byte-for-byte in sync with serialized_pb.
_BARS = _descriptor.Descriptor(
  name='Bars',
  full_name='api.Bars',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='MDEntry', full_name='api.Bars.MDEntry', index=0,
      number=268, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, json_name='mdEntry', file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1512,
  serialized_end=1559,
)
# Wire up cross-references: the repeated message-typed fields point at the
# MDEntry descriptor, then every top-level message is registered on the file.
_MARKETDATAREFRESH.fields_by_name['MDEntry'].message_type = _MDENTRY
_MARKETDATAREFRESH.fields_by_name['Ratios'].message_type = _MDENTRY
_BARS.fields_by_name['MDEntry'].message_type = _MDENTRY
DESCRIPTOR.message_types_by_name['MDEntry'] = _MDENTRY
DESCRIPTOR.message_types_by_name['MarketDataRefresh'] = _MARKETDATAREFRESH
DESCRIPTOR.message_types_by_name['MarketDataRequest'] = _MARKETDATAREQUEST
DESCRIPTOR.message_types_by_name['MarketDataRequestReject'] = _MARKETDATAREQUESTREJECT
DESCRIPTOR.message_types_by_name['Bars'] = _BARS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Build the concrete message classes from the descriptors via the generated
# protocol-message metaclass and register them with the symbol database.
MDEntry = _reflection.GeneratedProtocolMessageType('MDEntry', (_message.Message,), dict(
  DESCRIPTOR = _MDENTRY,
  __module__ = 'market_pb2'
  # @@protoc_insertion_point(class_scope:api.MDEntry)
  ))
_sym_db.RegisterMessage(MDEntry)
MarketDataRefresh = _reflection.GeneratedProtocolMessageType('MarketDataRefresh', (_message.Message,), dict(
  DESCRIPTOR = _MARKETDATAREFRESH,
  __module__ = 'market_pb2'
  # @@protoc_insertion_point(class_scope:api.MarketDataRefresh)
  ))
_sym_db.RegisterMessage(MarketDataRefresh)
MarketDataRequest = _reflection.GeneratedProtocolMessageType('MarketDataRequest', (_message.Message,), dict(
  DESCRIPTOR = _MARKETDATAREQUEST,
  __module__ = 'market_pb2'
  # @@protoc_insertion_point(class_scope:api.MarketDataRequest)
  ))
_sym_db.RegisterMessage(MarketDataRequest)
MarketDataRequestReject = _reflection.GeneratedProtocolMessageType('MarketDataRequestReject', (_message.Message,), dict(
  DESCRIPTOR = _MARKETDATAREQUESTREJECT,
  __module__ = 'market_pb2'
  # @@protoc_insertion_point(class_scope:api.MarketDataRequestReject)
  ))
_sym_db.RegisterMessage(MarketDataRequestReject)
Bars = _reflection.GeneratedProtocolMessageType('Bars', (_message.Message,), dict(
  DESCRIPTOR = _BARS,
  __module__ = 'market_pb2'
  # @@protoc_insertion_point(class_scope:api.Bars)
  ))
_sym_db.RegisterMessage(Bars)
# @@protoc_insertion_point(module_scope)
| 50.037736 | 2,836 | 0.735181 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: market.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='market.proto',
package='api',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0cmarket.proto\x12\x03\x61pi\"\xb5\x05\n\x07MDEntry\x12\x16\n\x06Symbol\x18\x37 \x01(\tR\x06symbol\x12\'\n\x0eMDUpdateAction\x18\x97\x02 \x01(\tR\x0emdUpdateAction\x12!\n\x0bMDEntryType\x18\x8d\x02 \x01(\tR\x0bmdEntryType\x12\x1d\n\tMDEntryPx\x18\x8e\x02 \x01(\tR\tmdEntryPx\x12!\n\x0bMDEntrySize\x18\x8f\x02 \x01(\tR\x0bmdEntrySize\x12\'\n\x0eNumberOfOrders\x18\xda\x02 \x01(\rR\x0enumberOfOrders\x12\"\n\x0cTransactTime\x18< \x01(\x03R\x0ctransactTime\x12\x19\n\x07TradeId\x18\xeb\x07 \x01(\tR\x07tradeId\x12%\n\rAggressorSide\x18\xdd\x0b \x01(\tR\raggressorSide\x12\x19\n\x07\x46irstPx\x18\x81\x08 \x01(\tR\x07\x66irstPx\x12\x16\n\x06LastPx\x18\x1f \x01(\tR\x06lastPx\x12\x17\n\x06HighPx\x18\xcc\x02 \x01(\tR\x06highPx\x12\x15\n\x05LowPx\x18\xcd\x02 \x01(\tR\x05lowPx\x12\x1d\n\tBuyVolume\x18\xca\x02 \x01(\tR\tbuyVolume\x12\x1f\n\nSellVolume\x18\xcb\x02 \x01(\tR\nsellVolume\x12\x11\n\x03\x42id\x18\xde\x0b \x01(\tR\x03\x62id\x12\x11\n\x03\x41sk\x18\xdf\x0b \x01(\tR\x03\x61sk\x12 \n\nLowRangePx\x18\x91\x96\x02 \x01(\tR\nlowRangePx\x12\"\n\x0bHighRangePx\x18\x92\x96\x02 \x01(\tR\x0bhighRangePx\x12 \n\nLowLimitPx\x18\x93\x96\x02 \x01(\tR\nlowLimitPx\x12\"\n\x0bHighLimitPx\x18\x94\x96\x02 \x01(\tR\x0bhighLimitPx\x12 \n\nClearingPx\x18\x95\x96\x02 \x01(\tR\nclearingPx\"\xe4\x03\n\x11MarketDataRefresh\x12\x18\n\x07MsgType\x18# \x01(\tR\x07msgType\x12\x1f\n\nMDStreamId\x18\xdc\x0b \x01(\tR\nmdStreamId\x12\'\n\x0eLastUpdateTime\x18\x8b\x06 \x01(\x03R\x0elastUpdateTime\x12\x1f\n\nMDBookType\x18\xfd\x07 \x01(\tR\nmdBookType\x12\x16\n\x06Symbol\x18\x37 \x01(\tR\x06symbol\x12 \n\nLowRangePx\x18\x91\x96\x02 \x01(\tR\nlowRangePx\x12\"\n\x0bHighRangePx\x18\x92\x96\x02 \x01(\tR\x0bhighRangePx\x12 \n\nLowLimitPx\x18\x93\x96\x02 \x01(\tR\nlowLimitPx\x12\"\n\x0bHighLimitPx\x18\x94\x96\x02 \x01(\tR\x0bhighLimitPx\x12 \n\nClearingPx\x18\x95\x96\x02 \x01(\tR\nclearingPx\x12\x19\n\x07\x42\x65stBid\x18\xde\x0b \x01(\tR\x07\x62\x65stBid\x12\x19\n\x07\x42\x65stAsk\x18\xdf\x0b 
\x01(\tR\x07\x62\x65stAsk\x12\'\n\x07MDEntry\x18\x8c\x02 \x03(\x0b\x32\x0c.api.MDEntryR\x07mdEntry\x12%\n\x06Ratios\x18\xe0\x0b \x03(\x0b\x32\x0c.api.MDEntryR\x06ratios\"\xdb\x01\n\x11MarketDataRequest\x12\x0f\n\x07MsgType\x18# \x01(\t\x12\x13\n\nMDStreamId\x18\xdc\x0b \x01(\t\x12 \n\x17SubscriptionRequestType\x18\x87\x02 \x01(\t\x12\x15\n\x0cThrottleType\x18\xcc\x0c \x01(\t\x12\x1d\n\x14ThrottleTimeInterval\x18\xce\x0c \x01(\x03\x12\x19\n\x10ThrottleTimeUnit\x18\xcf\x0c \x01(\t\x12\x17\n\x0e\x41ggregatedBook\x18\x8a\x02 \x01(\x03\x12\x14\n\x0bMarketDepth\x18\x88\x02 \x01(\x03\"T\n\x17MarketDataRequestReject\x12\x0f\n\x07MsgType\x18# \x01(\t\x12\x13\n\nMDStreamId\x18\xdc\x0b \x01(\t\x12\x13\n\nRejectText\x18\xb0\n \x01(\t\"/\n\x04\x42\x61rs\x12\'\n\x07MDEntry\x18\x8c\x02 \x03(\x0b\x32\x0c.api.MDEntryR\x07mdEntryb\x06proto3')
)
_MDENTRY = _descriptor.Descriptor(
name='MDEntry',
full_name='api.MDEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='Symbol', full_name='api.MDEntry.Symbol', index=0,
number=55, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='symbol', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDUpdateAction', full_name='api.MDEntry.MDUpdateAction', index=1,
number=279, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdUpdateAction', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDEntryType', full_name='api.MDEntry.MDEntryType', index=2,
number=269, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdEntryType', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDEntryPx', full_name='api.MDEntry.MDEntryPx', index=3,
number=270, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdEntryPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDEntrySize', full_name='api.MDEntry.MDEntrySize', index=4,
number=271, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdEntrySize', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='NumberOfOrders', full_name='api.MDEntry.NumberOfOrders', index=5,
number=346, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='numberOfOrders', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='TransactTime', full_name='api.MDEntry.TransactTime', index=6,
number=60, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='transactTime', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='TradeId', full_name='api.MDEntry.TradeId', index=7,
number=1003, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='tradeId', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='AggressorSide', full_name='api.MDEntry.AggressorSide', index=8,
number=1501, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='aggressorSide', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='FirstPx', full_name='api.MDEntry.FirstPx', index=9,
number=1025, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='firstPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='LastPx', full_name='api.MDEntry.LastPx', index=10,
number=31, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='lastPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='HighPx', full_name='api.MDEntry.HighPx', index=11,
number=332, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='highPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='LowPx', full_name='api.MDEntry.LowPx', index=12,
number=333, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='lowPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='BuyVolume', full_name='api.MDEntry.BuyVolume', index=13,
number=330, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='buyVolume', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='SellVolume', full_name='api.MDEntry.SellVolume', index=14,
number=331, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='sellVolume', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Bid', full_name='api.MDEntry.Bid', index=15,
number=1502, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='bid', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Ask', full_name='api.MDEntry.Ask', index=16,
number=1503, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='ask', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='LowRangePx', full_name='api.MDEntry.LowRangePx', index=17,
number=35601, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='lowRangePx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='HighRangePx', full_name='api.MDEntry.HighRangePx', index=18,
number=35602, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='highRangePx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='LowLimitPx', full_name='api.MDEntry.LowLimitPx', index=19,
number=35603, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='lowLimitPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='HighLimitPx', full_name='api.MDEntry.HighLimitPx', index=20,
number=35604, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='highLimitPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ClearingPx', full_name='api.MDEntry.ClearingPx', index=21,
number=35605, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='clearingPx', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=22,
serialized_end=715,
)
_MARKETDATAREFRESH = _descriptor.Descriptor(
name='MarketDataRefresh',
full_name='api.MarketDataRefresh',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='MsgType', full_name='api.MarketDataRefresh.MsgType', index=0,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='msgType', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDStreamId', full_name='api.MarketDataRefresh.MDStreamId', index=1,
number=1500, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdStreamId', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='LastUpdateTime', full_name='api.MarketDataRefresh.LastUpdateTime', index=2,
number=779, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='lastUpdateTime', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDBookType', full_name='api.MarketDataRefresh.MDBookType', index=3,
number=1021, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdBookType', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Symbol', full_name='api.MarketDataRefresh.Symbol', index=4,
number=55, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='symbol', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='LowRangePx', full_name='api.MarketDataRefresh.LowRangePx', index=5,
number=35601, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='lowRangePx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='HighRangePx', full_name='api.MarketDataRefresh.HighRangePx', index=6,
number=35602, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='highRangePx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='LowLimitPx', full_name='api.MarketDataRefresh.LowLimitPx', index=7,
number=35603, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='lowLimitPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='HighLimitPx', full_name='api.MarketDataRefresh.HighLimitPx', index=8,
number=35604, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='highLimitPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ClearingPx', full_name='api.MarketDataRefresh.ClearingPx', index=9,
number=35605, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='clearingPx', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='BestBid', full_name='api.MarketDataRefresh.BestBid', index=10,
number=1502, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='bestBid', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='BestAsk', full_name='api.MarketDataRefresh.BestAsk', index=11,
number=1503, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='bestAsk', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDEntry', full_name='api.MarketDataRefresh.MDEntry', index=12,
number=268, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdEntry', file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='Ratios', full_name='api.MarketDataRefresh.Ratios', index=13,
number=1504, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='ratios', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=718,
serialized_end=1202,
)
_MARKETDATAREQUEST = _descriptor.Descriptor(
name='MarketDataRequest',
full_name='api.MarketDataRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='MsgType', full_name='api.MarketDataRequest.MsgType', index=0,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDStreamId', full_name='api.MarketDataRequest.MDStreamId', index=1,
number=1500, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='SubscriptionRequestType', full_name='api.MarketDataRequest.SubscriptionRequestType', index=2,
number=263, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ThrottleType', full_name='api.MarketDataRequest.ThrottleType', index=3,
number=1612, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ThrottleTimeInterval', full_name='api.MarketDataRequest.ThrottleTimeInterval', index=4,
number=1614, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ThrottleTimeUnit', full_name='api.MarketDataRequest.ThrottleTimeUnit', index=5,
number=1615, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='AggregatedBook', full_name='api.MarketDataRequest.AggregatedBook', index=6,
number=266, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MarketDepth', full_name='api.MarketDataRequest.MarketDepth', index=7,
number=264, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1205,
serialized_end=1424,
)
_MARKETDATAREQUESTREJECT = _descriptor.Descriptor(
name='MarketDataRequestReject',
full_name='api.MarketDataRequestReject',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='MsgType', full_name='api.MarketDataRequestReject.MsgType', index=0,
number=35, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='MDStreamId', full_name='api.MarketDataRequestReject.MDStreamId', index=1,
number=1500, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='RejectText', full_name='api.MarketDataRequestReject.RejectText', index=2,
number=1328, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1426,
serialized_end=1510,
)
_BARS = _descriptor.Descriptor(
name='Bars',
full_name='api.Bars',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='MDEntry', full_name='api.Bars.MDEntry', index=0,
number=268, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, json_name='mdEntry', file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1512,
serialized_end=1559,
)
_MARKETDATAREFRESH.fields_by_name['MDEntry'].message_type = _MDENTRY
_MARKETDATAREFRESH.fields_by_name['Ratios'].message_type = _MDENTRY
_BARS.fields_by_name['MDEntry'].message_type = _MDENTRY
DESCRIPTOR.message_types_by_name['MDEntry'] = _MDENTRY
DESCRIPTOR.message_types_by_name['MarketDataRefresh'] = _MARKETDATAREFRESH
DESCRIPTOR.message_types_by_name['MarketDataRequest'] = _MARKETDATAREQUEST
DESCRIPTOR.message_types_by_name['MarketDataRequestReject'] = _MARKETDATAREQUESTREJECT
DESCRIPTOR.message_types_by_name['Bars'] = _BARS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MDEntry = _reflection.GeneratedProtocolMessageType('MDEntry', (_message.Message,), dict(
DESCRIPTOR = _MDENTRY,
__module__ = 'market_pb2'
# @@protoc_insertion_point(class_scope:api.MDEntry)
))
_sym_db.RegisterMessage(MDEntry)
MarketDataRefresh = _reflection.GeneratedProtocolMessageType('MarketDataRefresh', (_message.Message,), dict(
DESCRIPTOR = _MARKETDATAREFRESH,
__module__ = 'market_pb2'
# @@protoc_insertion_point(class_scope:api.MarketDataRefresh)
))
_sym_db.RegisterMessage(MarketDataRefresh)
MarketDataRequest = _reflection.GeneratedProtocolMessageType('MarketDataRequest', (_message.Message,), dict(
DESCRIPTOR = _MARKETDATAREQUEST,
__module__ = 'market_pb2'
# @@protoc_insertion_point(class_scope:api.MarketDataRequest)
))
_sym_db.RegisterMessage(MarketDataRequest)
MarketDataRequestReject = _reflection.GeneratedProtocolMessageType('MarketDataRequestReject', (_message.Message,), dict(
DESCRIPTOR = _MARKETDATAREQUESTREJECT,
__module__ = 'market_pb2'
# @@protoc_insertion_point(class_scope:api.MarketDataRequestReject)
))
_sym_db.RegisterMessage(MarketDataRequestReject)
Bars = _reflection.GeneratedProtocolMessageType('Bars', (_message.Message,), dict(
DESCRIPTOR = _BARS,
__module__ = 'market_pb2'
# @@protoc_insertion_point(class_scope:api.Bars)
))
_sym_db.RegisterMessage(Bars)
# @@protoc_insertion_point(module_scope)
| 0 | 0 | 0 |
66fe3327fbd77974a9201a9adf66092754f63ac9 | 1,112 | py | Python | server/scripts/run_task.py | Yinqingwen/Dva | 3b8d1d1435f6a804a9c370006b931f9dc50a7462 | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | 3 | 2019-03-05T00:46:56.000Z | 2021-11-26T10:20:40.000Z | server/scripts/run_task.py | jiangxu87/DeepVideoAnalytics | e401b3273782409b2604657514bec293d6aa75b0 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | server/scripts/run_task.py | jiangxu87/DeepVideoAnalytics | e401b3273782409b2604657514bec293d6aa75b0 | [
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | 4 | 2021-09-22T07:47:27.000Z | 2022-01-23T14:16:08.000Z | #!/usr/bin/env python
import django
import sys, os, logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename='../logs/task.log',
filemode='a')
sys.path.append(os.path.join(os.path.dirname(__file__),'../'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dva.settings")
django.setup()
from dvaapp.models import TEvent
from dvaapp.task_handlers import handle_perform_analysis, handle_perform_indexing, handle_perform_detection
if __name__ == '__main__':
task_name = sys.argv[-2]
pk = int(sys.argv[-1])
logging.info("Executing {} {}".format(task_name,pk))
if task_name == 'perform_indexing':
handle_perform_indexing(TEvent.objects.get(pk=pk))
elif task_name == 'perform_detection':
handle_perform_detection(TEvent.objects.get(pk=pk))
elif task_name == 'perform_analysis':
handle_perform_analysis(TEvent.objects.get(pk=pk))
else:
raise ValueError("Unknown task name {}".format(task_name)) | 41.185185 | 107 | 0.667266 | #!/usr/bin/env python
import django
import sys, os, logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename='../logs/task.log',
filemode='a')
sys.path.append(os.path.join(os.path.dirname(__file__),'../'))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dva.settings")
django.setup()
from dvaapp.models import TEvent
from dvaapp.task_handlers import handle_perform_analysis, handle_perform_indexing, handle_perform_detection
if __name__ == '__main__':
task_name = sys.argv[-2]
pk = int(sys.argv[-1])
logging.info("Executing {} {}".format(task_name,pk))
if task_name == 'perform_indexing':
handle_perform_indexing(TEvent.objects.get(pk=pk))
elif task_name == 'perform_detection':
handle_perform_detection(TEvent.objects.get(pk=pk))
elif task_name == 'perform_analysis':
handle_perform_analysis(TEvent.objects.get(pk=pk))
else:
raise ValueError("Unknown task name {}".format(task_name)) | 0 | 0 | 0 |
fb8172a0b99bd5f952cf12256c6a3240ea96b41a | 4,347 | py | Python | utils/data_io.py | sjtuytc/AAAI21-RoutineAugmentedPolicyLearning | 7192f0bf26378d8aacb21c0220cc705cb577c6dc | [
"MIT"
] | 15 | 2021-01-07T11:51:14.000Z | 2021-07-22T14:54:15.000Z | utils/data_io.py | sjtuytc/-AAAI21-RoutineAugmentedPolicyLearning-RAPL- | 7192f0bf26378d8aacb21c0220cc705cb577c6dc | [
"MIT"
] | 1 | 2021-05-29T13:25:34.000Z | 2021-05-29T23:38:15.000Z | utils/data_io.py | sjtuytc/AAAI21-RoutineAugmentedPolicyLearning | 7192f0bf26378d8aacb21c0220cc705cb577c6dc | [
"MIT"
] | null | null | null | import os
import json
import ffmpeg
import pickle
import sys
import matplotlib.pyplot as plt
from cv2 import VideoWriter, VideoWriter_fourcc, resize
import numpy as np
import cv2
def imgseq2video(imgseq, name="pick_up", decode="mp4v", folder=None, fps=3, o_h=500, o_w=500,
                 full_path=None, rgb_to_bgr=True, verbose=True):
    """
    Generate a video from a img sequence list.
    :param imgseq: RGB image frames.
    :param name: video file name.
    :param decode: video decoder type, X264 is not working.
    :param folder: saved to which folder.
    :param fps: fps of saved video.
    :param o_h: height of video.
    :param o_w: width of video
    :param full_path: full path to the video, if not None, overwrite folder and name.
    :param rgb_to_bgr: convert rgb image to bgr img.
    :param verbose: whether to print save path.
    :return: path of the saved video, or None when imgseq is empty.
    """
    if len(imgseq) < 1:
        print("[WARNING] Try to save empty video.")
        return
    if full_path is not None:
        assert ".mp4" in full_path[-4:], "Full path should end with .mp4"
        tmp_path = full_path[:-4] + "tmp" + ".mp4"
        path = full_path
    else:
        tmp_path = name + "tmp.mp4" if folder is None else os.path.join(folder, name + "tmp.mp4")
        path = name + ".mp4" if folder is None else os.path.join(folder, name + ".mp4")
    # Silence Python-level prints while OpenCV/ffmpeg run (NOTE: this does not
    # capture C-level stdout of the libraries themselves).  try/finally ensures
    # sys.stdout is always restored and the devnull handle is closed even when
    # encoding fails -- the original leaked the open devnull file object and
    # left stdout redirected on error.
    devnull = open(os.devnull, "w")
    sys.stdout = devnull
    try:
        fourcc = VideoWriter_fourcc(*decode)
        videoWriter = VideoWriter(tmp_path, fourcc, fps, (o_w, o_h))
        for img in imgseq:
            img = np.uint8(img)
            if img.shape[0] == 3:
                # needs to be in shape of oh, ow, 3
                img = img.transpose(1, 2, 0)
            if rgb_to_bgr:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = resize(img, (o_w, o_h))
            videoWriter.write(img)
        videoWriter.release()
        # Re-encode the raw writer output with h264 for broad playability,
        # then drop the intermediate file.
        (
            ffmpeg
            .input(tmp_path)
            .output(path, vcodec="h264", loglevel="error")
            .overwrite_output()
            .run()
        )
        os.remove(tmp_path)
    finally:
        sys.stdout = sys.__stdout__
        devnull.close()
    if verbose:
        print("Video saved to", path, "with ", len(imgseq), " total frames.")
    return path
| 33.960938 | 97 | 0.652634 | import os
import json
import ffmpeg
import pickle
import sys
import matplotlib.pyplot as plt
from cv2 import VideoWriter, VideoWriter_fourcc, resize
import numpy as np
import cv2
def imgseq2video(imgseq, name="pick_up", decode="mp4v", folder=None, fps=3, o_h=500, o_w=500,
                 full_path=None, rgb_to_bgr=True, verbose=True):
    """
    Generate a video from a img sequence list.
    :param imgseq: RGB image frames.
    :param name: video file name.
    :param decode: video decoder type, X264 is not working.
    :param folder: saved to which folder.
    :param fps: fps of saved video.
    :param o_h: height of video.
    :param o_w: width of video
    :param full_path: full path to the video, if not None, overwrite folder and name.
    :param rgb_to_bgr: convert rgb image to bgr img.
    :param verbose: whether to print save path.
    :return: path of the saved video, or None when imgseq is empty.
    """
    if len(imgseq) < 1:
        print("[WARNING] Try to save empty video.")
        return
    if full_path is not None:
        assert ".mp4" in full_path[-4:], "Full path should end with .mp4"
        tmp_path = full_path[:-4] + "tmp" + ".mp4"
        path = full_path
    else:
        tmp_path = name + "tmp.mp4" if folder is None else os.path.join(folder, name + "tmp.mp4")
        path = name + ".mp4" if folder is None else os.path.join(folder, name + ".mp4")
    # Silence Python-level prints while OpenCV/ffmpeg run (NOTE: this does not
    # capture C-level stdout of the libraries themselves).  try/finally ensures
    # sys.stdout is always restored and the devnull handle is closed even when
    # encoding fails -- the original leaked the open devnull file object and
    # left stdout redirected on error.
    devnull = open(os.devnull, "w")
    sys.stdout = devnull
    try:
        fourcc = VideoWriter_fourcc(*decode)
        videoWriter = VideoWriter(tmp_path, fourcc, fps, (o_w, o_h))
        for img in imgseq:
            img = np.uint8(img)
            if img.shape[0] == 3:
                # needs to be in shape of oh, ow, 3
                img = img.transpose(1, 2, 0)
            if rgb_to_bgr:
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = resize(img, (o_w, o_h))
            videoWriter.write(img)
        videoWriter.release()
        # Re-encode the raw writer output with h264 for broad playability,
        # then drop the intermediate file.
        (
            ffmpeg
            .input(tmp_path)
            .output(path, vcodec="h264", loglevel="error")
            .overwrite_output()
            .run()
        )
        os.remove(tmp_path)
    finally:
        sys.stdout = sys.__stdout__
        devnull.close()
    if verbose:
        print("Video saved to", path, "with ", len(imgseq), " total frames.")
    return path
def save_into_json(save_obj, folder, file_name="test", full_path=None, verbose=True):
    """Serialize *save_obj* as a JSON file and return the path written.

    :param save_obj: any JSON-serializable object.
    :param folder: directory used when *full_path* is not given.
    :param file_name: base name (without extension) used when *full_path* is not given.
    :param full_path: explicit destination path; overrides folder/file_name.
    :param verbose: print the destination path after saving.
    :return: the path the object was written to.
    """
    if full_path is None:
        full_path = os.path.join(folder, str(file_name) + ".json")
    # 'with' guarantees the handle is closed even if json.dump raises
    # (the original left the file open on serialization errors).
    with open(full_path, 'w', encoding='utf-8') as gt_file:
        json.dump(save_obj, gt_file)
    if verbose:
        print("Current obj saved at", full_path)
    return full_path
def read_from_json(folder, file_name="test", full_path=None, verbose=False):
    """Load and return the object stored in a JSON file.

    :param folder: directory used when *full_path* is not given.
    :param file_name: base name (without extension) used when *full_path* is not given.
    :param full_path: explicit source path; overrides folder/file_name.
    :param verbose: print the source path after reading.
    :return: the deserialized object.
    """
    if full_path is None:
        full_path = os.path.join(folder, str(file_name) + ".json")
    # Read as UTF-8 to match save_into_json (the original relied on the locale
    # default encoding), and use 'with' so the handle is closed even when
    # json.load raises.
    with open(full_path, encoding='utf-8') as file_obj:
        data_obj = json.load(file_obj)
    if verbose:
        print("Read obj from", full_path)
    return data_obj
def save_into_img(img_matrix, folder=None, img_name=None, verbose=False):
    """Write *img_matrix* to ``<folder>/<img_name>.jpg`` via matplotlib and return the path."""
    target = os.path.join(folder, img_name + ".jpg")
    plt.imsave(target, img_matrix, dpi=1000)
    if verbose:
        print("Cur img saved at", os.path.join(target))
    return target
def save_into_pkl(save_obj, full_path=None, name="test", folder="", verbose=False):
    """Pickle *save_obj* to disk and return the path written.

    Args:
        save_obj: Any picklable object.
        full_path: Explicit destination path; overrides folder/name.
        name: Base name (without extension) used when *full_path* is not given.
        folder: Directory used when *full_path* is not given.
        verbose: If True, print the destination path.

    Returns:
        str: The path the pickle file was written to.
    """
    if full_path is None:
        full_path = os.path.join(folder, str(name) + '.pkl')
    # ``with`` guarantees the handle is closed even if pickle.dump raises.
    with open(full_path, 'wb') as output:
        pickle.dump(save_obj, output)
    if verbose:
        print("Current obj saved at", full_path)
    return full_path
def read_from_pkl(name="test", folder="", full_path=None):
    """Unpickle and return an object from disk.

    Args:
        name: Base name (without extension) used when *full_path* is not given.
        folder: Directory used when *full_path* is not given.
        full_path: Explicit source path; overrides folder/name.

    Returns:
        The unpickled object.
    """
    if full_path is None:
        full_path = os.path.join(folder, str(name) + ".pkl")
    # ``with`` guarantees the handle is closed even if pickle.load raises.
    with open(full_path, 'rb') as pkl_file:
        return_obj = pickle.load(pkl_file)
    return return_obj
def load_routine_action(exp_folder, file_name, routine_num, routine_ablation):
    """Load the first *routine_num* routines from a JSON routine library.

    *routine_ablation*, when non-empty, selects an alternative routine key
    instead of the default "routines".
    """
    library = read_from_json(folder=exp_folder, file_name=file_name, verbose=True)
    key = routine_ablation if routine_ablation != "" else "routines"
    return library[key][:routine_num]
def save_routine_action(result_data, exp_folder):
    """Persist the routine library to ``<exp_folder>/routine_library.json``."""
    save_into_json(result_data, folder=exp_folder, file_name="routine_library")
| 1,895 | 0 | 161 |
41a186477bffc2cec0c8c825201fa2f3c56e3e41 | 2,132 | py | Python | examples/apitest.py | ellethee/argparseinator | f333282429a81c6965e93472fa24bde203275b31 | [
"MIT"
] | 5 | 2017-06-16T08:11:16.000Z | 2018-12-17T15:55:11.000Z | examples/apitest.py | ellethee/argparseinator | f333282429a81c6965e93472fa24bde203275b31 | [
"MIT"
] | null | null | null | examples/apitest.py | ellethee/argparseinator | f333282429a81c6965e93472fa24bde203275b31 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ArgParseInator test
"""
__file_name__ = "apitest.py"
__author__ = "luca"
__version__ = "1.0.0"
__date__ = "2014-11-18"
import argparseinator
from argparseinator import arg, ap_arg, class_args
# Top-level command: prints the positional ``name`` plus the optional
# ``--surname`` and the shared ``--address`` option declared on the
# ArgParseInator instance in the __main__ block below.
@argparseinator.arg("name", help="The name to print")
@argparseinator.arg('-s', '--surname', default='', help="optional surname")
def print_name(name, surname, address):
    """
    Will print the passed name.
    """
    # NOTE(review): Python 2 ``print`` statement -- this example targets
    # Python 2 only and is a SyntaxError under Python 3.
    print "Printing the name...", name, surname, address
# Command registered under the explicit CLI name "foo" rather than the
# function's own name.
@argparseinator.arg(cmd_name="foo")
def foo_name():
    """
    print foo.
    """
    print "foo"
# ``@class_args`` turns the class into a command group: every ``@arg()``
# method becomes a subcommand receiving the __shared_arguments__ values.
@class_args
class CommandsContainer(object):
    """
    CommandsContainer class.
    """
    prefix = "The name is"
    # Class-level option, applies to the whole command group.
    __arguments__ = [ap_arg('--arguments', help="Class arguments")]
    # Arguments shared by every subcommand defined in this class.
    __shared_arguments__ = [
        ap_arg('name', help="The name"),
        ap_arg('--prefix', help="string prefix", default='We have')]
    @arg()
    def name(self, name, prefix):
        """
        Print the name.
        """
        print prefix, 'name', name
    @arg()
    def surname(self, name, prefix):
        """
        Print the surname.
        """
        print prefix, 'surname', name
    @arg()
    def nickname(self, name, prefix):
        """
        Print the nickname.
        """
        print prefix, "nickname", name
# Second command group, exposed under the explicit group name ``greet``.
@class_args
class Greetings(object):
    """
    Greeting command.
    """
    __cmd_name__ = 'greet'
    __arguments__ = [ap_arg(
        '-p', '--prefix', help='greeting prefix', default="We say")]
    __shared_arguments__ = [ap_arg('name', help='the name')]
    @arg()
    def ciao(self, name, prefix):
        """
        Say ciao.
        """
        print prefix, 'Ciao', 'to', name
    @arg()
    def hello(self, name, prefix):
        """
        Say hello.
        """
        print prefix, 'hello', 'to', name
if __name__ == "__main__":
    # Build the CLI with one global ``--address`` option and dispatch to the
    # command selected on the command line.
    inator = argparseinator.ArgParseInator(
        description="Silly script",
        args=[
            ap_arg('--address', help='Person address', default='Home'),
        ]
    )
    inator.check_command()
| 21.535354 | 75 | 0.563321 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ArgParseInator test
"""
__file_name__ = "apitest.py"
__author__ = "luca"
__version__ = "1.0.0"
__date__ = "2014-11-18"
import argparseinator
from argparseinator import arg, ap_arg, class_args
# Top-level command: prints the positional ``name`` plus the optional
# ``--surname`` and the shared ``--address`` option declared on the
# ArgParseInator instance in the __main__ block below.
@argparseinator.arg("name", help="The name to print")
@argparseinator.arg('-s', '--surname', default='', help="optional surname")
def print_name(name, surname, address):
    """
    Will print the passed name.
    """
    # NOTE(review): Python 2 ``print`` statement -- this example targets
    # Python 2 only and is a SyntaxError under Python 3.
    print "Printing the name...", name, surname, address
# Command registered under the explicit CLI name "foo" rather than the
# function's own name.
@argparseinator.arg(cmd_name="foo")
def foo_name():
    """
    print foo.
    """
    print "foo"
# ``@class_args`` turns the class into a command group: every ``@arg()``
# method becomes a subcommand receiving the __shared_arguments__ values.
@class_args
class CommandsContainer(object):
    """
    CommandsContainer class.
    """
    prefix = "The name is"
    # Class-level option, applies to the whole command group.
    __arguments__ = [ap_arg('--arguments', help="Class arguments")]
    # Arguments shared by every subcommand defined in this class.
    __shared_arguments__ = [
        ap_arg('name', help="The name"),
        ap_arg('--prefix', help="string prefix", default='We have')]
    @arg()
    def name(self, name, prefix):
        """
        Print the name.
        """
        print prefix, 'name', name
    @arg()
    def surname(self, name, prefix):
        """
        Print the surname.
        """
        print prefix, 'surname', name
    @arg()
    def nickname(self, name, prefix):
        """
        Print the nickname.
        """
        print prefix, "nickname", name
# Second command group, exposed under the explicit group name ``greet``.
@class_args
class Greetings(object):
    """
    Greeting command.
    """
    __cmd_name__ = 'greet'
    __arguments__ = [ap_arg(
        '-p', '--prefix', help='greeting prefix', default="We say")]
    __shared_arguments__ = [ap_arg('name', help='the name')]
    @arg()
    def ciao(self, name, prefix):
        """
        Say ciao.
        """
        print prefix, 'Ciao', 'to', name
    @arg()
    def hello(self, name, prefix):
        """
        Say hello.
        """
        print prefix, 'hello', 'to', name
if __name__ == "__main__":
    # Build the CLI with one global ``--address`` option and dispatch to the
    # command selected on the command line.
    inator = argparseinator.ArgParseInator(
        description="Silly script",
        args=[
            ap_arg('--address', help='Person address', default='Home'),
        ]
    )
    inator.check_command()
| 0 | 0 | 0 |
51315191981b5f4db2f2d9fffee1e702fa665a4d | 1,789 | py | Python | examples/copod_interpretability.py | yuezhao9210/py-Anomaly-Detection | bb3a14ea4df149e3773fa34116dfc62e1c8d5c89 | [
"BSD-2-Clause"
] | 2 | 2017-10-07T21:41:48.000Z | 2017-10-08T02:51:12.000Z | examples/copod_interpretability.py | gian21391/pyod | bb3a14ea4df149e3773fa34116dfc62e1c8d5c89 | [
"BSD-2-Clause"
] | null | null | null | examples/copod_interpretability.py | gian21391/pyod | bb3a14ea4df149e3773fa34116dfc62e1c8d5c89 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Example of using Copula Based Outlier Detector (COPOD) for outlier detection
Sample wise interpretation is provided here.
"""
# Author: Winston Li <jk_zhengli@hotmail.com>
# License: BSD 2 clause
from __future__ import division
from __future__ import print_function
import os
import sys
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname("__file__"), '..')))
from scipy.io import loadmat
from sklearn.model_selection import train_test_split
from pyod.models.copod import COPOD
from pyod.utils.utility import standardizer
if __name__ == "__main__":
    # Define data file and read X and y
    # Generate some data if the source data is missing
    mat_file = 'cardio.mat'
    mat = loadmat(os.path.join('data', mat_file))
    X = mat['X']
    y = mat['y'].ravel()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4,
                                                        random_state=1)
    # standardizing data for processing
    # NOTE(review): X_train_norm / X_test_norm are computed but never used
    # below -- the detector is fit on the raw X_train. Confirm intended.
    X_train_norm, X_test_norm = standardizer(X_train, X_test)
    # train COPOD detector
    clf_name = 'COPOD'
    clf = COPOD()
    # you could try parallel version as well.
    # clf = COPOD(n_jobs=2)
    clf.fit(X_train)
    # get the prediction labels and outlier scores of the training data
    y_train_pred = clf.labels_  # binary labels (0: inliers, 1: outliers)
    y_train_scores = clf.decision_scores_  # raw outlier scores
    # Explain the first training sample via the fitted detector's
    # per-feature plot; index 0 refers to X_train's first row.
    print('The first sample is an outlier', y_train[0])
    clf.explain_outlier(0)
    # we could see feature 7, 16, and 20 is above the 0.99 cutoff
    # and play a more important role in deciding it is an outlier.
| 31.385965 | 79 | 0.693125 | # -*- coding: utf-8 -*-
"""Example of using Copula Based Outlier Detector (COPOD) for outlier detection
Sample wise interpretation is provided here.
"""
# Author: Winston Li <jk_zhengli@hotmail.com>
# License: BSD 2 clause
from __future__ import division
from __future__ import print_function
import os
import sys
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname("__file__"), '..')))
from scipy.io import loadmat
from sklearn.model_selection import train_test_split
from pyod.models.copod import COPOD
from pyod.utils.utility import standardizer
if __name__ == "__main__":
    # Define data file and read X and y
    # Generate some data if the source data is missing
    mat_file = 'cardio.mat'
    mat = loadmat(os.path.join('data', mat_file))
    X = mat['X']
    y = mat['y'].ravel()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4,
                                                        random_state=1)
    # standardizing data for processing
    # NOTE(review): X_train_norm / X_test_norm are computed but never used
    # below -- the detector is fit on the raw X_train. Confirm intended.
    X_train_norm, X_test_norm = standardizer(X_train, X_test)
    # train COPOD detector
    clf_name = 'COPOD'
    clf = COPOD()
    # you could try parallel version as well.
    # clf = COPOD(n_jobs=2)
    clf.fit(X_train)
    # get the prediction labels and outlier scores of the training data
    y_train_pred = clf.labels_  # binary labels (0: inliers, 1: outliers)
    y_train_scores = clf.decision_scores_  # raw outlier scores
    # Explain the first training sample via the fitted detector's
    # per-feature plot; index 0 refers to X_train's first row.
    print('The first sample is an outlier', y_train[0])
    clf.explain_outlier(0)
    # we could see feature 7, 16, and 20 is above the 0.99 cutoff
    # and play a more important role in deciding it is an outlier.
| 0 | 0 | 0 |
402a38cc05b831560371cf1e7be6698ac1465844 | 658 | py | Python | utilities/exclude/open3d_utilities.py | bootml/agent | 84235db931d6e4ef956962961c619994898ebdd5 | [
"Apache-2.0"
] | null | null | null | utilities/exclude/open3d_utilities.py | bootml/agent | 84235db931d6e4ef956962961c619994898ebdd5 | [
"Apache-2.0"
] | null | null | null | utilities/exclude/open3d_utilities.py | bootml/agent | 84235db931d6e4ef956962961c619994898ebdd5 | [
"Apache-2.0"
] | 1 | 2018-09-27T14:31:41.000Z | 2018-09-27T14:31:41.000Z | import numpy as np
import open3d
if __name__ == "__main__":
  # Demo: load a point cloud, voxel-downsample it, estimate normals, render.
  print("Load a ply point cloud, print it, and render it")
  pcd = open3d.read_point_cloud('/home/heider/Datasets/pointclouds/office.ply')
  print(pcd)
  print(np.asarray(pcd.points))
  # open3d.draw_geometries([pcd])
  # Fixed message: the code uses voxel_size=0.1 but the old text said 0.05.
  print("Downsample the point cloud with a voxel of 0.1")
  downsampled = open3d.voxel_down_sample(pcd, voxel_size=0.1)
  # open3d.draw_geometries([downpcd])
  print("Recompute the normal of the downsampled point cloud")
  open3d.estimate_normals(downsampled, search_param=open3d.KDTreeSearchParamHybrid(
      radius=0.1, max_nn=30))
  open3d.draw_geometries([downsampled])
| 32.9 | 83 | 0.75228 | import numpy as np
import open3d
if __name__ == "__main__":
  # Demo: load a point cloud, voxel-downsample it, estimate normals, render.
  print("Load a ply point cloud, print it, and render it")
  pcd = open3d.read_point_cloud('/home/heider/Datasets/pointclouds/office.ply')
  print(pcd)
  print(np.asarray(pcd.points))
  # open3d.draw_geometries([pcd])
  # Fixed message: the code uses voxel_size=0.1 but the old text said 0.05.
  print("Downsample the point cloud with a voxel of 0.1")
  downsampled = open3d.voxel_down_sample(pcd, voxel_size=0.1)
  # open3d.draw_geometries([downpcd])
  print("Recompute the normal of the downsampled point cloud")
  open3d.estimate_normals(downsampled, search_param=open3d.KDTreeSearchParamHybrid(
      radius=0.1, max_nn=30))
  open3d.draw_geometries([downsampled])
| 0 | 0 | 0 |
e09353a2f58856b6f9193371e036223a45ff61bf | 7,272 | py | Python | arxiv/train.py | ShiboYao/EigLearn | 2fa865e629607487487c5b990257c0f4df095aa0 | [
"MIT"
] | 1 | 2022-03-31T03:59:00.000Z | 2022-03-31T03:59:00.000Z | arxiv/train.py | ShiboYao/EigLearn | 2fa865e629607487487c5b990257c0f4df095aa0 | [
"MIT"
] | null | null | null | arxiv/train.py | ShiboYao/EigLearn | 2fa865e629607487487c5b990257c0f4df095aa0 | [
"MIT"
] | 1 | 2021-12-07T11:35:45.000Z | 2021-12-07T11:35:45.000Z | import argparse
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch_sparse import fill_diag, sum as sparsesum, mul
import torch_geometric.transforms as T
from gcn import GCN
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import eigsh
from ogb.nodeproppred import PygNodePropPredDataset, Evaluator
from logger import Logger
def sym_normalize_adj(adj):
    """Symmetric normalization: return D^-1/2 * A * D^-1/2 as a COO matrix."""
    mat = sp.coo_matrix(adj)
    row_sums = np.asarray(mat.sum(axis=1)).flatten()
    # Clamp zero degrees to eps so the inverse square root stays finite.
    inv_sqrt_deg = np.maximum(row_sums, np.finfo(float).eps) ** -0.5
    scaler = sp.diags(inv_sqrt_deg)
    return mat.dot(scaler).transpose().dot(scaler).tocoo()
def row_normalize(adj):
    """Row-stochastic normalization: return D^-1 * A as a COO matrix."""
    mat = sp.coo_matrix(adj)
    row_sums = np.asarray(mat.sum(axis=1)).flatten()
    # Clamp zero degrees to eps to avoid division by zero.
    inv_deg = sp.diags(1. / np.maximum(row_sums, np.finfo(float).eps))
    return inv_deg.dot(mat).tocoo()
def preprocess_high_order_adj(adj, order, eps):
    """A higher-order polynomial with sparsification"""
    # Row-normalize first so powers of ``adj`` remain row-stochastic.
    adj = row_normalize(adj)
    adj_sum = adj
    cur_adj = adj
    # Accumulate A + A^2 + ... + A^order, then average over ``order`` terms.
    for i in range(1, order):
        cur_adj = cur_adj.dot(adj)
        adj_sum += cur_adj
    adj_sum /= order
    # Sparsify: drop self-loops and entries below ``eps``, then re-add the
    # identity as explicit self-loops.
    adj_sum.setdiag(0)
    adj_sum.data[adj_sum.data<eps] = 0
    adj_sum.eliminate_zeros()
    adj_sum += sp.eye(adj.shape[0])
    # Symmetrize (A + A^T) and apply symmetric normalization.
    return sym_normalize_adj(adj_sum + adj_sum.T)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    coo = sparse_mx.tocoo().astype(np.float32)
    # Stack row/col indices into the 2 x nnz int64 layout torch expects.
    index_array = np.vstack((coo.row, coo.col)).astype(np.int64)
    indices = torch.from_numpy(index_array)
    values = torch.from_numpy(coo.data)
    return torch.sparse.FloatTensor(indices, values, torch.Size(coo.shape))
# NOTE(review): this copy of the file had an orphaned ``@torch.no_grad()``
# decorator directly above the __main__ guard (its decorated function was
# removed), which is a SyntaxError -- a decorator must precede a def/class.
# The stray decorator has been dropped. ``main`` is not defined in this
# truncated copy; restore it before running.
if __name__ == "__main__":
    main()
| 34.961538 | 74 | 0.588284 | import argparse
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch_sparse import fill_diag, sum as sparsesum, mul
import torch_geometric.transforms as T
from gcn import GCN
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import eigsh
from ogb.nodeproppred import PygNodePropPredDataset, Evaluator
from logger import Logger
def sym_normalize_adj(adj):
    """Symmetric normalization: return D^-1/2 * A * D^-1/2 as a COO matrix."""
    mat = sp.coo_matrix(adj)
    row_sums = np.asarray(mat.sum(axis=1)).flatten()
    # Clamp zero degrees to eps so the inverse square root stays finite.
    inv_sqrt_deg = np.maximum(row_sums, np.finfo(float).eps) ** -0.5
    scaler = sp.diags(inv_sqrt_deg)
    return mat.dot(scaler).transpose().dot(scaler).tocoo()
def row_normalize(adj):
    """Row-stochastic normalization: return D^-1 * A as a COO matrix."""
    mat = sp.coo_matrix(adj)
    row_sums = np.asarray(mat.sum(axis=1)).flatten()
    # Clamp zero degrees to eps to avoid division by zero.
    inv_deg = sp.diags(1. / np.maximum(row_sums, np.finfo(float).eps))
    return inv_deg.dot(mat).tocoo()
def preprocess_high_order_adj(adj, order, eps):
    """A higher-order polynomial with sparsification"""
    # Row-normalize first so powers of ``adj`` remain row-stochastic.
    adj = row_normalize(adj)
    adj_sum = adj
    cur_adj = adj
    # Accumulate A + A^2 + ... + A^order, then average over ``order`` terms.
    for i in range(1, order):
        cur_adj = cur_adj.dot(adj)
        adj_sum += cur_adj
    adj_sum /= order
    # Sparsify: drop self-loops and entries below ``eps``, then re-add the
    # identity as explicit self-loops.
    adj_sum.setdiag(0)
    adj_sum.data[adj_sum.data<eps] = 0
    adj_sum.eliminate_zeros()
    adj_sum += sp.eye(adj.shape[0])
    # Symmetrize (A + A^T) and apply symmetric normalization.
    return sym_normalize_adj(adj_sum + adj_sum.T)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    coo = sparse_mx.tocoo().astype(np.float32)
    # Stack row/col indices into the 2 x nnz int64 layout torch expects.
    index_array = np.vstack((coo.row, coo.col)).astype(np.int64)
    indices = torch.from_numpy(index_array)
    values = torch.from_numpy(coo.data)
    return torch.sparse.FloatTensor(indices, values, torch.Size(coo.shape))
def train(model, data, train_idx, optimizer):
    """Run one full-batch training step; return the NLL loss as a float."""
    model.train()
    optimizer.zero_grad()
    # Forward pass over the whole graph, then select only the training rows.
    out = model(data.x, data.adj_t)[train_idx]
    # Labels arrive as shape (N, 1); squeeze to (N,) for nll_loss.
    loss = F.nll_loss(out, data.y.squeeze(1)[train_idx])
    loss.backward()
    optimizer.step()
    return loss.item()
@torch.no_grad()
def test(model, data, split_idx, evaluator):
    """Evaluate accuracy on the train/valid/test splits (gradient-free)."""
    model.eval()
    out = model(data.x, data.adj_t)
    # Predicted class = argmax over log-probs; keepdim retains the (N, 1)
    # shape the OGB evaluator expects.
    y_pred = out.argmax(dim=-1, keepdim=True)
    train_acc = evaluator.eval({
        'y_true': data.y[split_idx['train']],
        'y_pred': y_pred[split_idx['train']],
    })['acc']
    valid_acc = evaluator.eval({
        'y_true': data.y[split_idx['valid']],
        'y_pred': y_pred[split_idx['valid']],
    })['acc']
    test_acc = evaluator.eval({
        'y_true': data.y[split_idx['test']],
        'y_pred': y_pred[split_idx['test']],
    })['acc']
    return train_acc, valid_acc, test_acc
def main():
    """Train and evaluate the eigen-augmented GCN on ogbn-arxiv.

    For each run: (1) train all GCN weights for ``--epochs`` epochs, then
    (2) fine-tune only ``model.delta`` (the eigenvalue-adjustment
    parameters) for ``--epochs_s`` epochs. Results are tracked in two
    separate loggers.
    """
    parser = argparse.ArgumentParser(description='OGBN-Arxiv (GNN)')
    parser.add_argument('--device', type=int, default=0)
    parser.add_argument('--log_steps', type=int, default=1)
    parser.add_argument('--mat', type=str, default='sym')
    parser.add_argument('--hidden_channels', type=int, default=256)
    parser.add_argument('--k', type=int, default=40)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--lr_s', type=float, default=0.002)
    parser.add_argument('--epochs', type=int, default=500)
    parser.add_argument('--epochs_s', type=int, default=50)
    parser.add_argument('--runs', type=int, default=10)
    args = parser.parse_args()
    print(args)
    device = f'cuda:{args.device}' if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)
    dataset = PygNodePropPredDataset(name='ogbn-arxiv',
                                     transform=T.ToSparseTensor())
    data = dataset[0]
    # Build a normalized adjacency: plain symmetric normalization, or the
    # sparsified higher-order polynomial when --mat is not 'sym'.
    adj_t = data.adj_t.to_symmetric()
    adj_t = adj_t.to_scipy('coo')
    if args.mat=='sym':
        adj_t = sym_normalize_adj(adj_t + sp.eye(adj_t.shape[0]))
    else:
        adj_t = preprocess_high_order_adj(adj_t,3,1e-4)
    data.adj_t = sparse_mx_to_torch_sparse_tensor(adj_t)
    data = data.to(device)
    adj = adj_t
    # Top-k eigenpairs (largest magnitude) of the normalized adjacency.
    # NOTE(review): ``.cuda()`` is unconditional here even though ``device``
    # may be CPU -- this crashes without a GPU. Confirm intended.
    eigval, eigvec_mat = eigsh(adj, k=args.k, tol=1e-8, which='LM')
    eigvec_mat = torch.FloatTensor(eigvec_mat).cuda()
    # NOTE(review): the official OGB split is replaced by a random
    # permutation of the same sizes, so results are not comparable to the
    # standard leaderboard split.
    split_idx = dataset.get_idx_split()
    train_num = split_idx['train'].shape[0]
    valid_num = split_idx['valid'].shape[0]
    test_num = split_idx['test'].shape[0]
    idx = torch.randperm(train_num + valid_num + test_num)
    split_idx['train'] = idx[:train_num]
    split_idx['valid'] = idx[train_num:(train_num+valid_num)]
    split_idx['test'] = idx[-test_num:]
    train_idx = split_idx['train'].to(device)
    model = GCN(data.num_features, args.hidden_channels,
                dataset.num_classes, args.k, eigvec_mat,
                args.dropout).to(device)
    evaluator = Evaluator(name='ogbn-arxiv')
    logger1 = Logger(args.runs, args)
    logger2 = Logger(args.runs, args)
    for run in range(args.runs):
        model.reset_parameters()
        # Phase 1: optimize all GCN layer/batch-norm parameters.
        optimizer = optim.Adam([
            {'params':model.gc1_weight},
            {'params':model.gc1_bias},
            {'params':model.bn1.weight},
            {'params':model.bn1.bias},
            {'params':model.gc2_weight},
            {'params':model.gc2_bias},
            {'params':model.bn2.weight},
            {'params':model.bn2.bias},
            {'params':model.gc3_weight},
            {'params':model.gc3_bias},
            ],
            lr=args.lr)
        for epoch in range(1, 1 + args.epochs):
            loss = train(model, data, train_idx, optimizer)
            result = test(model, data, split_idx, evaluator)
            logger1.add_result(run, result)
            if epoch % args.log_steps == 0:
                train_acc, valid_acc, test_acc = result
                print(f'Run: {run + 1:02d}, '
                      f'Epoch: {epoch:02d}, '
                      f'Loss: {loss:.4f}, '
                      f'Train: {100 * train_acc:.2f}%, '
                      f'Valid: {100 * valid_acc:.2f}% '
                      f'Test: {100 * test_acc:.2f}%')
        logger1.print_statistics(run)
        # Phase 2: fine-tune only the eigenvalue-adjustment parameters.
        optimizer = optim.Adam([
            {'params':model.delta},
            ],
            lr=args.lr_s)
        for epoch in range(1, 1 + args.epochs_s):
            loss = train(model, data, train_idx, optimizer)
            result = test(model, data, split_idx, evaluator)
            logger2.add_result(run, result)
            if epoch % args.log_steps == 0:
                train_acc, valid_acc, test_acc = result
                print(f'Run: {run + 1:02d}, '
                      f'Epoch: {epoch:02d}, '
                      f'Loss: {loss:.4f}, '
                      f'Train: {100 * train_acc:.2f}%, '
                      f'Valid: {100 * valid_acc:.2f}% '
                      f'Test: {100 * test_acc:.2f}%')
        logger2.print_statistics(run)
    logger1.print_statistics()
    logger2.print_statistics()
if __name__ == "__main__":
    main()
| 5,307 | 0 | 68 |
28c6b752e1f2d4d2a7ec36965fffd27a5f0e0f16 | 9,829 | py | Python | explore.py | Talon24/explore | fc4202af49827afe0d82e694b0059be860db18c6 | [
"MIT"
] | 1 | 2021-03-16T13:44:57.000Z | 2021-03-16T13:44:57.000Z | explore.py | Talon24/explore | fc4202af49827afe0d82e694b0059be860db18c6 | [
"MIT"
] | null | null | null | explore.py | Talon24/explore | fc4202af49827afe0d82e694b0059be860db18c6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Human readable object exploration module.
It is designed to be more verbose than the dir()-function, while being more
compact than help().
"""
from __future__ import print_function
__author__ = "Talon24"
__license__ = "MIT"
__version__ = "0.1.10"
__maintainer__ = "Talon24"
__url__ = "https://github.com/Talon24/explore"
__status__ = "Developement"
__all__ = ["explore", "explore_object", "explore_signature"]
import pydoc
import inspect
import itertools
import colorama
import terminaltables
# import pkg_resources
colorama.init()
TABLETYPE = terminaltables.DoubleTable
COLORIZE = True
# _MAPPING = pkg_resources.resource_string("explore", "mapping.json")
# Isn't created in a subdirectory without more than one module.
_MAPPING = {
"__add__": "+",
"__sub__": "-",
"__mul__": "*",
"__truediv__": "/",
"__floordiv__": "//",
"__matmul__": "@",
"__pow__": "**",
"__mod__": "%",
"__divmod__": "divmod",
"__and__": "&",
"__or__": "|",
"__xor__": "^",
"__lshift__": "<<",
"__rshift__": ">>",
"__iadd__": "+=",
"__isub__": "-=",
"__imul__": "*=",
"__itruediv__": "/=",
"__ifloordiv__": "//=",
"__imatmul__": "@=",
"__ipow__": "**=",
"__imod__": "%=",
"__iand__": "&=",
"__ior__": "|=",
"__ixor__": "^=",
"__ilshift__": "<<=",
"__irshift__": ">>=",
"__eq__": "==",
"__ne__": "!=",
"__lt__": "<",
"__gt__": ">",
"__leq__": "<=",
"__geq__": ">=",
"__invert__": "~",
"__pos__": "+()",
"__neg__": "-()",
"__abs__": "abs",
"__len__": "len",
"__int__": "int",
"__float__": "float",
"__round__": "round",
"__enter__": "with:",
"__await__": "await",
"__contains__": "in",
"__getitem__": "[]",
"__setitem__": "[] = x",
"__delitem__": "del x",
"__call__": "()"
}
def colored(data, color):
    """Wrap *data* in *color* plus a reset code, unless coloring is disabled."""
    if not COLORIZE:
        return data
    return "{color}{data}{reset}".format(color=color, data=data,
                                         reset=colorama.Style.RESET_ALL)
def _map_dunders(thing, items):
    """Match dunder methods to the operator/construct they are related to."""
    ops = [_MAPPING[name] for name in items if name in _MAPPING]
    # Special case: hash. A class can carry a __hash__ attribute yet be
    # unhashable (e.g. list's __hash__ is None), so test its truthiness.
    if "__hash__" in items and thing.__hash__:
        ops.append("hash")
    return ops
def _prune_data(thing, data):
"""Move items out of the Data row."""
remappable = ("method_descriptor", "builtin_function_or_method")
uninteresting = ("PytestTester", "_Feature")
for item in data["Data"][:]:
typename = type(getattr(thing, item)).__name__
if typename in remappable or typename in uninteresting:
if typename in remappable:
if inspect.ismodule(thing):
data["Functions"].append(item)
else:
data["Methods"].append(item)
data["Data"].remove(item)
def _prune_arguments_list(data, header):
"""Remove default information from list of arguments if all are unset."""
type_index = header.index("Type")
if all(entry[type_index] == "Any" for entry in data):
for entry in data:
del entry[type_index]
del header[type_index]
kind_index = header.index("Kind")
if all(entry[kind_index] == "Positional Or Keyword" for entry in data):
for entry in data:
del entry[kind_index]
del header[kind_index]
def explore_signature(thing, show_hidden=False):
    """Show information about a function and its parameters as a table."""
    try:
        signature = inspect.signature(thing)
    except ValueError as exc:
        # Many C-level builtins raise ValueError from inspect.signature;
        # point the user at the official docs for the common ones.
        print(colored("{!r} does not reveal its signature.".format(
            thing), colorama.Fore.RED))
        standard_builtins = (__import__, breakpoint, dir, getattr, iter,
                             max, min, next, print, vars)
        if thing in standard_builtins:
            print(colored("Check the documentation at "
                          "https://docs.python.org/3/library/functions.html#{}"
                          " .".format(thing.__name__), colorama.Fore.RED))
        return
    empty = inspect.Signature.empty
    header = ["Argument", "Default", "Type", "Kind"]
    data = []
    return_type = signature.return_annotation
    # One table row per parameter: name, default repr, annotation, kind.
    for name, parameter in signature.parameters.items():
        # kind = parameter.kind.name.replace("_", " ").title()
        kind = parameter.kind.description
        default = parameter.default
        default = repr(default) if default is not empty else "---"
        annotation = parameter.annotation
        annotation = annotation.__name__ if annotation is not empty else "Any"
        data.append([name, default, annotation, kind])
    # Coloring: yellow for self/cls, red for required (non-variadic,
    # no-default) arguments.
    for row in data:
        if row[0] in ("self", "cls"):
            row[0] = colored(row[0], colorama.Fore.YELLOW)
        elif row[1] == "---" and not row[3].startswith("var"):
            # Required argument, as no default is set.
            # Variadic is allowed to be empty, though.
            row[0] = colored(row[0], colorama.Fore.RED)
    if not show_hidden:
        _prune_arguments_list(data, header)
    # Convert to Table
    table = TABLETYPE([header] + data)
    if not inspect.isclass(thing):
        table.title = " Function {} ".format(thing.__name__)
        if return_type is not inspect.Signature.empty:
            table.title += "-> {} ".format(return_type.__name__)
    else:
        table.title = " Constructor "
    # First sentence of the docstring serves as the description line.
    description = pydoc.getdoc(thing).split(".")[0]
    if description:
        print(" Description:\n{}.".format(description))
    if not len(data) == 0:
        print(table.table)
    else:
        print("This Function takes no arguments.")
def explore_object(thing, show_hidden=False):
    """Show dir(thing) as a table to make it more human readable."""
    items = set(dir(thing))
    data = dict()
    # Extract members, assign them to categories; each filter removes its
    # matches from ``items`` so every member lands in exactly one column.
    data["Dunders"] = [
        item for item in items if item.startswith("__") and item.endswith("__")]
    items.difference_update(data["Dunders"])
    data["Secrets"] = [
        item for item in items if item.startswith("_")]
    items.difference_update(data["Secrets"])
    data["Constants"] = [
        item for item in items if item.isupper()]
    items.difference_update(data["Constants"])
    data["Modules"] = [
        item for item in items if inspect.ismodule(getattr(thing, item))]
    items.difference_update(data["Modules"])
    data["Methods"] = [
        item for item in items if inspect.ismethod(getattr(thing, item))]
    items.difference_update(data["Methods"])
    data["Functions"] = [
        item for item in items if inspect.isfunction(getattr(thing, item))]
    items.difference_update(data["Functions"])
    data["Classes"] = [
        item for item in items if inspect.isclass(getattr(thing, item))]
    items.difference_update(data["Classes"])
    data["Data"] = list(items)
    # Map dunders to their operators (e.g. __add__ -> "+") for the Ops column.
    data["Ops"] = _map_dunders(thing, data["Dunders"])
    _prune_data(thing, data)
    # color operators
    data["Ops"] = [colored(text, colorama.Fore.LIGHTBLUE_EX)
                   for text in data["Ops"]]
    if not show_hidden:
        hidden_names = ["Secrets", "Dunders"]
        for name in hidden_names:
            try:
                del data[name]
            except KeyError:
                pass
    # color types
    newdata = []
    for item in data["Data"]:
        type_ = colored(type(getattr(thing, item)).__name__,
                        colorama.Fore.LIGHTCYAN_EX)
        newdata.append("{}: {}".format(item, type_))
    data["Data"] = newdata
    # list-of-colums to list-of-rows
    with_header = [
        [key] + sorted(value) for key, value in data.items() if len(value) > 0]
    rotated = [row for row in itertools.zip_longest(*with_header, fillvalue="")]
    table = TABLETYPE(rotated)
    # Modules/classes expose __name__; instances fall back to the class name.
    try:
        table.title = " {}: {} ".format(type(thing).__name__, thing.__name__)
    except AttributeError:
        table.title = " Class {} ".format(type(thing).__name__)
    descr = pydoc.getdoc(thing).split(".")[0]
    if descr:
        print(" Description:\n{}.".format(descr))
    print(table.table)
def explore(thing, show_hidden=False):
    """Show what you can do with an object.

    Dispatches to explore_signature for callables, explore_object for plain
    objects, and both for classes (members plus constructor signature).
    Note that built-in objects or functions might not be matched correctly.
    """
    is_callable_thing = (inspect.isfunction(thing)
                         or inspect.ismethod(thing)
                         # isbuiltin can miss, e.g. print, namedtuple
                         or inspect.isbuiltin(thing))
    if is_callable_thing:
        explore_signature(thing, show_hidden=show_hidden)
    elif inspect.isclass(thing):
        explore_object(thing, show_hidden=show_hidden)
        explore_signature(thing, show_hidden=show_hidden)
    else:
        explore_object(thing, show_hidden=show_hidden)
if __name__ == '__main__':
    # Ad-hoc manual smoke tests; the commented lines are alternative demo
    # targets kept around for quick experimentation.
    # explore(1)
    # explore("")
    # explore(list)
    # explore(complex)
    # def a_function(pos: int, /, both: float, untyped=4, *, kw_only: str = "blue") -> complex:
    #     """Kinds of arguments."""
    # def variadic_function(*args, reverse=True, **kwargs):
    #     """Variadic arguments."""
    # explore(a_function)
    # explore(variadic_function)
    # import requests
    # explore(requests.Request)
    import datetime
    explore(datetime.datetime.now())
    # import pathlib
    # explore(pathlib)
    import fractions
    explore(fractions.Fraction)
    # explore(open)
    explore(property)
| 33.206081 | 95 | 0.603825 | # -*- coding: utf-8 -*-
"""Human readable object exploration module.
It is designed to be more verbose than the dir()-function, while being more
compact than help().
"""
from __future__ import print_function
__author__ = "Talon24"
__license__ = "MIT"
__version__ = "0.1.10"
__maintainer__ = "Talon24"
__url__ = "https://github.com/Talon24/explore"
__status__ = "Developement"
__all__ = ["explore", "explore_object", "explore_signature"]
import pydoc
import inspect
import itertools
import colorama
import terminaltables
# import pkg_resources
colorama.init()
TABLETYPE = terminaltables.DoubleTable
COLORIZE = True
# _MAPPING = pkg_resources.resource_string("explore", "mapping.json")
# Isn't created in a subdirectory without more than one module.
_MAPPING = {
"__add__": "+",
"__sub__": "-",
"__mul__": "*",
"__truediv__": "/",
"__floordiv__": "//",
"__matmul__": "@",
"__pow__": "**",
"__mod__": "%",
"__divmod__": "divmod",
"__and__": "&",
"__or__": "|",
"__xor__": "^",
"__lshift__": "<<",
"__rshift__": ">>",
"__iadd__": "+=",
"__isub__": "-=",
"__imul__": "*=",
"__itruediv__": "/=",
"__ifloordiv__": "//=",
"__imatmul__": "@=",
"__ipow__": "**=",
"__imod__": "%=",
"__iand__": "&=",
"__ior__": "|=",
"__ixor__": "^=",
"__ilshift__": "<<=",
"__irshift__": ">>=",
"__eq__": "==",
"__ne__": "!=",
"__lt__": "<",
"__gt__": ">",
"__leq__": "<=",
"__geq__": ">=",
"__invert__": "~",
"__pos__": "+()",
"__neg__": "-()",
"__abs__": "abs",
"__len__": "len",
"__int__": "int",
"__float__": "float",
"__round__": "round",
"__enter__": "with:",
"__await__": "await",
"__contains__": "in",
"__getitem__": "[]",
"__setitem__": "[] = x",
"__delitem__": "del x",
"__call__": "()"
}
def colored(data, color):
    """Wrap *data* in *color* plus a reset code, unless coloring is disabled."""
    if not COLORIZE:
        return data
    return "{color}{data}{reset}".format(color=color, data=data,
                                         reset=colorama.Style.RESET_ALL)
def _map_dunders(thing, items):
    """Match dunder methods to the operator/construct they are related to."""
    ops = [_MAPPING[name] for name in items if name in _MAPPING]
    # Special case: hash. A class can carry a __hash__ attribute yet be
    # unhashable (e.g. list's __hash__ is None), so test its truthiness.
    if "__hash__" in items and thing.__hash__:
        ops.append("hash")
    return ops
def _prune_data(thing, data):
"""Move items out of the Data row."""
remappable = ("method_descriptor", "builtin_function_or_method")
uninteresting = ("PytestTester", "_Feature")
for item in data["Data"][:]:
typename = type(getattr(thing, item)).__name__
if typename in remappable or typename in uninteresting:
if typename in remappable:
if inspect.ismodule(thing):
data["Functions"].append(item)
else:
data["Methods"].append(item)
data["Data"].remove(item)
def _prune_arguments_list(data, header):
"""Remove default information from list of arguments if all are unset."""
type_index = header.index("Type")
if all(entry[type_index] == "Any" for entry in data):
for entry in data:
del entry[type_index]
del header[type_index]
kind_index = header.index("Kind")
if all(entry[kind_index] == "Positional Or Keyword" for entry in data):
for entry in data:
del entry[kind_index]
del header[kind_index]
def explore_signature(thing, show_hidden=False):
    """Show information about a function and its parameters as a table."""
    try:
        signature = inspect.signature(thing)
    except ValueError as exc:
        # Many C-level builtins raise ValueError from inspect.signature;
        # point the user at the official docs for the common ones.
        print(colored("{!r} does not reveal its signature.".format(
            thing), colorama.Fore.RED))
        standard_builtins = (__import__, breakpoint, dir, getattr, iter,
                             max, min, next, print, vars)
        if thing in standard_builtins:
            print(colored("Check the documentation at "
                          "https://docs.python.org/3/library/functions.html#{}"
                          " .".format(thing.__name__), colorama.Fore.RED))
        return
    empty = inspect.Signature.empty
    header = ["Argument", "Default", "Type", "Kind"]
    data = []
    return_type = signature.return_annotation
    # One table row per parameter: name, default repr, annotation, kind.
    for name, parameter in signature.parameters.items():
        # kind = parameter.kind.name.replace("_", " ").title()
        kind = parameter.kind.description
        default = parameter.default
        default = repr(default) if default is not empty else "---"
        annotation = parameter.annotation
        annotation = annotation.__name__ if annotation is not empty else "Any"
        data.append([name, default, annotation, kind])
    # Coloring: yellow for self/cls, red for required (non-variadic,
    # no-default) arguments.
    for row in data:
        if row[0] in ("self", "cls"):
            row[0] = colored(row[0], colorama.Fore.YELLOW)
        elif row[1] == "---" and not row[3].startswith("var"):
            # Required argument, as no default is set.
            # Variadic is allowed to be empty, though.
            row[0] = colored(row[0], colorama.Fore.RED)
    if not show_hidden:
        _prune_arguments_list(data, header)
    # Convert to Table
    table = TABLETYPE([header] + data)
    if not inspect.isclass(thing):
        table.title = " Function {} ".format(thing.__name__)
        if return_type is not inspect.Signature.empty:
            table.title += "-> {} ".format(return_type.__name__)
    else:
        table.title = " Constructor "
    # First sentence of the docstring serves as the description line.
    description = pydoc.getdoc(thing).split(".")[0]
    if description:
        print(" Description:\n{}.".format(description))
    if not len(data) == 0:
        print(table.table)
    else:
        print("This Function takes no arguments.")
def explore_object(thing, show_hidden=False):
    """Show dir(thing) as a table to make it more human readable.

    Members are bucketed into categories (dunders, private names, constants,
    modules, methods, functions, classes, remaining data) and printed as one
    column per non-empty category. With show_hidden=False the "Secrets" and
    "Dunders" columns are dropped.
    """
    items = set(dir(thing))
    data = dict()
    # Extract members, assign them to categories.
    # NOTE: order matters — each filter removes its matches from `items`,
    # so later categories only see what earlier ones did not claim.
    data["Dunders"] = [
        item for item in items if item.startswith("__") and item.endswith("__")]
    items.difference_update(data["Dunders"])
    data["Secrets"] = [
        item for item in items if item.startswith("_")]
    items.difference_update(data["Secrets"])
    data["Constants"] = [
        item for item in items if item.isupper()]
    items.difference_update(data["Constants"])
    data["Modules"] = [
        item for item in items if inspect.ismodule(getattr(thing, item))]
    items.difference_update(data["Modules"])
    data["Methods"] = [
        item for item in items if inspect.ismethod(getattr(thing, item))]
    items.difference_update(data["Methods"])
    data["Functions"] = [
        item for item in items if inspect.isfunction(getattr(thing, item))]
    items.difference_update(data["Functions"])
    data["Classes"] = [
        item for item in items if inspect.isclass(getattr(thing, item))]
    items.difference_update(data["Classes"])
    data["Data"] = list(items)
    # Translate supported dunders into operator symbols (file-local helper).
    data["Ops"] = _map_dunders(thing, data["Dunders"])
    _prune_data(thing, data)
    # color operators
    data["Ops"] = [colored(text, colorama.Fore.LIGHTBLUE_EX)
                   for text in data["Ops"]]
    if not show_hidden:
        hidden_names = ["Secrets", "Dunders"]
        for name in hidden_names:
            try:
                del data[name]
            except KeyError:
                pass
    # color types: annotate each data member with its type name
    newdata = []
    for item in data["Data"]:
        type_ = colored(type(getattr(thing, item)).__name__,
                        colorama.Fore.LIGHTCYAN_EX)
        newdata.append("{}: {}".format(item, type_))
    data["Data"] = newdata
    # list-of-columns to list-of-rows (transpose, padding short columns)
    with_header = [
        [key] + sorted(value) for key, value in data.items() if len(value) > 0]
    rotated = [row for row in itertools.zip_longest(*with_header, fillvalue="")]
    table = TABLETYPE(rotated)
    try:
        table.title = " {}: {} ".format(type(thing).__name__, thing.__name__)
    except AttributeError:
        # Instances usually have no __name__; fall back to the class name.
        table.title = " Class {} ".format(type(thing).__name__)
    descr = pydoc.getdoc(thing).split(".")[0]
    if descr:
        print(" Description:\n{}.".format(descr))
    print(table.table)
def explore(thing, show_hidden=False):
    """Show what you can do with an object.

    Dispatches to explore_signature for callables and explore_object for
    everything else; classes get both the member table and the constructor
    signature. Note that built-in objects or functions might not be matched
    correctly.
    """
    callable_checks = (inspect.isfunction, inspect.ismethod, inspect.isbuiltin)
    if any(check(thing) for check in callable_checks):
        # isbuiltin can miss, e.g. print, namedtuple
        explore_signature(thing, show_hidden=show_hidden)
        return
    explore_object(thing, show_hidden=show_hidden)
    if inspect.isclass(thing):
        explore_signature(thing, show_hidden=show_hidden)
if __name__ == '__main__':
    # Manual smoke tests — uncomment individual lines to inspect other objects.
    # explore(1)
    # explore("")
    # explore(list)
    # explore(complex)
    # def a_function(pos: int, /, both: float, untyped=4, *, kw_only: str = "blue") -> complex:
    #     """Kinds of arguments."""
    # def variadic_function(*args, reverse=True, **kwargs):
    #     """Variadic arguments."""
    # explore(a_function)
    # explore(variadic_function)
    # import requests
    # explore(requests.Request)
    import datetime
    # Instance -> member table only.
    explore(datetime.datetime.now())
    # import pathlib
    # explore(pathlib)
    import fractions
    # Class -> member table plus constructor signature.
    explore(fractions.Fraction)
    # explore(open)
    explore(property)
| 0 | 0 | 0 |
068c4af7eb01ba82d5fbd3bb1bf85efbb1c36451 | 4,989 | py | Python | burger_war_dev/scripts/networks/maskNet.py | kenkenjlab/burger_war_dev | 0d6a85bca7896fa5cd7abc32cb082902523de983 | [
"BSD-3-Clause"
] | 2 | 2021-11-30T00:45:06.000Z | 2021-12-27T06:08:28.000Z | burger_war_dev/scripts/networks/maskNet.py | kenkenjlab/burger_war_dev | 0d6a85bca7896fa5cd7abc32cb082902523de983 | [
"BSD-3-Clause"
] | 9 | 2021-02-23T02:39:39.000Z | 2021-03-18T03:14:46.000Z | burger_war_dev/scripts/networks/maskNet.py | kenkenjlab/burger_war_dev | 0d6a85bca7896fa5cd7abc32cb082902523de983 | [
"BSD-3-Clause"
] | 2 | 2021-02-19T02:06:41.000Z | 2021-11-29T11:53:53.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
if __name__ == '__main__':
    # Smoke test: build the network and time a few forward passes.
    # NOTE(review): MaskNet and example() are not defined in this chunk;
    # presumably they come from the full module — confirm before running.
    net = MaskNet(5)
    # Summarize
    #from torchinfo import summary
    # Batch shapes for (pose, lidar, image, mask) inputs.
    data_sizes = [
        (2, 2),
        (2, 1, 360),
        (2, 3, 95, 160),
        (2, 18),
    ]
    #summary(net, data_sizes)
    # Test run
    for _ in range(3):
        example(net, 'cpu')
    if torch.cuda.is_available():
        example(net, 'cuda:0')
    else:
        print('* CUDA not available.')
| 32.607843 | 86 | 0.48166 | import torch
import torch.nn as nn
import torch.nn.functional as F
class MaskNet(nn.Module):
    """Q-value network fusing pose, lidar, camera-image and mask observations.

    Each modality is encoded separately, merged, expanded into 18 feature
    rows, and collapsed by a learned mask vector; the head is either a plain
    linear layer or a dueling (value + advantage) head depending on `duel`.
    """
    def __init__(self, output_size, duel=True):
        """
        Args:
            output_size (int): size of output
            duel (bool): if True, use a dueling value/advantage head.
        """
        super(MaskNet, self).__init__()
        self.duel = duel
        '''
        # input state
        self.state = {
            "pose": self.pose, # (N, 2)
            "lidar": self.lidar_ranges, # (N, 1, 360)
            "image": self.image, # (N, 3, 480, 640)
            "mask": self.mask, # (N, 18)
        }
        '''
        # Architecture loosely inspired by:
        # OpenAI: Emergent Tool Use from Multi-Agent Interaction
        # https://openai.com/blog/emergent-tool-use/
        # https://pira-nino.hatenablog.com/entry/introduce_openai_hide-and-seek
        # Core network
        # Lidar encoder: circular padding because the scan wraps around 360°.
        self.block_lidar = nn.Sequential(
            # Input size: (1, 1, 360)
            nn.Conv1d(1, 16, 3, padding=2, padding_mode='circular'), # (N, 16, 360)
            nn.BatchNorm1d(16),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=6), # (N, 16, 60)
            nn.Conv1d(16, 32, 3, padding=2, padding_mode='circular'), # (N, 32, 60)
            nn.BatchNorm1d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool1d(kernel_size=3), # (N, 32, 20)
            nn.Conv1d(32, 64, 3, padding=2, padding_mode='circular'), # (N, 64, 20)
            nn.BatchNorm1d(64),
            nn.ReLU(inplace=True),
            nn.Flatten(),
            nn.Linear(64*20, 64)
        )
        # Image encoder for downscaled 95x160 RGB frames.
        self.block_image = nn.Sequential(
            # Input size: (1, 3, 95, 160)
            nn.Conv2d(3, 16, kernel_size=3, padding=1), # (N, 16, 95, 160)
            nn.BatchNorm2d(16),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=4), # (N, 16, 23, 40)
            nn.Conv2d(16, 32, kernel_size=3, padding=1), # (N, 32, 23, 40)
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=4), # (N, 32, 5, 10)
            nn.Conv2d(32, 32, kernel_size=3, padding=1), # (N, 32, 5, 10)
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Flatten(),
            nn.Linear(32*5*10, 64)
        )
        # middle: 2 (pose) + 64 (lidar) + 64 (image) = 130 features in.
        self.fc1 = nn.Sequential(
            nn.Linear(130, 192),
            nn.BatchNorm1d(192),
            nn.ReLU(inplace=True),
        )
        self.conv1 = nn.Sequential(
            nn.Conv1d(3, 18, kernel_size=3, padding=1),
            nn.BatchNorm1d(18),
            nn.ReLU(inplace=True),
        )
        self.mask_fc = nn.Sequential(
            nn.Linear(18, 18),
            nn.ReLU(inplace=True),
        )
        # head
        self.fc2 = nn.Linear(64, output_size)
        # Dueling network
        self.fc_adv = nn.Linear(64, output_size)
        self.fc_val = nn.Linear(64, 1)
    def forward(self, pose, lidar, image, mask):
        """Return Q-values of shape (N, output_size).

        Args:
            pose:  (N, 2) tensor.
            lidar: (N, 1, 360) tensor.
            image: (N, 3, 95, 160) tensor.
            mask:  (N, 18) tensor used to weight the 18 feature rows.
        """
        # Core network
        ## Process each input
        x = self.block_lidar(lidar) # (N, 64)
        y = self.block_image(image) # (N, 64)
        ## Merge intermediate results
        w = torch.cat([pose, x, y], dim=1) # (N, 130)
        ## Middle
        w = self.fc1(w)
        w = w.view(-1, 3, 64) # (N, 3, 64)
        w = self.conv1(w) # (N, 18, 64)
        ## Mask: collapse the 18 rows into one 64-d vector via a learned
        ## per-row weighting (batched (1,18) x (18,64) matmul).
        m = self.mask_fc(mask) # (N, 18)
        m = m.view(-1, 1, 18) # (N, 1, 18)
        w = torch.matmul(m, w) # (N, 1, 64)
        w = w.view(-1, 64) # (N, 64)
        ## Head
        if not self.duel:
            w = self.fc2(w)
        else:
            # Dueling network: Q = V + A - mean(A)
            adv = self.fc_adv(w)
            val = self.fc_val(w).expand(-1, adv.size(1))
            w = val + adv - adv.mean(1, keepdim=True).expand(-1, adv.size(1))
        return w
if __name__ == '__main__':
    # Smoke test: build the network and time a few forward passes.
    def example(net, device_name):
        """Run one timed forward pass of `net` on `device_name` and print it."""
        # Prepare sample datasets
        device = torch.device(device_name)
        net = net.to(device)
        pose = torch.randn(data_sizes[0]).to(device)
        lidar = torch.randn(data_sizes[1]).to(device)
        image = torch.randn(data_sizes[2]).to(device)
        mask = torch.randn(data_sizes[3]).to(device)
        # Run
        import time
        print('[{}] Processing...'.format(device))
        start_time = time.time()
        val = net(pose, lidar, image, mask)
        elapsed_time = time.time() - start_time
        # Bug fix: time.time() differences are seconds; convert to
        # milliseconds so the value matches the "[ms]" label.
        print('[{}] Done. {:.3f}[ms]'.format(device, elapsed_time * 1000))
        print(val[0])
    net = MaskNet(5)
    # Summarize
    #from torchinfo import summary
    # Batch shapes for (pose, lidar, image, mask) inputs.
    data_sizes = [
        (2, 2),
        (2, 1, 360),
        (2, 3, 95, 160),
        (2, 18),
    ]
    #summary(net, data_sizes)
    # Test run
    for _ in range(3):
        example(net, 'cpu')
    if torch.cuda.is_available():
        example(net, 'cuda:0')
    else:
        print('* CUDA not available.')
| 1,552 | 2,900 | 49 |
bebedd99a22484acf07e7ea12e8e6a17bd59da15 | 3,345 | py | Python | diag/draw_exon_sequence_graph.py | debamitro/rna-seq-diag | 04c26d37fb04ec61abba97eb4578ecb547c3f80d | [
"Apache-2.0"
] | 1 | 2022-02-08T20:11:20.000Z | 2022-02-08T20:11:20.000Z | diag/draw_exon_sequence_graph.py | debamitro/rna-seq-diag | 04c26d37fb04ec61abba97eb4578ecb547c3f80d | [
"Apache-2.0"
] | null | null | null | diag/draw_exon_sequence_graph.py | debamitro/rna-seq-diag | 04c26d37fb04ec61abba97eb4578ecb547c3f80d | [
"Apache-2.0"
] | null | null | null | #!python3
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import PatchCollection
if __name__ == "__main__":
from exons import make_exon_shapes, make_exons_unscaled, make_exon_exon_lines
else:
from diag.exons import make_exon_shapes, make_exons_unscaled, make_exon_exon_lines
# Drawing defaults: horizontal padding (in sequence coordinates) around the
# exon track, and the color palette cycled through per sequence line.
configuration = {
    "left_margin": 1000,
    "right_margin": 1000,
    "line_colors": ["xkcd:indigo", "xkcd:forest green", "xkcd:navy blue"],
}
def draw_exon_sequence_graph(
    sequence_graph, y_exons=130, file_name=None, title=None, to_scale=True
):
    """Given a dictionary with two entries
    - 'exons' an array of exon start and end offsets
    - 'sequences' an array of exon sequences
    draws a graph using different colors for each sequence.
    The goal is to show different exon sequences formed from
    one universal set of exons

    Args:
        sequence_graph: dict with keys 'exons', 'sequences' and optional 'id'
            (used as the y-axis label).
        y_exons: vertical position of the exon track.
        file_name: if None, show interactively; otherwise save to this path.
        title: optional plot title.
        to_scale: if False, exons are remapped to equal, unscaled widths.
    """
    _, ax = plt.subplots()
    exons = sequence_graph["exons"]
    if not to_scale:
        # Remap genomic offsets onto an even grid; keep the mapping so the
        # per-sequence exon tuples can be translated the same way below.
        unscaled_mapping, unscaled_exons = make_exons_unscaled(exons)
        exons = unscaled_exons
    patches = make_exon_shapes(exons, y_exons)
    p = PatchCollection(patches)
    sequence_height = 5
    sequence_index = 0
    draw_position = ["mid", "top", "bottom"]
    for sequence in sequence_graph["sequences"]:
        if not to_scale:
            unscaled_sequence = [unscaled_mapping[x] for x in sequence]
            sequence = unscaled_sequence
        # Consecutive exon pairs become connecting arcs/lines.
        exon_pairs = zip(sequence, sequence[1:])
        make_exon_exon_lines(
            exon_pairs,
            ax,
            y_exons,
            height=sequence_height,
            draw_at=draw_position[sequence_index],
            color=configuration["line_colors"][sequence_index],
        )
        sequence_height += 5
        sequence_index += 1
        # Cycle colors/positions when there are more sequences than colors.
        if sequence_index >= len(configuration["line_colors"]):
            sequence_index = 0
    xmin = exons[0][0] - configuration["left_margin"]
    xmax = exons[len(exons) - 1][1] + configuration["right_margin"]
    if to_scale:
        xtick_interval = (xmax - xmin) / 10
        ax.set_xticks(np.arange(xmin, xmax, xtick_interval))
    else:
        # Unscaled coordinates are meaningless; hide the x ticks.
        ax.set_xticks([])
    ax.set_yticks([y_exons])
    if "id" in sequence_graph:
        ax.set_yticklabels([sequence_graph["id"]])
    ax.set_xbound(xmin, xmax)
    ax.set_ybound(0, 200)
    ax.add_collection(p)
    if title is not None:
        ax.set_title(title)
    if file_name is None:
        plt.show()
    else:
        plt.savefig(file_name)
if __name__ == "__main__":
    # Demo: render a two-sequence graph (unscaled) to out4.png.
    # Contrived example using some exons from DDX11L1
    draw_exon_sequence_graph(
        {
            "id": "gr1",
            "exons": [
                (12010, 12057),
                (12179, 12227),
                (12613, 12619),
                (12975, 13052),
                (13221, 13374),
                (13453, 13670),
            ],
            "sequences": [
                [(12010, 12057), (12179, 12227), (12613, 12619), (12975, 13052)],
                [
                    (12010, 12057),
                    (12613, 12619),
                    (12975, 13052),
                    (13221, 13374),
                    (13453, 13670),
                ],
            ],
        },
        file_name="out4.png",
        title="Contrived example using some exons from DDX11L1",
        to_scale=False,
    )
| 29.342105 | 86 | 0.588341 | #!python3
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.collections import PatchCollection
if __name__ == "__main__":
from exons import make_exon_shapes, make_exons_unscaled, make_exon_exon_lines
else:
from diag.exons import make_exon_shapes, make_exons_unscaled, make_exon_exon_lines
# Drawing defaults: horizontal padding (in sequence coordinates) around the
# exon track, and the color palette cycled through per sequence line.
configuration = {
    "left_margin": 1000,
    "right_margin": 1000,
    "line_colors": ["xkcd:indigo", "xkcd:forest green", "xkcd:navy blue"],
}
def draw_exon_sequence_graph(
    sequence_graph, y_exons=130, file_name=None, title=None, to_scale=True
):
    """Plot one set of exons plus the exon sequences built from them.

    `sequence_graph` is a dict with two required entries:
    - 'exons': list of (start, end) exon offsets
    - 'sequences': list of exon sequences, each drawn in its own color
    An optional 'id' entry becomes the y-axis label. If `file_name` is None
    the figure is shown interactively, otherwise it is saved there.
    """
    _, axes = plt.subplots()
    exons = sequence_graph["exons"]
    if not to_scale:
        # Remap genomic offsets onto an even grid; keep the mapping so the
        # per-sequence exon tuples can be translated identically below.
        exon_mapping, exons = make_exons_unscaled(exons)
    exon_patches = PatchCollection(make_exon_shapes(exons, y_exons))
    positions = ["mid", "top", "bottom"]
    palette = configuration["line_colors"]
    line_height = 5
    color_idx = 0
    for sequence in sequence_graph["sequences"]:
        if not to_scale:
            sequence = [exon_mapping[exon] for exon in sequence]
        # Consecutive exon pairs become connecting arcs/lines.
        make_exon_exon_lines(
            zip(sequence, sequence[1:]),
            axes,
            y_exons,
            height=line_height,
            draw_at=positions[color_idx],
            color=palette[color_idx],
        )
        line_height += 5
        # Cycle colors/positions when there are more sequences than colors.
        color_idx = (color_idx + 1) % len(palette)
    xmin = exons[0][0] - configuration["left_margin"]
    xmax = exons[-1][1] + configuration["right_margin"]
    if to_scale:
        axes.set_xticks(np.arange(xmin, xmax, (xmax - xmin) / 10))
    else:
        # Unscaled coordinates are meaningless; hide the x ticks.
        axes.set_xticks([])
    axes.set_yticks([y_exons])
    if "id" in sequence_graph:
        axes.set_yticklabels([sequence_graph["id"]])
    axes.set_xbound(xmin, xmax)
    axes.set_ybound(0, 200)
    axes.add_collection(exon_patches)
    if title is not None:
        axes.set_title(title)
    if file_name is None:
        plt.show()
    else:
        plt.savefig(file_name)
if __name__ == "__main__":
    # Demo: render a two-sequence graph (unscaled) to out4.png.
    # Contrived example using some exons from DDX11L1
    draw_exon_sequence_graph(
        {
            "id": "gr1",
            "exons": [
                (12010, 12057),
                (12179, 12227),
                (12613, 12619),
                (12975, 13052),
                (13221, 13374),
                (13453, 13670),
            ],
            "sequences": [
                [(12010, 12057), (12179, 12227), (12613, 12619), (12975, 13052)],
                [
                    (12010, 12057),
                    (12613, 12619),
                    (12975, 13052),
                    (13221, 13374),
                    (13453, 13670),
                ],
            ],
        },
        file_name="out4.png",
        title="Contrived example using some exons from DDX11L1",
        to_scale=False,
    )
| 0 | 0 | 0 |
ddacbac99937c21f354a657eca2e6fb65184f881 | 3,125 | py | Python | edotor/vision/find_circles.py | dgopstein/DOTFOR | 0fd2508c93494fde19e3bb764e6c81098b664e44 | [
"MIT"
] | null | null | null | edotor/vision/find_circles.py | dgopstein/DOTFOR | 0fd2508c93494fde19e3bb764e6c81098b664e44 | [
"MIT"
] | null | null | null | edotor/vision/find_circles.py | dgopstein/DOTFOR | 0fd2508c93494fde19e3bb764e6c81098b664e44 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import numpy as np
import cv2
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
import scipy
import scipy.signal
import math
import imutils
import img_util
# Exploratory pipeline: detect circular dots on a card image, keep the
# modal-radius ones, fit Hough lines through them and intersect the lines.
# NOTE(review): several helpers (loadCardRegions, hsv_img, displayCircles,
# cluster_1d, ...) come from img_util's star-semantics — confirm there.
card_regions = loadCardRegions()
orig_image = loadImage(card_regions[13]['file'])
image = imutils.resize(orig_image, width=400)
height, width, depth = image.shape
blurred = cv2.blur(image,(3,3),0)
hue, sat, val = hsv_img(blurred)
# --- circle detection on the saturation channel ---
hough_circles = cv2.HoughCircles(sat, cv2.HOUGH_GRADIENT, .5, 10,
              param1=10,
              param2=20,
              minRadius=2,
              maxRadius=15)
circles = np.round(hough_circles[0, :]).astype("int")
print("finished detecting circles: ", len(circles))
displayCircles(image, circles)
destroyWindowOnKey()
radius_mode = radiiMode(circles)
#hist(circles[:,2], 100)
# make a binary image in which each pixel indicates
# if it's within the radius of a circle
angleMode(circles)
# Keep circles within 20% of the modal radius.
sized_cs = circles[np.where(np.logical_and(circles[:,2]>=.8*radius_mode, circles[:,2]<=1.2*radius_mode))]
# NOTE(review): the bare len(...) expressions below are REPL leftovers;
# they have no effect when run as a script.
len(circles)
len(sized_cs)
displayCircles(sat, circles)
displayCircles(sat, sized_cs)
destroyWindowOnKey()
# --- line fitting through the circle centers ---
circle_bin = circleBinImage(sized_cs)
showImage(circle_bin)
lines = cv2.HoughLines(circle_bin,1,np.pi/180,7).reshape(-1, 2)
showImage(drawLines(image, lines))
# Cluster line angles mod 90 degrees to keep the axis-aligned ("cardinal") set.
line_angle_clusters = cluster_1d(lines[:,1] % (math.pi/2), bw=0.05)
cardinal_lines = lines_with_label_in(lines, line_angle_clusters.labels_, [0])
showImage(drawLines(image, cardinal_lines))
clustered_lines = cluster_2d(cardinal_lines, 0.02)
showImage(drawLines(image, clustered_lines))
line_angle_clusters2 = cluster_1d(clustered_lines[:,1], 0.02)
clean_cardinal_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [0])
clean_cardinal_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [1])
showImage(drawLines(image, clean_cardinal_lines))
# Split into the two perpendicular families and intersect every pair.
line_angle_clusters2 = cluster_1d(clustered_lines[:,1], 0.1)
a_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [0])
b_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [1])
a_lines.sort(0)
b_lines.sort(0)
line_pairs = list(itertools.product(a_lines, b_lines))
intersections = [seg_intersect(*polar2seg(*a), *polar2seg(*b))for (a, b) in line_pairs]
intersection_splotches_r = [n_closest(image[:,:,0], inter.astype(np.uint8), d=2) for inter in intersections]
([np.mean(splotch) for splotch in intersection_splotches_r])
# NOTE(review): assumes at least 21 intersections were found.
showImage(n_closest(image, intersections[20].astype(np.uint8), d=1))
showImage(drawLines(image, clustered_lines))
showImage(drawPoints(image, intersections))
print(lines)
print('done')
| 30.048077 | 108 | 0.73888 | #!/usr/bin/env python3
import numpy as np
import cv2
import pandas as pd
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
import scipy
import scipy.signal
import math
import imutils
import img_util
def loadImage(path):
    """Read the image at `path` in BGR color, ignoring any EXIF orientation tag."""
    read_flags = cv2.IMREAD_IGNORE_ORIENTATION | cv2.IMREAD_COLOR
    return cv2.imread(path, read_flags)
# Stage 1: load a card image, blur it, and detect circular dots on the
# saturation channel via the Hough gradient method.
card_regions = loadCardRegions()
orig_image = loadImage(card_regions[13]['file'])
image = imutils.resize(orig_image, width=400)
height, width, depth = image.shape
blurred = cv2.blur(image,(3,3),0)
hue, sat, val = hsv_img(blurred)
hough_circles = cv2.HoughCircles(sat, cv2.HOUGH_GRADIENT, .5, 10,
              param1=10,
              param2=20,
              minRadius=2,
              maxRadius=15)
circles = np.round(hough_circles[0, :]).astype("int")
print("finished detecting circles: ", len(circles))
displayCircles(image, circles)
destroyWindowOnKey()
radius_mode = radiiMode(circles)
#hist(circles[:,2], 100)
# make a binary image in which each pixel indicates
# if it's within the radius of a circle
def circleBinImage(circles):
    """Render each circle center as a filled 1px dot on a black single-channel
    image sized like the working frame (uses module-level height/width)."""
    canvas = np.zeros((height, width, 1), np.uint8)
    for center in circles:
        cv2.circle(canvas, (center[0], center[1]), 1, 255, thickness=cv2.FILLED)
    return canvas
# Stage 2: filter circles to the modal radius, fit Hough lines through their
# centers, keep the two perpendicular line families and intersect them.
angleMode(circles)
# Keep circles within 20% of the modal radius.
sized_cs = circles[np.where(np.logical_and(circles[:,2]>=.8*radius_mode, circles[:,2]<=1.2*radius_mode))]
# NOTE(review): the bare len(...) expressions below are REPL leftovers;
# they have no effect when run as a script.
len(circles)
len(sized_cs)
displayCircles(sat, circles)
displayCircles(sat, sized_cs)
destroyWindowOnKey()
circle_bin = circleBinImage(sized_cs)
showImage(circle_bin)
lines = cv2.HoughLines(circle_bin,1,np.pi/180,7).reshape(-1, 2)
showImage(drawLines(image, lines))
# Cluster line angles mod 90 degrees to keep the axis-aligned ("cardinal") set.
line_angle_clusters = cluster_1d(lines[:,1] % (math.pi/2), bw=0.05)
cardinal_lines = lines_with_label_in(lines, line_angle_clusters.labels_, [0])
showImage(drawLines(image, cardinal_lines))
clustered_lines = cluster_2d(cardinal_lines, 0.02)
showImage(drawLines(image, clustered_lines))
line_angle_clusters2 = cluster_1d(clustered_lines[:,1], 0.02)
clean_cardinal_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [0])
clean_cardinal_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [1])
showImage(drawLines(image, clean_cardinal_lines))
# Split into the two perpendicular families and intersect every pair.
line_angle_clusters2 = cluster_1d(clustered_lines[:,1], 0.1)
a_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [0])
b_lines = lines_with_label_in(clustered_lines, line_angle_clusters2.labels_, [1])
a_lines.sort(0)
b_lines.sort(0)
line_pairs = list(itertools.product(a_lines, b_lines))
intersections = [seg_intersect(*polar2seg(*a), *polar2seg(*b))for (a, b) in line_pairs]
intersection_splotches_r = [n_closest(image[:,:,0], inter.astype(np.uint8), d=2) for inter in intersections]
([np.mean(splotch) for splotch in intersection_splotches_r])
# NOTE(review): assumes at least 21 intersections were found.
showImage(n_closest(image, intersections[20].astype(np.uint8), d=1))
showImage(drawLines(image, clustered_lines))
showImage(drawPoints(image, intersections))
print(lines)
print('done')
| 228 | 0 | 45 |
307bade4b0e1c211ce2807dcc4af9e9d1bedb885 | 21,070 | py | Python | python/rrc_example_package/code/utils.py | takuma-ynd/rrc_example_package | f53cf3191f4c38f4d1f394ccd55b1d935a6a70ba | [
"BSD-3-Clause"
] | null | null | null | python/rrc_example_package/code/utils.py | takuma-ynd/rrc_example_package | f53cf3191f4c38f4d1f394ccd55b1d935a6a70ba | [
"BSD-3-Clause"
] | null | null | null | python/rrc_example_package/code/utils.py | takuma-ynd/rrc_example_package | f53cf3191f4c38f4d1f394ccd55b1d935a6a70ba | [
"BSD-3-Clause"
] | null | null | null | import random
import numpy as np
import pybullet as p
import itertools
from rrc_simulation import visual_objects
from scipy.spatial.transform import Rotation as R
def apply_rotation_z(org_pos, theta):
    '''
    Rotate the 3-D point *org_pos* about the z-axis by *theta* radians.
    The z component is unchanged; x/y go through the standard 2-D
    rotation matrix.
    '''
    px, py, pz = org_pos
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    return px * cos_t - py * sin_t, px * sin_t + py * cos_t, pz
def sample_from_normal_cube(cube_halfwidth, face=None, shrink_region=1.0, avoid_top=False,
                            sample_from_all_faces=False):
    '''
    sample from hypothetical cube that has no rotation and is located at (0, 0, 0)
    NOTE: It does NOT sample point from the bottom face
    It samples points with the following procedure:
    1. choose one of the 5 faces (except the bottom)
    2a. if the top face is chosen, just sample from there
    2b. if a side face is chosen:
        1. sample points from the front face
        2. rotate the sampled points properly according to the selected face

    Args:
        cube_halfwidth: half the edge length of the cube (float)
        face: force a specific face (-2 bottom, -1 top, 0-3 sides); None
            picks one uniformly at random from the allowed set.
        shrink_region: shrink the in-face sampling region by this factor.
        avoid_top: restrict sampling to the four side faces.
        sample_from_all_faces: also allow the bottom face (-2).

    Returns:
        Tuple (x, y, z) of the sampled surface point.
    '''
    # 1. choose one of the faces:
    if avoid_top:
        faces = [0, 1, 2, 3]
    elif sample_from_all_faces:
        faces = [-2, -1, 0, 1, 2, 3]
    else:
        faces = [-1, 0, 1, 2, 3]
    if face is None:
        face = random.choice(faces)
    if face not in faces:
        raise KeyError(f'face {face} is not in the list of allowed faces: {faces}')
    if face == -1:
        # top
        x, y = np.random.uniform(low=-cube_halfwidth * shrink_region,
                                 high=cube_halfwidth * shrink_region, size=2)
        z = cube_halfwidth
    elif face == -2:
        # bottom (only allowed when sample_from_all_faces is enabled)
        x, y = np.random.uniform(low=-cube_halfwidth * shrink_region,
                                 high=cube_halfwidth * shrink_region, size=2)
        z = -cube_halfwidth
    else:
        # one of the side faces
        # sample on the front xz-face
        x_, z_ = np.random.uniform(low=-cube_halfwidth * shrink_region,
                                   high=cube_halfwidth * shrink_region, size=2)
        y_ = -cube_halfwidth
        # apply rotation to the points according to its face direction
        rot_theta = face * np.pi / 2
        x, y, z = apply_rotation_z((x_, y_, z_), rot_theta)
    return x, y, z
def sample_heuristic_points(cube_halfwidth=0.0325, shrink_region=1.0):
    '''
    Sample three grasp points on the normal cube heuristically.

    One point is the center of a randomly chosen side face; the other two
    lie on the opposite face, point-symmetric w.r.t. that face's center and
    at least 0.1 * cube_halfwidth away from it (rejection sampling).

    Args:
        cube_halfwidth: half the edge length of the cube (float)
        shrink_region: shrink the sampling region on the support face (float)

    Returns:
        List of three np.ndarray positions (x, y, z).
    '''
    min_dist = cube_halfwidth * 0.1
    # center of the front face
    x_, z_ = 0, 0
    y_ = -cube_halfwidth
    center_point = (x_, y_, z_)
    # two points that are point symmetric w.r.t. the center of the face
    x_, z_ = 0, 0
    while np.sqrt(x_ ** 2 + z_ ** 2) < min_dist:  # rejection sampling
        x_, z_ = np.random.uniform(low=-cube_halfwidth * shrink_region,
                                   high=cube_halfwidth * shrink_region, size=2)
    y_ = -cube_halfwidth
    x__, z__ = -x_, -z_  # point symmetric w.r.t. the center point
    y__ = y_
    support_point1 = (x_, y_, z_)
    support_point2 = (x__, y__, z__)
    # sample two faces that are in parallel
    faces = [0, 1, 2, 3]
    face = random.choice(faces)
    # Fix: the old `face + 2 % 4` parsed as face + (2 % 4) == face + 2; the
    # extra full turn wrapped out in the rotation, so behavior is unchanged,
    # but the modulo was clearly meant to apply to the sum.
    parallel_face = (face + 2) % 4
    # apply rotation to the points according to its face direction
    sample_points = []
    rot_theta = face * np.pi / 2
    # Fix: np.float (builtin-float alias) was removed from NumPy; use float.
    sample_points.append(np.asarray(apply_rotation_z(center_point, rot_theta),
                                    dtype=float))
    for point in [support_point1, support_point2]:
        rot_theta = parallel_face * np.pi / 2
        sample_points.append(np.asarray(apply_rotation_z(point, rot_theta),
                                        dtype=float))
    return sample_points
def sample_cube_surface_points(cube_halfwidth=0.0325,
                               shrink_region=0.8,
                               num_samples=3,
                               heuristic='pinch'):
    '''
    sample points on the surfaces of the cube except the one at the bottom.
    NOTE: This function only works when the bottom face is fully touching on
    the table.
    Args:
        cube_pos: Position (x, y, z)
        cube_orientation: Orientation as quaternion (x, y, z, w)
        cube_halfwidth: halfwidth of the cube (float)
        shrink_region: shrink the sample region on each plane by the specified
            coefficient (float)
        num_samples: number of points to sample (int)
    Returns:
        List of sampled positions
    '''
    # Dispatch on the sampling strategy; every heuristic except None
    # requires exactly three samples.
    # Backward compatibility
    if heuristic == 'pinch':
        assert num_samples == 3, 'heuristic sampling only supports 3 samples'
        norm_cube_samples = sample_heuristic_points(cube_halfwidth=cube_halfwidth,
                                                    shrink_region=shrink_region)
    elif heuristic == 'center_of_three':
        assert num_samples == 3
        norm_cube_samples = sample_center_of_three(cube_halfwidth=cube_halfwidth)
    elif heuristic == 'center_of_two':
        assert num_samples == 3  #don't use this flag
        norm_cube_samples = sample_center_of_two(cube_halfwidth=cube_halfwidth)
    elif heuristic is None:
        # Uniform sampling over the allowed faces, one draw per sample.
        norm_cube_samples = [sample_from_normal_cube(cube_halfwidth,
                                                     shrink_region=shrink_region)
                             for _ in range(num_samples)]
    else:
        raise KeyError('Unrecognized heuristic value: {}. Use one of ["pinch", "center_of_three", None]'.format(heuristic))
    # apply transformation
    return np.array(norm_cube_samples)
    # sample_points = apply_transform(cube_pos, cube_orientation,
    #                                 np.array(norm_cube_samples))
    #
    # return sample_points
class VisualMarkers:
    '''Visualize spheres on the specified points'''
    # NOTE(review): the method bodies are not present in this chunk —
    # see the full module for the implementation.
class VisualCubeOrientation:
    '''visualize cube orientation by three cylinder'''
    # NOTE(review): the method bodies are not present in this chunk —
    # see the full module for the implementation.
class CylinderMarker:
    """Visualize a cylinder."""
    def __init__(
        self, radius, length, position, orientation, color=(0, 1, 0, 0.5)):
        """
        Create a cylinder marker for visualization
        Args:
            radius (float): radius of cylinder.
            length (float): length of cylinder.
            position: Position (x, y, z)
            orientation: Orientation as quaternion (x, y, z, w)
            color: Color of the marker as a tuple (r, g, b, a)
        """
        # pybullet id of the visual-only cylinder shape
        self.shape_id = p.createVisualShape(
            shapeType=p.GEOM_CYLINDER,
            radius=radius,
            length=length,
            rgbaColor=color
        )
        # pybullet body id used later to re-pose the marker
        self.body_id = p.createMultiBody(
            baseVisualShapeIndex=self.shape_id,
            basePosition=position,
            baseOrientation=orientation
        )
    def set_state(self, position, orientation):
        """Set pose of the marker.
        Args:
            position: Position (x, y, z)
            orientation: Orientation as quaternion (x, y, z, w)
        """
        p.resetBasePositionAndOrientation(
            self.body_id,
            position,
            orientation
        )
import copy
from rrc_simulation.gym_wrapper.envs.cube_env import ActionType
class action_type_to:
    '''
    A context manager that temporarily sets the action type and action space.
    This applies to all wrappers and the original environment recursively ;)
    '''
    # NOTE(review): __enter__/__exit__ are not present in this chunk —
    # see the full module for the implementation.
def repeat(sequence, num_repeat=3):
    '''
    Repeat every element of *sequence* in place, e.g.
    [1,2,3] with num_repeat = 3 --> [1,1,1,2,2,2,3,3,3]
    '''
    repeated = []
    for element in sequence:
        repeated.extend([element] * num_repeat)
    return repeated
def ease_out(sequence, in_rep=1, out_rep=5):
    '''
    create "ease out" motion where an action is repeated for *out_rep* times at the end.

    The first two thirds of *sequence* are repeated *in_rep* times each; over
    the last third, the per-element repeat count ramps linearly from *in_rep*
    up to *out_rep* (via np.interp), so the motion slows down towards the end.
    Returns a flat list of the repeated elements.
    '''
    # Split point: last third of the sequence gets the ramp.
    in_seq_length = len(sequence[:-len(sequence) // 3])
    out_seq_length = len(sequence[-len(sequence) // 3:])
    x = [0, out_seq_length - 1]
    rep = [in_rep, out_rep]
    # Linear ramp of repeat counts over the tail, truncated to ints.
    out_repeats = np.interp(list(range(out_seq_length)), x, rep).astype(int).tolist()
    #in_repeats = np.ones(in_seq_length).astype(int).tolist()
    in_repeats = np.ones(in_seq_length) * in_rep
    in_repeats = in_repeats.astype(int).tolist()
    repeats = in_repeats + out_repeats
    assert len(repeats) == len(sequence)
    seq = [repeat([e], n_rep) for e, n_rep in zip(sequence, repeats)]
    seq = [y for x in seq for y in x]  # flatten it
    return seq
class frameskip_to:
    '''
    A Context Manager that sets action type and action space temporally
    This applies to all wrappers and the origianl environment recursively ;)
    '''
    # NOTE(review): this docstring appears copied from action_type_to; the
    # name suggests it temporarily sets the environment frameskip instead.
    # __enter__/__exit__ are not present in this chunk — confirm against
    # the full module.
class keep_state:
    '''
    A Context Manager that preserves the state of the simulator
    '''
    # NOTE(review): __enter__/__exit__ are not present in this chunk —
    # see the full module for the implementation.
| 35.651438 | 123 | 0.627765 | import random
import numpy as np
import pybullet as p
import itertools
from rrc_simulation import visual_objects
from scipy.spatial.transform import Rotation as R
def set_seed(seed=0):
    """Seed all RNGs used by the project (random, numpy, tensorflow, torch).

    Args:
        seed: integer seed applied to every library (default 0).
    """
    import random
    import numpy as np
    import tensorflow as tf
    import torch
    random.seed(seed)
    np.random.seed(seed)
    # NOTE(review): tf.random.set_random_seed is the TF1-style API; TF2
    # renamed it to tf.random.set_seed — confirm the TF version in use.
    tf.random.set_random_seed(seed)
    # Bug fix: this previously hard-coded torch.manual_seed(0), silently
    # ignoring the `seed` argument for torch.
    torch.manual_seed(seed)
def apply_rotation_z(org_pos, theta):
    '''
    Apply the z-axis rotation matrix (angle *theta*, in radians) to the
    point *org_pos*. Only the xy components change; z passes through.
    '''
    x_in, y_in, z_in = org_pos
    c, s = np.cos(theta), np.sin(theta)
    x_out = c * x_in - s * y_in
    y_out = s * x_in + c * y_in
    return x_out, y_out, z_in
def apply_transform(pos, ori, points):
    """Transform `points` by the rigid pose (pos, ori).

    Args:
        pos: translation (x, y, z).
        ori: orientation quaternion (x, y, z, w), passed to pybullet.
        points: np.ndarray of shape (3,), (N, 3) or (N, 4) (homogeneous).

    Returns:
        np.ndarray of transformed points; non-homogeneous input comes back
        as (N, 3) (a 1-D input is returned with shape (1, 3)).
    """
    # Build the 4x4 homogeneous transform from the quaternion + translation.
    T = np.eye(4)
    T[:3, :3] = np.array(p.getMatrixFromQuaternion(ori)).reshape((3, 3))
    T[:3, -1] = pos
    # Promote a single point to a batch of one.
    if len(points.shape) == 1:
        points = points[None]
    homogeneous = points.shape[-1] == 4
    if not homogeneous:
        # Append w=1 so the translation part applies.
        points_homo = np.ones((points.shape[0], 4))
        points_homo[:, :3] = points
        points = points_homo
    points = T.dot(points.T).T
    if not homogeneous:
        # Strip the homogeneous coordinate again.
        points = points[:, :3]
    return points
def sample_from_normal_cube(cube_halfwidth, face=None, shrink_region=1.0, avoid_top=False,
                            sample_from_all_faces=False):
    '''
    sample from hypothetical cube that has no rotation and is located at (0, 0, 0)
    NOTE: It does NOT sample point from the bottom face
    It samples points with the following procedure:
    1. choose one of the 5 faces (except the bottom)
    2a. if the top face is chosen, just sample from there
    2b. if a side face is chosen:
        1. sample points from the front face
        2. rotate the sampled points properly according to the selected face

    Args:
        cube_halfwidth: half the edge length of the cube (float)
        face: force a specific face (-2 bottom, -1 top, 0-3 sides); None
            picks one uniformly at random from the allowed set.
        shrink_region: shrink the in-face sampling region by this factor.
        avoid_top: restrict sampling to the four side faces.
        sample_from_all_faces: also allow the bottom face (-2).

    Returns:
        Tuple (x, y, z) of the sampled surface point.
    '''
    # 1. choose one of the faces:
    if avoid_top:
        faces = [0, 1, 2, 3]
    elif sample_from_all_faces:
        faces = [-2, -1, 0, 1, 2, 3]
    else:
        faces = [-1, 0, 1, 2, 3]
    if face is None:
        face = random.choice(faces)
    if face not in faces:
        raise KeyError(f'face {face} is not in the list of allowed faces: {faces}')
    if face == -1:
        # top
        x, y = np.random.uniform(low=-cube_halfwidth * shrink_region,
                                 high=cube_halfwidth * shrink_region, size=2)
        z = cube_halfwidth
    elif face == -2:
        # bottom (only allowed when sample_from_all_faces is enabled)
        x, y = np.random.uniform(low=-cube_halfwidth * shrink_region,
                                 high=cube_halfwidth * shrink_region, size=2)
        z = -cube_halfwidth
    else:
        # one of the side faces
        # sample on the front xz-face
        x_, z_ = np.random.uniform(low=-cube_halfwidth * shrink_region,
                                   high=cube_halfwidth * shrink_region, size=2)
        y_ = -cube_halfwidth
        # apply rotation to the points according to its face direction
        rot_theta = face * np.pi / 2
        x, y, z = apply_rotation_z((x_, y_, z_), rot_theta)
    return x, y, z
def sample_heuristic_points(cube_halfwidth=0.0325, shrink_region=1.0):
    '''
    Sample three points on the normal cube heurisitcally.
    One point is sampled on a side face, and the other two points are sampled
    from the face stading on the other side.
    The two points are sampled in a way that they are point symmetric w.r.t.
    the center of the face.

    Returns:
        List of three np.ndarray positions (x, y, z).
    '''
    min_dist = cube_halfwidth * 0.1
    # center of the front face
    x_, z_ = 0, 0
    y_ = -cube_halfwidth
    center_point = (x_, y_, z_)
    # two points that are point symmetric w.r.t. the center of the face
    x_, z_ = 0, 0
    while np.sqrt(x_ ** 2 + z_ ** 2) < min_dist:  # rejection sampling
        x_, z_ = np.random.uniform(low=-cube_halfwidth * shrink_region,
                                   high=cube_halfwidth * shrink_region, size=2)
    y_ = -cube_halfwidth
    x__, z__ = -x_, -z_  # point symetric w.r.t. the center point
    y__ = y_
    support_point1 = (x_, y_, z_)
    support_point2 = (x__, y__, z__)
    # sample two faces that are in parallel
    faces = [0, 1, 2, 3]
    face = random.choice(faces)
    # NOTE(review): precedence makes this face + (2 % 4) == face + 2; the
    # extra full turn wraps out in the rotation, so behavior is unchanged,
    # but (face + 2) % 4 was likely intended.
    parallel_face = face + 2 % 4
    # apply rotation to the points according to its face direction
    sample_points = []
    rot_theta = face * np.pi / 2
    # NOTE(review): np.float is the builtin-float alias removed in
    # NumPy 1.24; use float on modern NumPy.
    sample_points.append(np.asarray(apply_rotation_z(center_point, rot_theta),
                                    dtype=np.float))
    for point in [support_point1, support_point2]:
        rot_theta = parallel_face * np.pi / 2
        sample_points.append(np.asarray(apply_rotation_z(point, rot_theta),
                                        dtype=np.float))
    return sample_points
def sample_center_of_three(cube_halfwidth=0.0325, shrink_region=1.0):
    """Return the centers of three consecutive side faces of the normal cube.

    The starting face is chosen uniformly at random; the other two points are
    the centers of the next faces around the z-axis.

    Args:
        cube_halfwidth: half the edge length of the cube (float)
        shrink_region: unused; kept for signature parity with the samplers.

    Returns:
        List of three np.ndarray positions (x, y, z).
    """
    # center of the front face
    x_, z_ = 0, 0
    y_ = -cube_halfwidth
    center_point = (x_, y_, z_)
    faces = [0, 1, 2, 3]
    sample_points = []
    start = random.choice(faces)
    for i in range(3):
        rot_theta = ((i + start) % 4) * np.pi / 2
        # Fix: np.float (builtin-float alias) was removed from NumPy;
        # the builtin float is the equivalent dtype.
        sample_points.append(np.asarray(apply_rotation_z(center_point, rot_theta),
                                        dtype=float))
    return sample_points
def sample_center_of_two(cube_halfwidth=0.0325, shrink_region=1.0):
    '''
    Sample the center points of two opposite side faces of the cube.
    The third row is an (inf, inf, inf) placeholder so that the result
    still has one entry per finger tip.
    Args:
        cube_halfwidth: halfwidth of the cube (float)
        shrink_region: unused; kept for signature compatibility with the
            other sampling helpers (float)
    Returns:
        np.ndarray of shape (3, 3)
    '''
    # center of the front face
    center_point = (0, -cube_halfwidth, 0)
    faces = [0, 1, 2, 3]
    sample_points = []
    start = random.choice(faces)
    for i in range(2):
        rot_theta = ((2 * i + start) % 4) * np.pi / 2
        # np.float was removed in NumPy 1.24; use the builtin float instead
        sample_points.append(np.asarray(R.from_euler('z', rot_theta).apply(center_point),
                                        dtype=float))
    # hacky position definition: placeholder for the unused third tip
    sample_points.append(np.asarray([np.inf, np.inf, np.inf]))
    return np.asarray(sample_points)
def sample_cube_surface_points(cube_halfwidth=0.0325,
                               shrink_region=0.8,
                               num_samples=3,
                               heuristic='pinch'):
    '''
    sample points on the surfaces of the cube except the one at the bottom.
    NOTE: This function only works when the bottom face is fully touching on
    the table.
    Args:
        cube_halfwidth: halfwidth of the cube (float)
        shrink_region: shrink the sample region on each plane by the specified
                       coefficient (float)
        num_samples: number of points to sample (int); heuristics require 3
        heuristic: 'pinch', 'center_of_three', 'center_of_two', or None for
            uniform sampling on the side faces
    Returns:
        np.ndarray of sampled positions in the normalized-cube frame
    Raises:
        KeyError: if `heuristic` is not one of the recognized values.
    '''
    if heuristic == 'pinch':
        assert num_samples == 3, 'heuristic sampling only supports 3 samples'
        norm_cube_samples = sample_heuristic_points(cube_halfwidth=cube_halfwidth,
                                                    shrink_region=shrink_region)
    elif heuristic == 'center_of_three':
        assert num_samples == 3
        norm_cube_samples = sample_center_of_three(cube_halfwidth=cube_halfwidth)
    elif heuristic == 'center_of_two':
        assert num_samples == 3  # don't use this flag
        norm_cube_samples = sample_center_of_two(cube_halfwidth=cube_halfwidth)
    elif heuristic is None:
        norm_cube_samples = [sample_from_normal_cube(cube_halfwidth,
                                                     shrink_region=shrink_region)
                             for _ in range(num_samples)]
    else:
        # BUG FIX: the message previously omitted the valid 'center_of_two'
        # option handled above.
        raise KeyError('Unrecognized heuristic value: {}. Use one of '
                       '["pinch", "center_of_three", "center_of_two", None]'.format(heuristic))
    return np.array(norm_cube_samples)
class VisualMarkers:
    '''Visualize spheres on the specified points'''

    def __init__(self):
        self.markers = []

    def add(self, points, radius=0.015, color=None):
        # A single point may be given as a flat (x, y, z) sequence.
        if isinstance(points[0], (int, float)):
            points = [points]
        color = (0, 1, 1, 0.5) if color is None else color
        self.markers.extend(
            visual_objects.SphereMaker(radius, pt, color=color)
            for pt in points
        )

    def remove(self):
        # Dropping the references removes the visual objects.
        self.markers = []
class VisualCubeOrientation:
    '''visualize cube orientation by three cylinder'''

    def __init__(self, cube_position, cube_orientation, cube_halfwidth=0.0325):
        self.markers = []
        self.cube_halfwidth = cube_halfwidth
        self.z_axis = np.asarray([0, 0, 1])

        inv_sqrt2 = 1 / np.sqrt(2)
        # Quaternions that rotate the z axis onto x, y and z respectively.
        x_rot = R.from_quat([inv_sqrt2, 0, inv_sqrt2, 0])
        y_rot = R.from_quat([0, inv_sqrt2, inv_sqrt2, 0])
        z_rot = R.from_quat([0, 0, 0, 1])
        for rot, axis in ((x_rot, [1., 0., 0.]),
                          (y_rot, [0., 1., 0.]),
                          (z_rot, [0., 0., 1.])):
            assert np.linalg.norm(rot.apply(self.z_axis) - np.asarray(axis)) < 0.00000001
        self.rotations = [x_rot, y_rot, z_rot]

        cube_rot = R.from_quat(cube_orientation)
        # x: red, y: green, z: blue
        color_cycle = [[1, 0, 0, 0.6], [0, 1, 0, 0.6], [0, 0, 1, 0.6]]
        for rot, color in zip(self.rotations, color_cycle):
            rotation = cube_rot * rot
            # shift each cylinder along its own axis by one halfwidth
            offset = rotation.apply(self.z_axis) * cube_halfwidth
            self.markers.append(
                CylinderMarker(radius=cube_halfwidth / 20,
                               length=cube_halfwidth * 2,
                               position=cube_position + offset,
                               orientation=rotation.as_quat(),
                               color=color)
            )

    def set_state(self, position, orientation):
        '''Move the three axis cylinders to a new cube pose.'''
        cube_rot = R.from_quat(orientation)
        for rot, marker in zip(self.rotations, self.markers):
            rotation = cube_rot * rot
            offset = rotation.apply(self.z_axis) * self.cube_halfwidth
            marker.set_state(position=position + offset,
                             orientation=rotation.as_quat())
class CylinderMarker:
    """Visualize a cylinder."""

    def __init__(
            self, radius, length, position, orientation, color=(0, 1, 0, 0.5)):
        """
        Create a cylinder marker for visualization

        Args:
            radius (float): radius of cylinder.
            length (float): length of cylinder.
            position: Position (x, y, z)
            orientation: Orientation as quaternion (x, y, z, w)
            color: Color of the cube as a tuple (r, b, g, q)
        """
        visual_kwargs = dict(
            shapeType=p.GEOM_CYLINDER,
            radius=radius,
            length=length,
            rgbaColor=color,
        )
        self.shape_id = p.createVisualShape(**visual_kwargs)
        self.body_id = p.createMultiBody(
            baseVisualShapeIndex=self.shape_id,
            basePosition=position,
            baseOrientation=orientation,
        )

    def set_state(self, position, orientation):
        """Set pose of the marker.

        Args:
            position: Position (x, y, z)
            orientation: Orientation as quaternion (x, y, z, w)
        """
        p.resetBasePositionAndOrientation(self.body_id, position, orientation)
def is_valid_action(action, action_type='position'):
    '''
    Check that every entry of `action` lies within the robot action space.
    Args:
        action: array-like action to validate
        action_type: 'position' or 'torque'
    Returns:
        True iff all entries are within the corresponding space bounds.
    Raises:
        ValueError: if action_type is neither 'position' nor 'torque'
            (previously an unknown type crashed with UnboundLocalError).
    '''
    from rrc_simulation.trifinger_platform import TriFingerPlatform
    spaces = TriFingerPlatform.spaces
    if action_type == 'position':
        action_space = spaces.robot_position
    elif action_type == 'torque':
        # BUG FIX: this branch previously used spaces.robot_position,
        # so torque actions were validated against position bounds.
        action_space = spaces.robot_torque
    else:
        raise ValueError('unknown action_type: {}'.format(action_type))
    return (action_space.low <= action).all() and (action <= action_space.high).all()
import copy
from rrc_simulation.gym_wrapper.envs.cube_env import ActionType
class action_type_to:
    '''
    A Context Manager that temporarily sets the action type and action space.
    This applies to all wrappers and the original environment recursively ;)
    '''

    def __init__(self, action_type, env):
        self.action_type = action_type
        self.action_space = self._get_action_space(action_type)
        # remember the outermost wrapper's settings so they can be restored
        self.org_action_type = env.action_type
        self.org_action_space = env.action_space
        self.env = env

    def __enter__(self):
        # walk down the wrapper chain (each wrapper exposes `.env`) and set
        # the new type/space on every layer
        current_env = self.env
        self.set_action_type_and_space(current_env)
        while hasattr(current_env, 'env'):
            current_env = current_env.env
            self.set_action_type_and_space(current_env)

    def __exit__(self, type, value, traceback):
        # restore the original type/space on every layer
        current_env = self.env
        self.revert_action_type_and_space(current_env)
        while hasattr(current_env, 'env'):
            current_env = current_env.env
            self.revert_action_type_and_space(current_env)

    def set_action_type_and_space(self, env):
        env.action_space = self.action_space
        env.action_type = self.action_type

    def revert_action_type_and_space(self, env):
        env.action_space = self.org_action_space
        env.action_type = self.org_action_type

    def _get_action_space(self, action_type):
        '''Map an ActionType to the corresponding gym action space.'''
        import gym
        from rrc_simulation import TriFingerPlatform
        spaces = TriFingerPlatform.spaces
        if action_type == ActionType.TORQUE:
            action_space = spaces.robot_torque.gym
        elif action_type == ActionType.POSITION:
            action_space = spaces.robot_position.gym
        elif action_type == ActionType.TORQUE_AND_POSITION:
            action_space = gym.spaces.Dict(
                {
                    "torque": spaces.robot_torque.gym,
                    "position": spaces.robot_position.gym,
                }
            )
        else:
            # BUG FIX: the ValueError was constructed but never raised,
            # which led to an UnboundLocalError on the return below.
            raise ValueError('unknown action type')
        return action_space
def repeat(sequence, num_repeat=3):
    '''
    Repeat each element of *sequence* `num_repeat` times, preserving order.
    [1,2,3] with num_repeat = 3 --> [1,1,1,2,2,2,3,3,3]
    '''
    repeated = []
    for element in sequence:
        repeated.extend([element] * num_repeat)
    return repeated
def ease_out(sequence, in_rep=1, out_rep=5):
    '''
    create "ease out" motion where an action is repeated for *out_rep* times at the end.
    The first two thirds repeat each action *in_rep* times; over the last
    third the repetition count ramps linearly from *in_rep* to *out_rep*.
    '''
    tail = len(sequence) // 3
    in_len = len(sequence[:-tail])
    out_len = len(sequence[-tail:])
    # linearly interpolate the repetition count across the tail section
    ramp = np.interp(np.arange(out_len),
                     [0, out_len - 1],
                     [in_rep, out_rep]).astype(int).tolist()
    head = np.full(in_len, in_rep).astype(int).tolist()
    repeats = head + ramp
    assert len(repeats) == len(sequence)
    # expand each action according to its repetition count
    return [action for action, count in zip(sequence, repeats)
            for _ in range(count)]
class frameskip_to:
    '''
    A Context Manager that temporarily overrides the environment's frameskip.
    The original frameskip is restored on exit.
    (BUG FIX: the previous docstring was copy-pasted from action_type_to and
    wrongly described setting the action type/space.)
    '''

    def __init__(self, frameskip, env):
        self.frameskip = frameskip
        self.env = env
        # remember the current value so __exit__ can restore it
        self.org_frameskip = env.unwrapped.frameskip

    def __enter__(self):
        self.env.unwrapped.frameskip = self.frameskip

    def __exit__(self, type, value, traceback):
        self.env.unwrapped.frameskip = self.org_frameskip
class keep_state:
    '''
    A Context Manager that preserves the state of the simulator
    '''

    def __init__(self, env):
        self.finger_id = env.platform.simfinger.finger_id
        self.joints = env.platform.simfinger.pybullet_link_indices
        self.cube_id = env.platform.cube.block

    def __enter__(self):
        # snapshot the entire pybullet world state
        self.state_id = p.saveState()

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # roll the simulator back to the snapshot taken on entry
        p.restoreState(stateId=self.state_id)
class IKUtils:
    '''Inverse-kinematics helpers for the simulated TriFinger robot.
    Wraps the finger's pinocchio forward/inverse kinematics together with
    pybullet_planning's collision checking to sample collision-free joint
    configurations that reach given target tip positions.
    '''
    def __init__(self, env):
        from .const import INIT_JOINT_CONF
        # pinocchio-based forward / inverse kinematics callables
        self.fk = env.platform.simfinger.pinocchio_utils.forward_kinematics
        self.ik = env.platform.simfinger.pinocchio_utils.inverse_kinematics
        self.finger_id = env.platform.simfinger.finger_id
        self.tip_ids = env.platform.simfinger.pybullet_tip_link_indices
        self.link_ids = env.platform.simfinger.pybullet_link_indices
        self.cube_id = env.platform.cube.block
        self.env = env
        # tip positions at the initial joint configuration; used by
        # _assign_positions_to_fingers as the reference layout
        self.tips_init = self.fk(INIT_JOINT_CONF)
    def sample_no_collision_ik(self, target_tip_positions, sort_tips=False, slacky_collision=False):
        '''Sample collision-free IK solutions for the given tip targets.
        Args:
            target_tip_positions: target tip positions (one row per tip)
            sort_tips: if True, permute the targets so the assignment is
                closest to the initial tip positions
            slacky_collision: if True, ignore tip-cube contact pairs and
                allow slight penetration (see _get_collision_conf)
        Returns:
            Solutions from sample_multiple_ik_with_collision (num_samples=3).
        '''
        from pybullet_planning.interfaces.kinematics.ik_utils import sample_multiple_ik_with_collision
        # IK sampling perturbs the simulation, so snapshot and restore the
        # pybullet world state around it
        with keep_state(self.env):
            if sort_tips:
                target_tip_positions, _ = self._assign_positions_to_fingers(target_tip_positions)
            collision_fn = self._get_collision_fn(slacky_collision)
            sample_fn = self._get_sample_fn()
            solutions = sample_multiple_ik_with_collision(self.ik, collision_fn, sample_fn,
                                                          target_tip_positions, num_samples=3)
        return solutions
    def _get_collision_fn(self, slacky_collision):
        '''Build a pybullet_planning collision-check function.'''
        from pybullet_planning.interfaces.robots.collision import get_collision_fn
        return get_collision_fn(**self._get_collision_conf(slacky_collision))
    def _get_collision_conf(self, slacky_collision):
        '''Return the kwargs dict for get_collision_fn.
        slacky_collision=True checks self-collisions but disables the
        tip-cube collision pairs and tolerates penetration up to
        COLLISION_TOLERANCE; otherwise the cube is a hard obstacle and
        self-collisions are not checked.
        '''
        from .const import COLLISION_TOLERANCE
        if slacky_collision:
            # allow each finger tip to touch the cube base link (-1)
            disabled_collisions = [((self.finger_id, tip_id), (self.cube_id, -1))
                                   for tip_id in self.tip_ids]
            config = {
                'body': self.finger_id,
                'joints': self.link_ids,
                'obstacles': [self.cube_id],
                'self_collisions': True,
                'extra_disabled_collisions': disabled_collisions,
                # negative max_distance permits slight penetration
                'max_distance': -COLLISION_TOLERANCE
            }
        else:
            config = {
                'body': self.finger_id,
                'joints': self.link_ids,
                'obstacles': [self.cube_id],
                'self_collisions': False
            }
        return config
    def _get_sample_fn(self):
        '''Return a function sampling uniform random joint positions.'''
        space = self.env.platform.spaces.robot_position.gym
        def _sample_fn():
            # uniform sample inside the joint-position box [low, high]
            s = np.random.rand(space.shape[0])
            return s * (space.high - space.low) + space.low
        return _sample_fn
    def _assign_positions_to_fingers(self, tips):
        '''Permute the target tips to best match the initial tip layout.
        Tries all row permutations of the three targets and keeps the one
        with the smallest norm distance to self.tips_init.
        Returns:
            (permuted tips, permutation indices)
        '''
        min_cost = 1000000
        opt_tips = []
        opt_inds = [0, 1, 2]
        for v in itertools.permutations([0, 1, 2]):
            sorted_tips = tips[v, :]
            cost = np.linalg.norm(sorted_tips - self.tips_init)
            if min_cost > cost:
                min_cost = cost
                opt_tips = sorted_tips
                opt_inds = v
        return opt_tips, opt_inds
    def get_joint_conf(self):
        '''Return the latest observed joint (positions, velocities).'''
        obs = self.env.platform.simfinger._get_latest_observation()
        return obs.position, obs.velocity
def get_body_state(body_id):
    '''Return (position, orientation, velocity) of a pybullet body.
    velocity is [linear_velocity, angular_velocity] as reported by pybullet.
    '''
    pos, orn = p.getBasePositionAndOrientation(body_id)
    vel = p.getBaseVelocity(body_id)
    return list(pos), list(orn), list(vel)
def set_body_state(body_id, position, orientation, velocity):
    '''Set a pybullet body's pose and (linear, angular) velocity.'''
    p.resetBasePositionAndOrientation(body_id, position, orientation)
    lin_vel, ang_vel = velocity
    p.resetBaseVelocity(body_id, lin_vel, ang_vel)
class AssertNoStateChanges:
    '''Context manager asserting that the cube pose/velocity and the finger
    joint positions/velocities are unchanged between entry and exit.'''

    def __init__(self, env):
        self.cube_id = env.platform.cube.block
        self.finger_id = env.platform.simfinger.finger_id
        self.finger_links = env.platform.simfinger.pybullet_link_indices

    def __enter__(self):
        from .utils import get_body_state, set_body_state
        from pybullet_planning.interfaces.robots.joint import get_joint_velocities, get_joint_positions
        # snapshot cube pose/velocity and finger joint state
        pos, ori, vel = get_body_state(self.cube_id)
        self.org_obj_pos = pos
        self.org_obj_ori = ori
        self.org_obj_vel = vel
        self.org_joint_pos = get_joint_positions(self.finger_id, self.finger_links)
        self.org_joint_vel = get_joint_velocities(self.finger_id, self.finger_links)

    def __exit__(self, type, value, traceback):
        from pybullet_planning.interfaces.robots.joint import get_joint_velocities, get_joint_positions
        pos, ori, vel = get_body_state(self.cube_id)
        joint_pos = get_joint_positions(self.finger_id, self.finger_links)
        joint_vel = get_joint_velocities(self.finger_id, self.finger_links)
        # compare everything captured on entry against the current state
        checks = [
            (self.org_obj_pos, pos),
            (self.org_obj_ori, ori),
            (self.org_obj_vel[0], vel[0]),
            (self.org_obj_vel[1], vel[1]),
            (self.org_joint_pos, joint_pos),
            (self.org_joint_vel, joint_vel),
        ]
        for expected, actual in checks:
            np.testing.assert_array_almost_equal(expected, actual)
| 11,257 | -1 | 929 |